Add NDK APIs to query preferred memory alignment and padding.

This CL adds NDK APIs to query the preferred alignment and padding for
input/output buffers or memory object regions. The alignment and padding
are reported at the level of a specific compilation input/output.

The current implementation always returns a fixed value. We will add HAL
APIs for drivers to report the preferences in future updates.

Bug: 179691454
Test: NNT_static
Change-Id: Ib39ab3497b22eb2438f2a1a8754d5ea593852aca
diff --git a/common/include/nnapi/Types.h b/common/include/nnapi/Types.h
index 7a0199e..4cfc084 100644
--- a/common/include/nnapi/Types.h
+++ b/common/include/nnapi/Types.h
@@ -52,6 +52,10 @@
 constexpr uint32_t kMaxNumberOfCacheFiles = 32;
 constexpr uint8_t kExtensionTypeBits = 16;
 constexpr uint8_t kExtensionPrefixBits = 16;
+constexpr uint32_t kDefaultRequestMemoryAlignment = 64;
+constexpr uint32_t kDefaultRequestMemoryPadding = 64;
+constexpr uint32_t kMinMemoryAlignment = alignof(std::max_align_t);
+constexpr uint32_t kMinMemoryPadding = 1;
 
 // Aliases
 
diff --git a/runtime/CompilationBuilder.cpp b/runtime/CompilationBuilder.cpp
index d5c82c2..5bd1f13 100644
--- a/runtime/CompilationBuilder.cpp
+++ b/runtime/CompilationBuilder.cpp
@@ -20,6 +20,7 @@
 
 #include <LegacyUtils.h>
 #include <nnapi/IBurst.h>
+#include <nnapi/Types.h>
 
 #include <algorithm>
 #include <limits>
@@ -188,6 +189,117 @@
     return ANEURALNETWORKS_NO_ERROR;
 }
 
+int CompilationBuilder::getPreferredMemoryAlignmentForInput(uint32_t index,
+                                                            uint32_t* alignment) const {
+    CHECK(alignment != nullptr);
+    if (!mFinished) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput passed an "
+                      "unfinished compilation";
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+    if (!mPlan.isValid()) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput passed an "
+                      "invalid compilation";
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+    if (index >= mModel->inputCount()) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput passed an "
+                      "invalid input index "
+                   << index;
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    uint32_t value = kMinMemoryAlignment;
+    mPlan.forEachStepRoleOfInput(
+            index, [&value](const RuntimePreparedModel* preparedModel, IOType, uint32_t) {
+                value = std::max(value, preparedModel->getMemoryPreference().first);
+            });
+    *alignment = value;
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+int CompilationBuilder::getPreferredMemoryPaddingForInput(uint32_t index, uint32_t* padding) const {
+    CHECK(padding != nullptr);
+    if (!mFinished) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput passed an "
+                      "unfinished compilation";
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+    if (!mPlan.isValid()) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput passed an "
+                      "invalid compilation";
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+    if (index >= mModel->inputCount()) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput passed an "
+                      "invalid input index "
+                   << index;
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    uint32_t value = kMinMemoryPadding;
+    mPlan.forEachStepRoleOfInput(
+            index, [&value](const RuntimePreparedModel* preparedModel, IOType, uint32_t) {
+                value = std::max(value, preparedModel->getMemoryPreference().second);
+            });
+    *padding = value;
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+int CompilationBuilder::getPreferredMemoryAlignmentForOutput(uint32_t index,
+                                                             uint32_t* alignment) const {
+    CHECK(alignment != nullptr);
+    if (!mFinished) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput passed an "
+                      "unfinished compilation";
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+    if (!mPlan.isValid()) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput passed an "
+                      "invalid compilation";
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+    if (index >= mModel->outputCount()) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput passed an "
+                      "invalid output index "
+                   << index;
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    uint32_t value = kMinMemoryAlignment;
+    mPlan.forEachStepRoleOfOutput(
+            index, [&value](const RuntimePreparedModel* preparedModel, IOType, uint32_t) {
+                value = std::max(value, preparedModel->getMemoryPreference().first);
+            });
+    *alignment = value;
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+int CompilationBuilder::getPreferredMemoryPaddingForOutput(uint32_t index,
+                                                           uint32_t* padding) const {
+    CHECK(padding != nullptr);
+    if (!mFinished) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput passed an "
+                      "unfinished compilation";
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+    if (!mPlan.isValid()) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput passed an "
+                      "invalid compilation";
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+    if (index >= mModel->outputCount()) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput passed an "
+                      "invalid output index "
+                   << index;
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    uint32_t value = kMinMemoryPadding;
+    mPlan.forEachStepRoleOfOutput(
+            index, [&value](const RuntimePreparedModel* preparedModel, IOType, uint32_t) {
+                value = std::max(value, preparedModel->getMemoryPreference().second);
+            });
+    *padding = value;
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
 int CompilationBuilder::createExecution(ExecutionBuilder** execution) {
     if (!mFinished) {
         LOG(ERROR) << "ANeuralNetworksExecution_create passed an unfinished compilation";
diff --git a/runtime/CompilationBuilder.h b/runtime/CompilationBuilder.h
index 0f2db4d..5c74731 100644
--- a/runtime/CompilationBuilder.h
+++ b/runtime/CompilationBuilder.h
@@ -55,6 +55,11 @@
 
     int finish();
 
+    int getPreferredMemoryAlignmentForInput(uint32_t index, uint32_t* alignment) const;
+    int getPreferredMemoryPaddingForInput(uint32_t index, uint32_t* padding) const;
+    int getPreferredMemoryAlignmentForOutput(uint32_t index, uint32_t* alignment) const;
+    int getPreferredMemoryPaddingForOutput(uint32_t index, uint32_t* padding) const;
+
     int createExecution(ExecutionBuilder** execution);
 
     int createBurst(BurstBuilder** burst);
diff --git a/runtime/Manager.cpp b/runtime/Manager.cpp
index ffe0a46..b536163 100644
--- a/runtime/Manager.cpp
+++ b/runtime/Manager.cpp
@@ -26,6 +26,7 @@
 #include <nnapi/IDevice.h>
 #include <nnapi/IPreparedModel.h>
 #include <nnapi/SharedMemory.h>
+#include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 
 #include <algorithm>
@@ -157,6 +158,16 @@
         return mPreparedModel->configureExecutionBurst();
     }
 
+    std::pair<uint32_t, uint32_t> getMemoryPreference() const override {
+        if (mDevice->getFeatureLevel() >= __ANDROID_API_S__) {
+            return {kDefaultRequestMemoryAlignment, kDefaultRequestMemoryPadding};
+        } else {
+            // We are not able to pass memory padding information to HIDL drivers, so return the
+            // minimum padding.
+            return {kDefaultRequestMemoryAlignment, kMinMemoryPadding};
+        }
+    }
+
    private:
     const Device* mDevice;
     const SharedPreparedModel mPreparedModel;
@@ -789,11 +800,19 @@
             const OptionalDuration& loopTimeoutDuration,
             const OptionalDuration& timeoutDurationAfterFence) const override;
 
+    std::pair<uint32_t, uint32_t> getMemoryPreference() const override {
+        return {kPreferredAlignment, kPreferredPadding};
+    }
+
     // Prefer to use CpuPreparedModel::create.
     CpuPreparedModel(Model model, std::vector<RunTimePoolInfo> poolInfos)
         : mModel(std::move(model)), mModelPoolInfos(std::move(poolInfos)) {}
 
    private:
+    // TFLite kernels prefer 64 bytes for both padding and alignment.
+    static constexpr uint32_t kPreferredAlignment = 64;
+    static constexpr uint32_t kPreferredPadding = 64;
+
     const Model mModel;
     const std::vector<RunTimePoolInfo> mModelPoolInfos;
 };
diff --git a/runtime/Manager.h b/runtime/Manager.h
index 89f431f..d173e9d 100644
--- a/runtime/Manager.h
+++ b/runtime/Manager.h
@@ -73,6 +73,9 @@
             const OptionalDuration& timeoutDurationAfterFence) const = 0;
 
     virtual GeneralResult<SharedBurst> configureExecutionBurst() const = 0;
+
+    // Returns a pair of {alignment, padding}.
+    virtual std::pair<uint32_t, uint32_t> getMemoryPreference() const = 0;
 };
 
 using ModelFactory = std::function<Model()>;
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
index 27cc6ba..3df11df 100644
--- a/runtime/NeuralNetworks.cpp
+++ b/runtime/NeuralNetworks.cpp
@@ -1583,3 +1583,55 @@
     ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
     return r->enableInputAndOutputPadding(enable);
 }
+
+int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION,
+               "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput");
+    if (!compilation || !alignment) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput passed a "
+                      "nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const CompilationBuilder* c = reinterpret_cast<const CompilationBuilder*>(compilation);
+    return c->getPreferredMemoryAlignmentForInput(index, alignment);
+}
+
+int ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION,
+               "ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput");
+    if (!compilation || !padding) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput passed a "
+                      "nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const CompilationBuilder* c = reinterpret_cast<const CompilationBuilder*>(compilation);
+    return c->getPreferredMemoryPaddingForInput(index, padding);
+}
+
+int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION,
+               "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput");
+    if (!compilation || !alignment) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput passed a "
+                      "nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const CompilationBuilder* c = reinterpret_cast<const CompilationBuilder*>(compilation);
+    return c->getPreferredMemoryAlignmentForOutput(index, alignment);
+}
+
+int ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION,
+               "ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput");
+    if (!compilation || !padding) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput passed a "
+                      "nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const CompilationBuilder* c = reinterpret_cast<const CompilationBuilder*>(compilation);
+    return c->getPreferredMemoryPaddingForOutput(index, padding);
+}
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index 3513516..db4fa35 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -7686,6 +7686,9 @@
  * less than the raw size of the input will result in ANEURALNETWORKS_BAD_DATA.
  *
  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
+ * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput} and
+ * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput} for information on getting
+ * preferred buffer alignment and padding, to improve performance.
  *
  * Available since NNAPI feature level 1.
  *
@@ -7751,6 +7754,9 @@
  * AHardwareBuffer usage.
  * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
  * created from memory descriptors.
+ * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput} and
+ * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput} for information on getting
+ * preferred memory alignment and padding, to improve performance.
  *
  * Available since NNAPI feature level 1.
  *
@@ -7806,6 +7812,9 @@
  * less than the raw size of the output will result in ANEURALNETWORKS_BAD_DATA.
  *
  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
+ * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput} and
+ * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput} for information on getting
+ * preferred buffer alignment and padding, to improve performance.
  *
  * Available since NNAPI feature level 1.
  *
@@ -7875,6 +7884,9 @@
  * AHardwareBuffer usage.
  * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
  * created from memory descriptors.
+ * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput} and
+ * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput} for information on getting
+ * preferred memory alignment and padding, to improve performance.
  *
  * Available since NNAPI feature level 1.
  *
@@ -8285,6 +8297,146 @@
 int ANeuralNetworksExecution_enableInputAndOutputPadding(ANeuralNetworksExecution* execution,
                                                          bool enable) __INTRODUCED_IN(31);
 
+/**
+ * Get the preferred buffer and memory alignment of an input to an execution created from a
+ * particular compilation.
+ *
+ * The user may use the returned alignment value to guide the layout of the input buffer or memory
+ * pool. To achieve the best performance, make sure the address of the buffer passed in
+ * {@link ANeuralNetworksExecution_setInput}, or the offset value passed in
+ * {@link ANeuralNetworksExecution_setInputFromMemory}, is a multiple of the preferred alignment
+ * value of the same input. A driver may choose to allocate a separate buffer and do memory copying
+ * if the provided buffer or memory does not satisfy the preferred alignment.
+ *
+ * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
+ *
+ * @param compilation The compilation object. It must already have been finished by calling
+ *                    {@link ANeuralNetworksCompilation_finish}.
+ * @param index The index of the input argument we are referencing from the compilation. It is
+ *              an index into the inputs list passed to
+ *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
+ *              the index associated with {@link ANeuralNetworksModel_addOperand}.
+ * @param alignment The returned preferred alignment. It will be a power of 2.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or alignment is NULL.
+ *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
+ *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
+ *
+ * Available since NNAPI feature level 5.
+ */
+int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment)
+        __INTRODUCED_IN(31);
+
+/**
+ * Get the preferred buffer and memory end padding of an input to an execution created from a
+ * particular compilation.
+ *
+ * The user may use the returned padding value to guide the layout of the input buffer or memory
+ * pool. To achieve the best performance, make sure the length value passed in
+ * {@link ANeuralNetworksExecution_setInput} or
+ * {@link ANeuralNetworksExecution_setInputFromMemory} is greater than or equal to the raw size of
+ * the input (i.e. the size of an element multiplied by the number of elements) rounded up to
+ * a multiple of the preferred padding value of the same input. A driver may choose to allocate a
+ * separate buffer and do memory copying if the provided buffer or memory value does not satisfy
+ * the preferred padding.
+ *
+ * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
+ * See {@link ANeuralNetworksExecution_enableInputAndOutputPadding},
+ * {@link ANeuralNetworksExecution_setInput}, and
+ * {@link ANeuralNetworksExecution_setInputFromMemory} for information on passing
+ * input buffer or memory padding to the driver.
+ *
+ * @param compilation The compilation object. It must already have been finished by calling
+ *                    {@link ANeuralNetworksCompilation_finish}.
+ * @param index The index of the input argument we are referencing from the compilation. It is
+ *              an index into the inputs list passed to
+ *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
+ *              the index associated with {@link ANeuralNetworksModel_addOperand}.
+ * @param padding The returned preferred padding. It will be a power of 2.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or padding is NULL.
+ *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
+ *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
+ *
+ * Available since NNAPI feature level 5.
+ */
+int ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding)
+        __INTRODUCED_IN(31);
+
+/**
+ * Get the preferred buffer and memory alignment of an output to an execution created from a
+ * particular compilation.
+ *
+ * The user may use the returned alignment value to guide the layout of the output buffer or memory
+ * pool. To achieve the best performance, make sure the address of the buffer passed in
+ * {@link ANeuralNetworksExecution_setOutput}, or the offset value passed in
+ * {@link ANeuralNetworksExecution_setOutputFromMemory}, is a multiple of the preferred alignment
+ * value of the same output. A driver may choose to allocate a separate buffer and do memory copying
+ * if the provided buffer or memory does not satisfy the preferred alignment.
+ *
+ * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
+ *
+ * @param compilation The compilation object. It must already have been finished by calling
+ *                    {@link ANeuralNetworksCompilation_finish}.
+ * @param index The index of the output argument we are referencing from the compilation. It is
+ *              an index into the outputs list passed to
+ *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
+ *              the index associated with {@link ANeuralNetworksModel_addOperand}.
+ * @param alignment The returned preferred alignment. It will be a power of 2.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or alignment is NULL.
+ *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
+ *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
+ *
+ * Available since NNAPI feature level 5.
+ */
+int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment)
+        __INTRODUCED_IN(31);
+
+/**
+ * Get the preferred memory end padding of an output to an execution created from a particular
+ * compilation.
+ *
+ * The user may use the returned padding value to guide the layout of the output buffer or memory
+ * pool. To achieve the best performance, make sure the length value passed in
+ * {@link ANeuralNetworksExecution_setOutput} or
+ * {@link ANeuralNetworksExecution_setOutputFromMemory} is greater than or equal to the raw size of
+ * the output (i.e. the size of an element multiplied by the number of elements) rounded up to
+ * a multiple of the preferred padding value of the same output. A driver may choose to allocate a
+ * separate buffer and do memory copying if the provided buffer or memory value does not satisfy
+ * the preferred padding.
+ *
+ * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
+ * See {@link ANeuralNetworksExecution_enableInputAndOutputPadding},
+ * {@link ANeuralNetworksExecution_setOutput}, and
+ * {@link ANeuralNetworksExecution_setOutputFromMemory} for information on passing
+ * output buffer or memory padding to the driver.
+ *
+ * @param compilation The compilation object. It must already have been finished by calling
+ *                    {@link ANeuralNetworksCompilation_finish}.
+ * @param index The index of the output argument we are referencing from the compilation. It is
+ *              an index into the outputs list passed to
+ *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
+ *              the index associated with {@link ANeuralNetworksModel_addOperand}.
+ * @param padding The returned preferred padding. It will be a power of 2.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or padding is NULL.
+ *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
+ *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
+ *
+ * Available since NNAPI feature level 5.
+ */
+int ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding)
+        __INTRODUCED_IN(31);
+
 __END_DECLS
 
 #endif  // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
diff --git a/runtime/include/NeuralNetworksWrapper.h b/runtime/include/NeuralNetworksWrapper.h
index dd9654d..c5af0b5 100644
--- a/runtime/include/NeuralNetworksWrapper.h
+++ b/runtime/include/NeuralNetworksWrapper.h
@@ -506,6 +506,30 @@
         return static_cast<Result>(NNAPI_CALL(ANeuralNetworksCompilation_finish(mCompilation)));
     }
 
+    Result getPreferredMemoryAlignmentForInput(uint32_t index, uint32_t* alignment) const {
+        return static_cast<Result>(
+                NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(
+                        mCompilation, index, alignment)));
+    };
+
+    Result getPreferredMemoryPaddingForInput(uint32_t index, uint32_t* padding) const {
+        return static_cast<Result>(
+                NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(
+                        mCompilation, index, padding)));
+    };
+
+    Result getPreferredMemoryAlignmentForOutput(uint32_t index, uint32_t* alignment) const {
+        return static_cast<Result>(
+                NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
+                        mCompilation, index, alignment)));
+    };
+
+    Result getPreferredMemoryPaddingForOutput(uint32_t index, uint32_t* padding) const {
+        return static_cast<Result>(
+                NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(
+                        mCompilation, index, padding)));
+    };
+
     ANeuralNetworksCompilation* getHandle() const { return mCompilation; }
 
 #ifdef NNTEST_SLTS
diff --git a/runtime/libneuralnetworks.map.txt b/runtime/libneuralnetworks.map.txt
index f5927d9..043f670 100644
--- a/runtime/libneuralnetworks.map.txt
+++ b/runtime/libneuralnetworks.map.txt
@@ -61,6 +61,10 @@
     ANeuralNetworksCompilation_finish;
     ANeuralNetworksCompilation_setPriority; # introduced=30
     ANeuralNetworksCompilation_setTimeout; # introduced=30
+    ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput; # introduced=31
+    ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput; # introduced=31
+    ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput; # introduced=31
+    ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput; # introduced=31
     ANeuralNetworksBurst_create; # introduced=Q
     ANeuralNetworksBurst_free; # introduced=Q
     ANeuralNetworksExecution_burstCompute; # introduced=Q
diff --git a/runtime/test/TestGenerated.cpp b/runtime/test/TestGenerated.cpp
index 2eb904d..4a4639e 100644
--- a/runtime/test/TestGenerated.cpp
+++ b/runtime/test/TestGenerated.cpp
@@ -280,11 +280,37 @@
     }
 }
 
+static bool isPowerOfTwo(uint32_t x) {
+    return x > 0 && ((x & (x - 1)) == 0);
+}
+
+static void validateCompilationMemoryPreferences(const Compilation& compilation,
+                                                 const TestModel& testModel) {
+    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
+        SCOPED_TRACE("Input index: " + std::to_string(i));
+        uint32_t alignment = 0, padding = 0;
+        ASSERT_EQ(compilation.getPreferredMemoryAlignmentForInput(i, &alignment), Result::NO_ERROR);
+        ASSERT_EQ(compilation.getPreferredMemoryPaddingForInput(i, &padding), Result::NO_ERROR);
+        EXPECT_TRUE(isPowerOfTwo(alignment)) << "alignment: " << alignment;
+        EXPECT_TRUE(isPowerOfTwo(padding)) << "padding: " << padding;
+    }
+    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
+        SCOPED_TRACE("Output index: " + std::to_string(i));
+        uint32_t alignment = 0, padding = 0;
+        ASSERT_EQ(compilation.getPreferredMemoryAlignmentForOutput(i, &alignment),
+                  Result::NO_ERROR);
+        ASSERT_EQ(compilation.getPreferredMemoryPaddingForOutput(i, &padding), Result::NO_ERROR);
+        EXPECT_TRUE(isPowerOfTwo(alignment)) << "alignment: " << alignment;
+        EXPECT_TRUE(isPowerOfTwo(padding)) << "padding: " << padding;
+    }
+}
+
 void GeneratedTests::executeOnce(const Model& model, const TestModel& testModel) {
     NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeOnce");
     std::optional<Compilation> compilation = compileModel(model);
     // Early return if compilation fails. The compilation result code is checked in compileModel.
     if (!compilation) return;
+    validateCompilationMemoryPreferences(compilation.value(), testModel);
     executeWithCompilation(compilation.value(), testModel);
 }
 
diff --git a/runtime/test/TestNeuralNetworksWrapper.h b/runtime/test/TestNeuralNetworksWrapper.h
index c67b215..e3f3527 100644
--- a/runtime/test/TestNeuralNetworksWrapper.h
+++ b/runtime/test/TestNeuralNetworksWrapper.h
@@ -308,6 +308,26 @@
 
     Result finish() { return static_cast<Result>(ANeuralNetworksCompilation_finish(mCompilation)); }
 
+    Result getPreferredMemoryAlignmentForInput(uint32_t index, uint32_t* alignment) const {
+        return static_cast<Result>(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(
+                mCompilation, index, alignment));
+    };
+
+    Result getPreferredMemoryPaddingForInput(uint32_t index, uint32_t* padding) const {
+        return static_cast<Result>(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(
+                mCompilation, index, padding));
+    };
+
+    Result getPreferredMemoryAlignmentForOutput(uint32_t index, uint32_t* alignment) const {
+        return static_cast<Result>(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
+                mCompilation, index, alignment));
+    };
+
+    Result getPreferredMemoryPaddingForOutput(uint32_t index, uint32_t* padding) const {
+        return static_cast<Result>(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(
+                mCompilation, index, padding));
+    };
+
     ANeuralNetworksCompilation* getHandle() const { return mCompilation; }
 
    protected:
diff --git a/runtime/test/TestValidation.cpp b/runtime/test/TestValidation.cpp
index ef0a28c..046010c 100644
--- a/runtime/test/TestValidation.cpp
+++ b/runtime/test/TestValidation.cpp
@@ -1145,6 +1145,63 @@
               ANEURALNETWORKS_BAD_DATA);
 }
 
+TEST_F(ValidationTestCompilation, GetPreferredMemoryAlignmentAndPadding) {
+    uint32_t result;
+
+    // The following calls should fail, because the compilation has not been finished.
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(mCompilation, 0,
+                                                                             &result),
+              ANEURALNETWORKS_BAD_STATE);
+    EXPECT_EQ(
+            ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(mCompilation, 0, &result),
+            ANEURALNETWORKS_BAD_STATE);
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(mCompilation, 0,
+                                                                              &result),
+              ANEURALNETWORKS_BAD_STATE);
+    EXPECT_EQ(
+            ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(mCompilation, 0, &result),
+            ANEURALNETWORKS_BAD_STATE);
+
+    EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
+
+    // The following calls should fail because of unexpected nullptr.
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(nullptr, 0, &result),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(mCompilation, 0,
+                                                                             nullptr),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(nullptr, 0, &result),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(
+            ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(mCompilation, 0, nullptr),
+            ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(nullptr, 0, &result),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(mCompilation, 0,
+                                                                              nullptr),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(nullptr, 0, &result),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(
+            ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(mCompilation, 0, nullptr),
+            ANEURALNETWORKS_UNEXPECTED_NULL);
+
+    // The following calls should fail, because the index is out of range.
+    const uint32_t invalidIndex = 1000;
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(mCompilation,
+                                                                             invalidIndex, &result),
+              ANEURALNETWORKS_BAD_DATA);
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(mCompilation,
+                                                                           invalidIndex, &result),
+              ANEURALNETWORKS_BAD_DATA);
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
+                      mCompilation, invalidIndex, &result),
+              ANEURALNETWORKS_BAD_DATA);
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(mCompilation,
+                                                                            invalidIndex, &result),
+              ANEURALNETWORKS_BAD_DATA);
+}
+
 // Also see TEST_F(ValidationTestCompilationForDevices_1, CreateExecution)
 TEST_F(ValidationTestCompilation, CreateExecution) {
     ANeuralNetworksExecution* execution = nullptr;
@@ -2767,6 +2824,25 @@
     ANeuralNetworksCompilation* mInvalidCompilation = nullptr;
 };
 
+TEST_F(ValidationTestInvalidCompilation, GetPreferredMemoryAlignmentAndPadding) {
+    if (!mInvalidCompilation) {
+        return;
+    }
+    uint32_t result;
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(mInvalidCompilation, 0,
+                                                                             &result),
+              ANEURALNETWORKS_BAD_STATE);
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(mInvalidCompilation, 0,
+                                                                           &result),
+              ANEURALNETWORKS_BAD_STATE);
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(mInvalidCompilation,
+                                                                              0, &result),
+              ANEURALNETWORKS_BAD_STATE);
+    EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(mInvalidCompilation, 0,
+                                                                            &result),
+              ANEURALNETWORKS_BAD_STATE);
+}
+
 TEST_F(ValidationTestInvalidCompilation, CreateExecution) {
     if (!mInvalidCompilation) {
         return;
diff --git a/sl/SupportLibrary.cpp b/sl/SupportLibrary.cpp
index d0a9338..c5daa49 100644
--- a/sl/SupportLibrary.cpp
+++ b/sl/SupportLibrary.cpp
@@ -127,6 +127,10 @@
     LOAD_FUNCTION(lib_handle, ANeuralNetworksModel_getExtensionOperandType);
     LOAD_FUNCTION(lib_handle, ANeuralNetworksModel_getExtensionOperationType);
     LOAD_FUNCTION(lib_handle, ANeuralNetworksModel_setOperandExtensionData);
+    LOAD_FUNCTION(lib_handle, ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput);
+    LOAD_FUNCTION(lib_handle, ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput);
+    LOAD_FUNCTION(lib_handle, ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput);
+    LOAD_FUNCTION(lib_handle, ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput);
 
     nnapi->lib_handle = lib_handle;
     return nnapi;
diff --git a/sl/SupportLibrary.h b/sl/SupportLibrary.h
index a078571..41113ca 100644
--- a/sl/SupportLibrary.h
+++ b/sl/SupportLibrary.h
@@ -181,6 +181,14 @@
                                                           ANeuralNetworksOperationType* type);
     int (*ANeuralNetworksModel_setOperandExtensionData)(ANeuralNetworksModel* model, int32_t index,
                                                         const void* data, size_t length);
+    int (*ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput)(
+            const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment);
+    int (*ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput)(
+            const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding);
+    int (*ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput)(
+            const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment);
+    int (*ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput)(
+            const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding);
 };
 
 /**
diff --git a/sl/SupportLibraryTypes.h b/sl/SupportLibraryTypes.h
index 9dacb68..75e76c3 100644
--- a/sl/SupportLibraryTypes.h
+++ b/sl/SupportLibraryTypes.h
@@ -708,4 +708,16 @@
 typedef int (*ANeuralNetworksEvent_getSyncFenceFd_fn)(const ANeuralNetworksEvent* event,
                                                       int* sync_fence_fd);
 
+typedef int (*ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput_fn)(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment);
+
+typedef int (*ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput_fn)(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding);
+
+typedef int (*ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput_fn)(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment);
+
+typedef int (*ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput_fn)(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding);
+
 #endif  // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_SL_SUPPORT_LIBRARY_TYPES_H
diff --git a/sl/SupportLibraryWrapper.h b/sl/SupportLibraryWrapper.h
index 595e4e4..ec6de8d 100644
--- a/sl/SupportLibraryWrapper.h
+++ b/sl/SupportLibraryWrapper.h
@@ -365,6 +365,30 @@
         return static_cast<Result>(mNnApi->ANeuralNetworksCompilation_finish(mCompilation));
     }
 
+    Result getPreferredMemoryAlignmentForInput(uint32_t index, uint32_t* alignment) const {
+        return static_cast<Result>(
+                mNnApi->ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(
+                        mCompilation, index, alignment));
+    };
+
+    Result getPreferredMemoryPaddingForInput(uint32_t index, uint32_t* padding) const {
+        return static_cast<Result>(
+                mNnApi->ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(
+                        mCompilation, index, padding));
+    };
+
+    Result getPreferredMemoryAlignmentForOutput(uint32_t index, uint32_t* alignment) const {
+        return static_cast<Result>(
+                mNnApi->ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
+                        mCompilation, index, alignment));
+    };
+
+    Result getPreferredMemoryPaddingForOutput(uint32_t index, uint32_t* padding) const {
+        return static_cast<Result>(
+                mNnApi->ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(
+                        mCompilation, index, padding));
+    };
+
     ANeuralNetworksCompilation* getHandle() const { return mCompilation; }
 
    protected:
diff --git a/tools/api/NeuralNetworks.t b/tools/api/NeuralNetworks.t
index 367a980..5bcf4b1 100644
--- a/tools/api/NeuralNetworks.t
+++ b/tools/api/NeuralNetworks.t
@@ -2106,6 +2106,9 @@
  * less than the raw size of the input will result in ANEURALNETWORKS_BAD_DATA.
  *
  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
+ * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput} and
+ * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput} for information on getting
+ * preferred buffer alignment and padding, to improve performance.
  *
  * Available since NNAPI feature level 1.
  *
@@ -2171,6 +2174,9 @@
  * AHardwareBuffer usage.
  * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
  * created from memory descriptors.
+ * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput} and
+ * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput} for information on getting
+ * preferred memory alignment and padding, to improve performance.
  *
  * Available since NNAPI feature level 1.
  *
@@ -2226,6 +2232,9 @@
  * less than the raw size of the output will result in ANEURALNETWORKS_BAD_DATA.
  *
  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
+ * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput} and
+ * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput} for information on getting
+ * preferred buffer alignment and padding, to improve performance.
  *
  * Available since NNAPI feature level 1.
  *
@@ -2295,6 +2304,9 @@
  * AHardwareBuffer usage.
  * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
  * created from memory descriptors.
+ * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput} and
+ * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput} for information on getting
+ * preferred memory alignment and padding, to improve performance.
  *
  * Available since NNAPI feature level 1.
  *
@@ -2705,6 +2717,146 @@
 int ANeuralNetworksExecution_enableInputAndOutputPadding(ANeuralNetworksExecution* execution,
                                                          bool enable) __INTRODUCED_IN(31);
 
+/**
+ * Get the preferred buffer and memory alignment of an input to an execution created from a
+ * particular compilation.
+ *
+ * The user may use the returned alignment value to guide the layout of the input buffer or memory
+ * pool. To achieve the best performance, make sure the address of the buffer passed in
+ * {@link ANeuralNetworksExecution_setInput}, or the offset value passed in
+ * {@link ANeuralNetworksExecution_setInputFromMemory}, is a multiple of the preferred alignment
+ * value of the same input. A driver may choose to allocate a separate buffer and do memory copying
+ * if the provided buffer or memory does not satisfy the preferred alignment.
+ *
+ * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
+ *
+ * @param compilation The compilation object. It must already have been finished by calling
+ *                    {@link ANeuralNetworksCompilation_finish}.
+ * @param index The index of the input argument we are referencing from the compilation. It is
+ *              an index into the inputs list passed to
+ *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
+ *              the index associated with {@link ANeuralNetworksModel_addOperand}.
+ * @param alignment The returned preferred alignment. It will be a power of 2.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or alignment is NULL.
+ *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
+ *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
+ *
+ * Available since NNAPI feature level 5.
+ */
+int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment)
+        __INTRODUCED_IN(31);
+
+/**
+ * Get the preferred buffer and memory end padding of an input to an execution created from a
+ * particular compilation.
+ *
+ * The user may use the returned padding value to guide the layout of the input buffer or memory
+ * pool. To achieve the best performance, make sure the length value passed in
+ * {@link ANeuralNetworksExecution_setInput} or
+ * {@link ANeuralNetworksExecution_setInputFromMemory} is greater than or equal to the raw size of
+ * the input (i.e. the size of an element multiplied by the number of elements) rounding up to
+ * a multiple of the preferred padding value of the same input. A driver may choose to allocate a
+ * separate buffer and do memory copying if the provided buffer or memory value does not satisfy
+ * the preferred padding.
+ *
+ * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
+ * See {@link ANeuralNetworksExecution_enableInputAndOutputPadding},
+ * {@link ANeuralNetworksExecution_setInput}, and
+ * {@link ANeuralNetworksExecution_setInputFromMemory} for information on passing
+ * input buffer or memory padding to the driver.
+ *
+ * @param compilation The compilation object. It must already have been finished by calling
+ *                    {@link ANeuralNetworksCompilation_finish}.
+ * @param index The index of the input argument we are referencing from the compilation. It is
+ *              an index into the inputs list passed to
+ *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
+ *              the index associated with {@link ANeuralNetworksModel_addOperand}.
+ * @param padding The returned preferred padding. It will be a power of 2.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or padding is NULL.
+ *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
+ *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
+ *
+ * Available since NNAPI feature level 5.
+ */
+int ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding)
+        __INTRODUCED_IN(31);
+
+/**
+ * Get the preferred buffer and memory alignment of an output to an execution created from a
+ * particular compilation.
+ *
+ * The user may use the returned alignment value to guide the layout of the output buffer or memory
+ * pool. To achieve the best performance, make sure the address of the buffer passed in
+ * {@link ANeuralNetworksExecution_setOutput}, or the offset value passed in
+ * {@link ANeuralNetworksExecution_setOutputFromMemory}, is a multiple of the preferred alignment
+ * value of the same output. A driver may choose to allocate a separate buffer and do memory copying
+ * if the provided buffer or memory does not satisfy the preferred alignment.
+ *
+ * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
+ *
+ * @param compilation The compilation object. It must already have been finished by calling
+ *                    {@link ANeuralNetworksCompilation_finish}.
+ * @param index The index of the output argument we are referencing from the compilation. It is
+ *              an index into the outputs list passed to
+ *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
+ *              the index associated with {@link ANeuralNetworksModel_addOperand}.
+ * @param alignment The returned preferred alignment. It will be a power of 2.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or alignment is NULL.
+ *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
+ *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
+ *
+ * Available since NNAPI feature level 5.
+ */
+int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment)
+        __INTRODUCED_IN(31);
+
+/**
+ * Get the preferred buffer and memory end padding of an output to an execution created from a
+ * particular compilation.
+ *
+ * The user may use the returned padding value to guide the layout of the output buffer or memory
+ * pool. To achieve the best performance, make sure the length value passed in
+ * {@link ANeuralNetworksExecution_setOutput} or
+ * {@link ANeuralNetworksExecution_setOutputFromMemory} is greater than or equal to the raw size of
+ * the output (i.e. the size of an element multiplied by the number of elements) rounding up to
+ * a multiple of the preferred padding value of the same output. A driver may choose to allocate a
+ * separate buffer and do memory copying if the provided buffer or memory value does not satisfy
+ * the preferred padding.
+ *
+ * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
+ * See {@link ANeuralNetworksExecution_enableInputAndOutputPadding},
+ * {@link ANeuralNetworksExecution_setOutput}, and
+ * {@link ANeuralNetworksExecution_setOutputFromMemory} for information on passing
+ * output buffer or memory padding to the driver.
+ *
+ * @param compilation The compilation object. It must already have been finished by calling
+ *                    {@link ANeuralNetworksCompilation_finish}.
+ * @param index The index of the output argument we are referencing from the compilation. It is
+ *              an index into the outputs list passed to
+ *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
+ *              the index associated with {@link ANeuralNetworksModel_addOperand}.
+ * @param padding The returned preferred padding. It will be a power of 2.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or padding is NULL.
+ *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
+ *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
+ *
+ * Available since NNAPI feature level 5.
+ */
+int ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(
+        const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding)
+        __INTRODUCED_IN(31);
+
 __END_DECLS
 
 #endif  // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H