//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/backends/Workload.hpp>

#include <arm_compute/core/Error.h>
#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>

#include <memory>
namespace armnn
{

// Checks whether the Neon backend can execute L2 normalization for the given input/output
// tensor infos and descriptor (see the usage sketch below).
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
                                                        const TensorInfo& output,
                                                        const L2NormalizationDescriptor& descriptor);
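
// Usage sketch (illustrative only; the variable names below are assumed, not part of this header):
// layer-support code would typically call the validate function before creating the workload,
// treating an arm_compute::ErrorCode::OK result as "supported":
//
//     arm_compute::Status aclStatus =
//         NeonL2NormalizationWorkloadValidate(inputInfo, outputInfo, descriptor);
//     bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);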
// Neon workload that performs L2 normalization on float tensors via the Arm Compute Library.
class NeonL2NormalizationFloatWorkload : public FloatWorkload<L2NormalizationQueueDescriptor>
{
public:
    NeonL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
                                     std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);

    // Runs the configured Arm Compute function.
    virtual void Execute() const override;

    // Replaces the input tensor handle in the given slot with the supplied TensorHandle.
    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

    // Replaces the output tensor handle in the given slot with the supplied TensorHandle.
    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

private:
    // Underlying Arm Compute function that carries out the L2 normalization.
    std::unique_ptr<arm_compute::IFunction> m_Layer;

    // Reconfigures the underlying function, e.g. after a tensor handle has been replaced.
    virtual void Reconfigure();
};

} //namespace armnn