blob: 468a34dd62d4aacc0d48a839d83e664ab206a206 [file] [log] [blame]
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "InstanceNormalizationTestImpl.hpp"
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
#include <armnn/ArmNN.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
#include <test/TensorHelpers.hpp>
namespace
{
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::TensorInfo& inputTensorInfo,
const armnn::TensorInfo& outputTensorInfo,
const std::vector<float>& inputValues,
const std::vector<float>& expectedOutputValues,
armnn::InstanceNormalizationQueueDescriptor descriptor,
float qScale = 0.0f,
int32_t qOffset = 0)
{
boost::ignore_unused(memoryManager);
auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
LayerTestResult<T, 4> result(outputTensorInfo);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
armnn::WorkloadInfo info;
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateInstanceNormalization(descriptor, info);
inputHandle->Allocate();
outputHandle->Allocate();
CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
workload->Execute();
CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
return result;
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    // Instance normalization with identity affine parameters (beta = 0, gamma = 1)
    // over a 2x2x2x2 tensor. Reference data is laid out NHWC and permuted to NCHW
    // on demand.
    const armnn::TensorShape shape{ 2, 2, 2, 2 }; // N, H, W, C

    armnn::TensorInfo inputTensorInfo(shape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(shape, ArmnnType);

    // One pair of channel values per (batch, height, width) position.
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        0.f, 1.f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        0.f, 2.f,
        // Batch 0, Height 1, Width 0 x Channel (2)
        0.f, 2.f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        0.f, 4.f,
        // Batch 1, Height 0, Width 0 x Channel (2)
        1.f, -1.f,
        // Batch 1, Height 0, Width 1 x Channel (2)
        -1.f, 2.f,
        // Batch 1, Height 1, Width 0 x Channel (2)
        -1.f, -2.f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        1.f, 4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        0.f, -1.1470304f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        0.f, -0.22940612f,
        // Batch 0, Height 1, Width 0 x Channel (2)
        0.f, -0.22940612f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        0.f, 1.6058424f,
        // Batch 1, Height 0, Width 0 x Channel (2)
        0.99995005f, -0.7337929f,
        // Batch 1, Height 0, Width 1 x Channel (2)
        -0.99995005f, 0.52413774f,
        // Batch 1, Height 1, Width 0 x Channel (2)
        -0.99995005f, -1.1531031f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        0.99995005f, 1.3627582f
    };

    // The reference data above is NHWC; convert in place when testing NCHW.
    if (dataLayout == armnn::DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
        PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
    }

    armnn::InstanceNormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps        = 0.0001f;
    descriptor.m_Parameters.m_Beta       = 0.0f;
    descriptor.m_Parameters.m_Gamma      = 1.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    return InstanceNormTestImpl<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           inputTensorInfo,
                                           outputTensorInfo,
                                           inputValues,
                                           expectedOutputValues,
                                           descriptor);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    // Instance normalization with non-trivial affine parameters (beta = 10,
    // gamma = 2) over the same 2x2x2x2 input as InstanceNormTest. Reference
    // data is laid out NHWC and permuted to NCHW on demand.
    const armnn::TensorShape shape{ 2, 2, 2, 2 }; // N, H, W, C

    armnn::TensorInfo inputTensorInfo(shape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(shape, ArmnnType);

    // One pair of channel values per (batch, height, width) position.
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        0.f, 1.f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        0.f, 2.f,
        // Batch 0, Height 1, Width 0 x Channel (2)
        0.f, 2.f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        0.f, 4.f,
        // Batch 1, Height 0, Width 0 x Channel (2)
        1.f, -1.f,
        // Batch 1, Height 0, Width 1 x Channel (2)
        -1.f, 2.f,
        // Batch 1, Height 1, Width 0 x Channel (2)
        -1.f, -2.f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        1.f, 4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        10.f, 7.7059393f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        10.f, 9.541187f,
        // Batch 0, Height 1, Width 0 x Channel (2)
        10.f, 9.541187f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        10.f, 13.211685f,
        // Batch 1, Height 0, Width 0 x Channel (2)
        11.9999f, 8.532414f,
        // Batch 1, Height 0, Width 1 x Channel (2)
        8.0001f, 11.048275f,
        // Batch 1, Height 1, Width 0 x Channel (2)
        8.0001f, 7.693794f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        11.9999f, 12.725516f
    };

    // The reference data above is NHWC; convert in place when testing NCHW.
    if (dataLayout == armnn::DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
        PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
    }

    armnn::InstanceNormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps        = 0.0001f;
    descriptor.m_Parameters.m_Beta       = 10.0f;
    descriptor.m_Parameters.m_Gamma      = 2.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    return InstanceNormTestImpl<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           inputTensorInfo,
                                           outputTensorInfo,
                                           inputValues,
                                           expectedOutputValues,
                                           descriptor);
}
} // anonymous namespace
// Runs the basic instance-norm test case (beta = 0, gamma = 1) at Float32 precision.
LayerTestResult<float, 4> InstanceNormFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout)
{
return InstanceNormTest<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
// Runs the basic instance-norm test case (beta = 0, gamma = 1) at Float16 precision.
LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout)
{
return InstanceNormTest<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
}
// Runs the affine instance-norm test case (beta = 10, gamma = 2) at Float32 precision.
LayerTestResult<float, 4> InstanceNormFloat32Test2(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout)
{
return InstanceNormTest2<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
// Runs the affine instance-norm test case (beta = 10, gamma = 2) at Float16 precision.
LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test2(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout)
{
return InstanceNormTest2<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
}