// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <limits>
#include <random>
#include <vector>

#include <xnnpack.h>
#include <xnnpack/params-init.h>
#include <xnnpack/params.h>
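
// Tester for unary elementwise micro-kernels: generates random inputs,
// computes double-precision reference outputs, clamps them to the range
// derived from qmin/qmax, and checks the micro-kernel output against them.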
class VUnOpMicrokernelTester {
 public:
  enum class OpType {
    Sigmoid,
  };

  enum class Variant {
    Native,
    Scalar,
  };
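
  // Number of elements processed per micro-kernel invocation; must be non-zero.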
  inline VUnOpMicrokernelTester& batch_size(size_t batch_size) {
    assert(batch_size != 0);
    this->batch_size_ = batch_size;
    return *this;
  }

  inline size_t batch_size() const {
    return this->batch_size_;
  }
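
  // When true, the micro-kernel is tested in-place: input and output share the same buffer.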
  inline VUnOpMicrokernelTester& inplace(bool inplace) {
    this->inplace_ = inplace;
    return *this;
  }

  inline bool inplace() const {
    return this->inplace_;
  }
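
  // Lower bound of the quantized output range used to derive the output clamp.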
  inline VUnOpMicrokernelTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }

  inline uint8_t qmin() const {
    return this->qmin_;
  }
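
  // Upper bound of the quantized output range used to derive the output clamp.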
  inline VUnOpMicrokernelTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }

  inline uint8_t qmax() const {
    return this->qmax_;
  }
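
  // Number of test iterations, each with freshly generated random inputs.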
  inline VUnOpMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void Test(xnn_f32_vunary_ukernel_function vunary, OpType op_type, Variant variant = Variant::Native) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(-125.0f, 125.0f), rng);
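
    // The input buffer is padded with XNN_EXTRA_BYTES because micro-kernels
    // may read (but not process) a few elements past the last batch element.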
    std::vector<float> x(batch_size() + XNN_EXTRA_BYTES / sizeof(float));
    std::vector<float> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(float) : 0));
    std::vector<double> y_ref(batch_size());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      if (inplace()) {
        std::generate(y.begin(), y.end(), std::ref(f32rng));
      } else {
        std::generate(x.begin(), x.end(), std::ref(f32rng));
        std::fill(y.begin(), y.end(), nanf(""));
      }
      const float* x_data = inplace() ? y.data() : x.data();

      // Compute reference results.
      for (size_t i = 0; i < batch_size(); i++) {
        switch (op_type) {
          case OpType::Sigmoid:
          {
            // sigmoid(x) = exp(x) / (1 + exp(x)), evaluated in double precision.
            const double e = std::exp(double(x_data[i]));
            y_ref[i] = e / (1.0 + e);
            break;
          }
        }
      }
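
      // Derive the output clamping range: qmin/qmax select positions on a
      // 255-step grid spanning the accumulated range of reference outputs.
      // A degenerate (zero-width) range disables clamping.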
      const float accumulated_min = *std::min_element(y_ref.cbegin(), y_ref.cend());
      const float accumulated_max = *std::max_element(y_ref.cbegin(), y_ref.cend());
      const float accumulated_range = accumulated_max - accumulated_min;
      const float y_max = accumulated_range > 0.0f ?
        (accumulated_max - accumulated_range / 255.0f * float(255 - qmax())) :
        +std::numeric_limits<float>::infinity();
      const float y_min = accumulated_range > 0.0f ?
        (accumulated_min + accumulated_range / 255.0f * float(qmin())) :
        -std::numeric_limits<float>::infinity();
      for (size_t i = 0; i < batch_size(); i++) {
        y_ref[i] = std::max<float>(std::min<float>(y_ref[i], y_max), y_min);
      }

      // Prepare output parameters.
      xnn_f32_output_params output_params = { };
      switch (variant) {
        case Variant::Native:
          output_params = xnn_init_f32_output_params(y_min, y_max);
          break;
        case Variant::Scalar:
          output_params = xnn_init_scalar_f32_output_params(y_min, y_max);
          break;
      }

      // Call the optimized micro-kernel. The first argument is the batch size
      // in bytes, not in elements.
      vunary(batch_size() * sizeof(float), x_data, y.data(), &output_params);

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        ASSERT_NEAR(y[i], y_ref[i], 5.0e-6)
          << "at " << i << " / " << batch_size() << ", x[" << i << "] = " << x[i];
      }
    }
  }

 private:
  size_t batch_size_{1};
  bool inplace_{false};
  uint8_t qmin_{0};
  uint8_t qmax_{255};
  size_t iterations_{15};
};
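
// Example usage from a gtest case (the kernel name below is illustrative;
// substitute a real xnn_f32_vunary_ukernel_function implementation):
//
//   TEST(SIGMOID__SCALAR, batch_gt_1) {
//     for (size_t batch_size = 2; batch_size < 16; batch_size++) {
//       VUnOpMicrokernelTester()
//         .batch_size(batch_size)
//         .Test(xnn_f32_sigmoid_ukernel__scalar_x1,
//               VUnOpMicrokernelTester::OpType::Sigmoid,
//               VUnOpMicrokernelTester::Variant::Scalar);
//     }
//   }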