| /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. |
| |
| Licensed under the Apache License, Version 2.0 (the "License"); |
| you may not use this file except in compliance with the License. |
| You may obtain a copy of the License at |
| |
| http://www.apache.org/licenses/LICENSE-2.0 |
| |
| Unless required by applicable law or agreed to in writing, software |
| distributed under the License is distributed on an "AS IS" BASIS, |
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| See the License for the specific language governing permissions and |
| limitations under the License. |
| ==============================================================================*/ |
| // automatically generated by the FlatBuffers compiler, do not modify |
| |
| |
| #ifndef FLATBUFFERS_GENERATED_CONFIGURATION_TFLITE_H_ |
| #define FLATBUFFERS_GENERATED_CONFIGURATION_TFLITE_H_ |
| |
| #include "flatbuffers/flatbuffers.h" |
| |
| namespace tflite { |
| |
// Forward declarations. For every table `X` in the schema the FlatBuffers
// compiler emits two types: `X`, the flat in-buffer accessor, and `XT`, the
// object-API ("native table") mirror that owns its contents on the heap.
struct ComputeSettings;
struct ComputeSettingsT;

struct NNAPISettings;
struct NNAPISettingsT;

struct GPUSettings;
struct GPUSettingsT;

struct HexagonSettings;
struct HexagonSettingsT;

struct XNNPackSettings;
struct XNNPackSettingsT;

struct EdgeTpuDeviceSpec;
struct EdgeTpuDeviceSpecT;

struct EdgeTpuInactivePowerConfig;
struct EdgeTpuInactivePowerConfigT;

struct EdgeTpuSettings;
struct EdgeTpuSettingsT;

struct CoralSettings;
struct CoralSettingsT;

struct CPUSettings;
struct CPUSettingsT;

struct TFLiteSettings;
struct TFLiteSettingsT;

struct FallbackSettings;
struct FallbackSettingsT;

struct BenchmarkMetric;
struct BenchmarkMetricT;

struct BenchmarkResult;
struct BenchmarkResultT;

struct ErrorCode;
struct ErrorCodeT;

struct BenchmarkError;
struct BenchmarkErrorT;

// Equality / inequality for the object-API ("T") types. Declared here; the
// definitions are emitted elsewhere in the generated output.
bool operator==(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs);
bool operator!=(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs);
bool operator==(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs);
bool operator!=(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs);
bool operator==(const GPUSettingsT &lhs, const GPUSettingsT &rhs);
bool operator!=(const GPUSettingsT &lhs, const GPUSettingsT &rhs);
bool operator==(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs);
bool operator!=(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs);
bool operator==(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs);
bool operator!=(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs);
bool operator==(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs);
bool operator!=(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs);
bool operator==(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs);
bool operator!=(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs);
bool operator==(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs);
bool operator!=(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs);
bool operator==(const CoralSettingsT &lhs, const CoralSettingsT &rhs);
bool operator!=(const CoralSettingsT &lhs, const CoralSettingsT &rhs);
bool operator==(const CPUSettingsT &lhs, const CPUSettingsT &rhs);
bool operator!=(const CPUSettingsT &lhs, const CPUSettingsT &rhs);
bool operator==(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs);
bool operator!=(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs);
bool operator==(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs);
bool operator!=(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs);
bool operator==(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs);
bool operator!=(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs);
bool operator==(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs);
bool operator!=(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs);
bool operator==(const ErrorCodeT &lhs, const ErrorCodeT &rhs);
bool operator!=(const ErrorCodeT &lhs, const ErrorCodeT &rhs);
bool operator==(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs);
bool operator!=(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs);
| |
// High-level preference for how a model should be executed (any backend,
// latency-optimized, power-optimized, or forced onto the CPU).
// _MIN/_MAX are range markers aliasing the first/last real value; they are
// used by range checks such as flatbuffers::IsOutRange.
enum ExecutionPreference {
  ExecutionPreference_ANY = 0,
  ExecutionPreference_LOW_LATENCY = 1,
  ExecutionPreference_LOW_POWER = 2,
  ExecutionPreference_FORCE_CPU = 3,
  ExecutionPreference_MIN = ExecutionPreference_ANY,
  ExecutionPreference_MAX = ExecutionPreference_FORCE_CPU
};

// Returns all ExecutionPreference values in declaration order.
inline const ExecutionPreference (&EnumValuesExecutionPreference())[4] {
  static const ExecutionPreference values[] = {
    ExecutionPreference_ANY,
    ExecutionPreference_LOW_LATENCY,
    ExecutionPreference_LOW_POWER,
    ExecutionPreference_FORCE_CPU
  };
  return values;
}
| |
// Returns the printable names of the ExecutionPreference values, parallel to
// the enum's declaration order. The array is nullptr-terminated so callers
// can walk it without knowing its length.
inline const char * const *EnumNamesExecutionPreference() {
  static const char * const kNameTable[5] = {"ANY", "LOW_LATENCY", "LOW_POWER",
                                             "FORCE_CPU", nullptr};
  return kNameTable;
}
| |
// Returns the printable name of `e`, or "" when `e` falls outside
// [ANY, FORCE_CPU] — so arbitrary ints cast to the enum can never index past
// the end of the name table.
inline const char *EnumNameExecutionPreference(ExecutionPreference e) {
  if (flatbuffers::IsOutRange(e, ExecutionPreference_ANY, ExecutionPreference_FORCE_CPU)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesExecutionPreference()[index];
}
| |
// Which TFLite delegate (if any) inference should be accelerated with.
// _MIN/_MAX are range markers aliasing the first/last real value.
enum Delegate {
  Delegate_NONE = 0,
  Delegate_NNAPI = 1,
  Delegate_GPU = 2,
  Delegate_HEXAGON = 3,
  Delegate_XNNPACK = 4,
  Delegate_EDGETPU = 5,
  Delegate_EDGETPU_CORAL = 6,
  Delegate_MIN = Delegate_NONE,
  Delegate_MAX = Delegate_EDGETPU_CORAL
};

// Returns all Delegate values in declaration order.
inline const Delegate (&EnumValuesDelegate())[7] {
  static const Delegate values[] = {
    Delegate_NONE,
    Delegate_NNAPI,
    Delegate_GPU,
    Delegate_HEXAGON,
    Delegate_XNNPACK,
    Delegate_EDGETPU,
    Delegate_EDGETPU_CORAL
  };
  return values;
}

// Returns the nullptr-terminated name table, parallel to the enum.
inline const char * const *EnumNamesDelegate() {
  static const char * const names[8] = {
    "NONE",
    "NNAPI",
    "GPU",
    "HEXAGON",
    "XNNPACK",
    "EDGETPU",
    "EDGETPU_CORAL",
    nullptr
  };
  return names;
}

// Returns the printable name of `e`, or "" when out of range.
inline const char *EnumNameDelegate(Delegate e) {
  if (flatbuffers::IsOutRange(e, Delegate_NONE, Delegate_EDGETPU_CORAL)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesDelegate()[index];
}
| |
// Execution preference forwarded to the NNAPI delegate (mirrors NNAPI's own
// power/latency preference options). _MIN/_MAX are range markers.
enum NNAPIExecutionPreference {
  NNAPIExecutionPreference_UNDEFINED = 0,
  NNAPIExecutionPreference_NNAPI_LOW_POWER = 1,
  NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER = 2,
  NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED = 3,
  NNAPIExecutionPreference_MIN = NNAPIExecutionPreference_UNDEFINED,
  NNAPIExecutionPreference_MAX = NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED
};

// Returns all NNAPIExecutionPreference values in declaration order.
inline const NNAPIExecutionPreference (&EnumValuesNNAPIExecutionPreference())[4] {
  static const NNAPIExecutionPreference values[] = {
    NNAPIExecutionPreference_UNDEFINED,
    NNAPIExecutionPreference_NNAPI_LOW_POWER,
    NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER,
    NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED
  };
  return values;
}

// Returns the nullptr-terminated name table, parallel to the enum.
inline const char * const *EnumNamesNNAPIExecutionPreference() {
  static const char * const names[5] = {
    "UNDEFINED",
    "NNAPI_LOW_POWER",
    "NNAPI_FAST_SINGLE_ANSWER",
    "NNAPI_SUSTAINED_SPEED",
    nullptr
  };
  return names;
}

// Returns the printable name of `e`, or "" when out of range.
inline const char *EnumNameNNAPIExecutionPreference(NNAPIExecutionPreference e) {
  if (flatbuffers::IsOutRange(e, NNAPIExecutionPreference_UNDEFINED, NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesNNAPIExecutionPreference()[index];
}
| |
// Relative scheduling priority forwarded to the NNAPI delegate.
// _MIN/_MAX are range markers.
enum NNAPIExecutionPriority {
  NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED = 0,
  NNAPIExecutionPriority_NNAPI_PRIORITY_LOW = 1,
  NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM = 2,
  NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH = 3,
  NNAPIExecutionPriority_MIN = NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
  NNAPIExecutionPriority_MAX = NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH
};

// Returns all NNAPIExecutionPriority values in declaration order.
inline const NNAPIExecutionPriority (&EnumValuesNNAPIExecutionPriority())[4] {
  static const NNAPIExecutionPriority values[] = {
    NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
    NNAPIExecutionPriority_NNAPI_PRIORITY_LOW,
    NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM,
    NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH
  };
  return values;
}

// Returns the nullptr-terminated name table, parallel to the enum.
inline const char * const *EnumNamesNNAPIExecutionPriority() {
  static const char * const names[5] = {
    "NNAPI_PRIORITY_UNDEFINED",
    "NNAPI_PRIORITY_LOW",
    "NNAPI_PRIORITY_MEDIUM",
    "NNAPI_PRIORITY_HIGH",
    nullptr
  };
  return names;
}

// Returns the printable name of `e`, or "" when out of range.
inline const char *EnumNameNNAPIExecutionPriority(NNAPIExecutionPriority e) {
  if (flatbuffers::IsOutRange(e, NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED, NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesNNAPIExecutionPriority()[index];
}
| |
// GPU delegate backend selector; UNSET means no backend is forced (used by
// GPUSettings::force_backend). _MIN/_MAX are range markers.
enum GPUBackend {
  GPUBackend_UNSET = 0,
  GPUBackend_OPENCL = 1,
  GPUBackend_OPENGL = 2,
  GPUBackend_MIN = GPUBackend_UNSET,
  GPUBackend_MAX = GPUBackend_OPENGL
};

// Returns all GPUBackend values in declaration order.
inline const GPUBackend (&EnumValuesGPUBackend())[3] {
  static const GPUBackend values[] = {
    GPUBackend_UNSET,
    GPUBackend_OPENCL,
    GPUBackend_OPENGL
  };
  return values;
}

// Returns the nullptr-terminated name table, parallel to the enum.
inline const char * const *EnumNamesGPUBackend() {
  static const char * const names[4] = {
    "UNSET",
    "OPENCL",
    "OPENGL",
    nullptr
  };
  return names;
}

// Returns the printable name of `e`, or "" when out of range.
inline const char *EnumNameGPUBackend(GPUBackend e) {
  if (flatbuffers::IsOutRange(e, GPUBackend_UNSET, GPUBackend_OPENGL)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesGPUBackend()[index];
}
| |
// Nested namespace for enums declared inside the EdgeTpuDeviceSpec table in
// the schema (flatc maps a nested enum to `<Table>_::<Enum>`).
namespace EdgeTpuDeviceSpec_ {

// EdgeTPU platform kind (real MMIO device vs. reference/simulator targets).
// _MIN/_MAX are range markers.
enum PlatformType {
  PlatformType_MMIO = 0,
  PlatformType_REFERENCE = 1,
  PlatformType_SIMULATOR = 2,
  PlatformType_REMOTE_SIMULATOR = 3,
  PlatformType_MIN = PlatformType_MMIO,
  PlatformType_MAX = PlatformType_REMOTE_SIMULATOR
};

// Returns all PlatformType values in declaration order.
inline const PlatformType (&EnumValuesPlatformType())[4] {
  static const PlatformType values[] = {
    PlatformType_MMIO,
    PlatformType_REFERENCE,
    PlatformType_SIMULATOR,
    PlatformType_REMOTE_SIMULATOR
  };
  return values;
}

// Returns the nullptr-terminated name table, parallel to the enum.
inline const char * const *EnumNamesPlatformType() {
  static const char * const names[5] = {
    "MMIO",
    "REFERENCE",
    "SIMULATOR",
    "REMOTE_SIMULATOR",
    nullptr
  };
  return names;
}

// Returns the printable name of `e`, or "" when out of range.
inline const char *EnumNamePlatformType(PlatformType e) {
  if (flatbuffers::IsOutRange(e, PlatformType_MMIO, PlatformType_REMOTE_SIMULATOR)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesPlatformType()[index];
}

}  // namespace EdgeTpuDeviceSpec_
| |
// Power states an EdgeTPU device can be placed in, from fully off through
// increasingly active levels up to OVER_DRIVE. _MIN/_MAX are range markers.
enum EdgeTpuPowerState {
  EdgeTpuPowerState_UNDEFINED_POWERSTATE = 0,
  EdgeTpuPowerState_TPU_CORE_OFF = 1,
  EdgeTpuPowerState_READY = 2,
  EdgeTpuPowerState_ACTIVE_MIN_POWER = 3,
  EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER = 4,
  EdgeTpuPowerState_ACTIVE_LOW_POWER = 5,
  EdgeTpuPowerState_ACTIVE = 6,
  EdgeTpuPowerState_OVER_DRIVE = 7,
  EdgeTpuPowerState_MIN = EdgeTpuPowerState_UNDEFINED_POWERSTATE,
  EdgeTpuPowerState_MAX = EdgeTpuPowerState_OVER_DRIVE
};

// Returns all EdgeTpuPowerState values in declaration order.
inline const EdgeTpuPowerState (&EnumValuesEdgeTpuPowerState())[8] {
  static const EdgeTpuPowerState values[] = {
    EdgeTpuPowerState_UNDEFINED_POWERSTATE,
    EdgeTpuPowerState_TPU_CORE_OFF,
    EdgeTpuPowerState_READY,
    EdgeTpuPowerState_ACTIVE_MIN_POWER,
    EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER,
    EdgeTpuPowerState_ACTIVE_LOW_POWER,
    EdgeTpuPowerState_ACTIVE,
    EdgeTpuPowerState_OVER_DRIVE
  };
  return values;
}

// Returns the nullptr-terminated name table, parallel to the enum.
inline const char * const *EnumNamesEdgeTpuPowerState() {
  static const char * const names[9] = {
    "UNDEFINED_POWERSTATE",
    "TPU_CORE_OFF",
    "READY",
    "ACTIVE_MIN_POWER",
    "ACTIVE_VERY_LOW_POWER",
    "ACTIVE_LOW_POWER",
    "ACTIVE",
    "OVER_DRIVE",
    nullptr
  };
  return names;
}

// Returns the printable name of `e`, or "" when out of range.
inline const char *EnumNameEdgeTpuPowerState(EdgeTpuPowerState e) {
  if (flatbuffers::IsOutRange(e, EdgeTpuPowerState_UNDEFINED_POWERSTATE, EdgeTpuPowerState_OVER_DRIVE)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesEdgeTpuPowerState()[index];
}
| |
// Nested namespace for enums declared inside the CoralSettings table.
namespace CoralSettings_ {

// Performance level requested for a Coral device. _MIN/_MAX are range markers.
enum Performance {
  Performance_UNDEFINED = 0,
  Performance_MAXIMUM = 1,
  Performance_HIGH = 2,
  Performance_MEDIUM = 3,
  Performance_LOW = 4,
  Performance_MIN = Performance_UNDEFINED,
  Performance_MAX = Performance_LOW
};

// Returns all Performance values in declaration order.
inline const Performance (&EnumValuesPerformance())[5] {
  static const Performance values[] = {
    Performance_UNDEFINED,
    Performance_MAXIMUM,
    Performance_HIGH,
    Performance_MEDIUM,
    Performance_LOW
  };
  return values;
}

// Returns the nullptr-terminated name table, parallel to the enum.
inline const char * const *EnumNamesPerformance() {
  static const char * const names[6] = {
    "UNDEFINED",
    "MAXIMUM",
    "HIGH",
    "MEDIUM",
    "LOW",
    nullptr
  };
  return names;
}

// Returns the printable name of `e`, or "" when out of range.
inline const char *EnumNamePerformance(Performance e) {
  if (flatbuffers::IsOutRange(e, Performance_UNDEFINED, Performance_LOW)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesPerformance()[index];
}

}  // namespace CoralSettings_
| |
// Lifecycle stage a benchmark event records (start, normal end, error, or an
// already-logged event). _MIN/_MAX are range markers.
enum BenchmarkEventType {
  BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE = 0,
  BenchmarkEventType_START = 1,
  BenchmarkEventType_END = 2,
  BenchmarkEventType_ERROR = 3,
  BenchmarkEventType_LOGGED = 4,
  BenchmarkEventType_MIN = BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE,
  BenchmarkEventType_MAX = BenchmarkEventType_LOGGED
};

// Returns all BenchmarkEventType values in declaration order.
inline const BenchmarkEventType (&EnumValuesBenchmarkEventType())[5] {
  static const BenchmarkEventType values[] = {
    BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE,
    BenchmarkEventType_START,
    BenchmarkEventType_END,
    BenchmarkEventType_ERROR,
    BenchmarkEventType_LOGGED
  };
  return values;
}

// Returns the nullptr-terminated name table, parallel to the enum.
inline const char * const *EnumNamesBenchmarkEventType() {
  static const char * const names[6] = {
    "UNDEFINED_BENCHMARK_EVENT_TYPE",
    "START",
    "END",
    "ERROR",
    "LOGGED",
    nullptr
  };
  return names;
}

// Returns the printable name of `e`, or "" when out of range.
inline const char *EnumNameBenchmarkEventType(BenchmarkEventType e) {
  if (flatbuffers::IsOutRange(e, BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE, BenchmarkEventType_LOGGED)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesBenchmarkEventType()[index];
}
| |
// Which phase of a benchmark run an event refers to.
// _MIN/_MAX are range markers aliasing the first/last real value.
enum BenchmarkStage {
  BenchmarkStage_UNKNOWN = 0,
  BenchmarkStage_INITIALIZATION = 1,
  BenchmarkStage_INFERENCE = 2,
  BenchmarkStage_MIN = BenchmarkStage_UNKNOWN,
  BenchmarkStage_MAX = BenchmarkStage_INFERENCE
};

// Returns all BenchmarkStage values in declaration order.
inline const BenchmarkStage (&EnumValuesBenchmarkStage())[3] {
  static const BenchmarkStage values[] = {
    BenchmarkStage_UNKNOWN,
    BenchmarkStage_INITIALIZATION,
    BenchmarkStage_INFERENCE
  };
  return values;
}
| |
// Returns the printable names of the BenchmarkStage values, parallel to the
// enum's declaration order; the trailing nullptr terminates the array.
inline const char * const *EnumNamesBenchmarkStage() {
  static const char * const kNameTable[4] = {"UNKNOWN", "INITIALIZATION",
                                             "INFERENCE", nullptr};
  return kNameTable;
}
| |
// Returns the printable name of `e`, or "" when `e` falls outside
// [UNKNOWN, INFERENCE], so out-of-range casts never index past the table.
inline const char *EnumNameBenchmarkStage(BenchmarkStage e) {
  if (flatbuffers::IsOutRange(e, BenchmarkStage_UNKNOWN, BenchmarkStage_INFERENCE)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesBenchmarkStage()[index];
}
| |
// Object-API (unpacked, heap-owning) mirror of the ComputeSettings table.
// Produced by ComputeSettings::UnPack and consumed by ComputeSettings::Pack.
struct ComputeSettingsT : public flatbuffers::NativeTable {
  typedef ComputeSettings TableType;
  tflite::ExecutionPreference preference;  // initialized to ANY below
  std::unique_ptr<tflite::TFLiteSettingsT> tflite_settings;  // null when absent
  std::string model_namespace_for_statistics;
  std::string model_identifier_for_statistics;
  ComputeSettingsT()
      : preference(tflite::ExecutionPreference_ANY) {
  }
};
| |
// Flat (in-buffer) accessor for the ComputeSettings table. Instances are
// views into a FlatBuffer and own no memory.
struct ComputeSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef ComputeSettingsT NativeTableType;
  // VTable byte offsets for each field. Fixed by the schema's field ids —
  // never renumber by hand.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_PREFERENCE = 4,
    VT_TFLITE_SETTINGS = 6,
    VT_MODEL_NAMESPACE_FOR_STATISTICS = 8,
    VT_MODEL_IDENTIFIER_FOR_STATISTICS = 10
  };
  // Stored as int32; defaults to ANY (0) when the field is absent.
  tflite::ExecutionPreference preference() const {
    return static_cast<tflite::ExecutionPreference>(GetField<int32_t>(VT_PREFERENCE, 0));
  }
  // Nested table; nullptr when the field is absent.
  const tflite::TFLiteSettings *tflite_settings() const {
    return GetPointer<const tflite::TFLiteSettings *>(VT_TFLITE_SETTINGS);
  }
  // nullptr when absent.
  const flatbuffers::String *model_namespace_for_statistics() const {
    return GetPointer<const flatbuffers::String *>(VT_MODEL_NAMESPACE_FOR_STATISTICS);
  }
  // nullptr when absent.
  const flatbuffers::String *model_identifier_for_statistics() const {
    return GetPointer<const flatbuffers::String *>(VT_MODEL_IDENTIFIER_FOR_STATISTICS);
  }
  // Structurally validates this table (and its nested table/strings) against
  // the buffer bounds tracked by `verifier`.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_PREFERENCE) &&
           VerifyOffset(verifier, VT_TFLITE_SETTINGS) &&
           verifier.VerifyTable(tflite_settings()) &&
           VerifyOffset(verifier, VT_MODEL_NAMESPACE_FOR_STATISTICS) &&
           verifier.VerifyString(model_namespace_for_statistics()) &&
           VerifyOffset(verifier, VT_MODEL_IDENTIFIER_FOR_STATISTICS) &&
           verifier.VerifyString(model_identifier_for_statistics()) &&
           verifier.EndTable();
  }
  // Object-API conversions; definitions are emitted elsewhere in the
  // generated output.
  ComputeSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(ComputeSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<ComputeSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Incremental builder for a ComputeSettings table: call add_* once per
// present field, then Finish(). Normally used via CreateComputeSettings.
struct ComputeSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_preference(tflite::ExecutionPreference preference) {
    // Third argument is the schema default; AddElement skips writing it.
    fbb_.AddElement<int32_t>(ComputeSettings::VT_PREFERENCE, static_cast<int32_t>(preference), 0);
  }
  void add_tflite_settings(flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings) {
    fbb_.AddOffset(ComputeSettings::VT_TFLITE_SETTINGS, tflite_settings);
  }
  void add_model_namespace_for_statistics(flatbuffers::Offset<flatbuffers::String> model_namespace_for_statistics) {
    fbb_.AddOffset(ComputeSettings::VT_MODEL_NAMESPACE_FOR_STATISTICS, model_namespace_for_statistics);
  }
  void add_model_identifier_for_statistics(flatbuffers::Offset<flatbuffers::String> model_identifier_for_statistics) {
    fbb_.AddOffset(ComputeSettings::VT_MODEL_IDENTIFIER_FOR_STATISTICS, model_identifier_for_statistics);
  }
  explicit ComputeSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but intentionally not defined: builders are not assignable.
  ComputeSettingsBuilder &operator=(const ComputeSettingsBuilder &);
  flatbuffers::Offset<ComputeSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ComputeSettings>(end);
    return o;
  }
};
| |
// Convenience one-shot constructor for a ComputeSettings table. The add_*
// calls are in the order flatc emits (descending field id within equal field
// size) for optimal vtable packing — keep as generated.
inline flatbuffers::Offset<ComputeSettings> CreateComputeSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::ExecutionPreference preference = tflite::ExecutionPreference_ANY,
    flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings = 0,
    flatbuffers::Offset<flatbuffers::String> model_namespace_for_statistics = 0,
    flatbuffers::Offset<flatbuffers::String> model_identifier_for_statistics = 0) {
  ComputeSettingsBuilder builder_(_fbb);
  builder_.add_model_identifier_for_statistics(model_identifier_for_statistics);
  builder_.add_model_namespace_for_statistics(model_namespace_for_statistics);
  builder_.add_tflite_settings(tflite_settings);
  builder_.add_preference(preference);
  return builder_.Finish();
}
| |
// Like CreateComputeSettings but takes C strings and serializes them into the
// buffer first; nullptr arguments become absent fields (offset 0).
inline flatbuffers::Offset<ComputeSettings> CreateComputeSettingsDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::ExecutionPreference preference = tflite::ExecutionPreference_ANY,
    flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings = 0,
    const char *model_namespace_for_statistics = nullptr,
    const char *model_identifier_for_statistics = nullptr) {
  auto model_namespace_for_statistics__ = model_namespace_for_statistics ? _fbb.CreateString(model_namespace_for_statistics) : 0;
  auto model_identifier_for_statistics__ = model_identifier_for_statistics ? _fbb.CreateString(model_identifier_for_statistics) : 0;
  return tflite::CreateComputeSettings(
      _fbb,
      preference,
      tflite_settings,
      model_namespace_for_statistics__,
      model_identifier_for_statistics__);
}

// Packs an object-API ComputeSettingsT back into a buffer; definition is
// emitted elsewhere in the generated output.
flatbuffers::Offset<ComputeSettings> CreateComputeSettings(flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
| |
// Object-API (unpacked, heap-owning) mirror of the NNAPISettings table.
struct NNAPISettingsT : public flatbuffers::NativeTable {
  typedef NNAPISettings TableType;
  std::string accelerator_name;
  std::string cache_directory;
  std::string model_token;
  tflite::NNAPIExecutionPreference execution_preference;  // default UNDEFINED
  int32_t no_of_nnapi_instances_to_cache;                 // default 0
  std::unique_ptr<tflite::FallbackSettingsT> fallback_settings;  // null when absent
  bool allow_nnapi_cpu_on_android_10_plus;  // default false
  tflite::NNAPIExecutionPriority execution_priority;  // default PRIORITY_UNDEFINED
  bool allow_dynamic_dimensions;       // default false
  bool allow_fp16_precision_for_fp32;  // default false
  NNAPISettingsT()
      : execution_preference(tflite::NNAPIExecutionPreference_UNDEFINED),
        no_of_nnapi_instances_to_cache(0),
        allow_nnapi_cpu_on_android_10_plus(false),
        execution_priority(tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED),
        allow_dynamic_dimensions(false),
        allow_fp16_precision_for_fp32(false) {
  }
};
| |
// Flat (in-buffer) accessor for the NNAPISettings table; a non-owning view
// into a FlatBuffer.
struct NNAPISettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef NNAPISettingsT NativeTableType;
  // VTable byte offsets, fixed by the schema's field ids.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ACCELERATOR_NAME = 4,
    VT_CACHE_DIRECTORY = 6,
    VT_MODEL_TOKEN = 8,
    VT_EXECUTION_PREFERENCE = 10,
    VT_NO_OF_NNAPI_INSTANCES_TO_CACHE = 12,
    VT_FALLBACK_SETTINGS = 14,
    VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS = 16,
    VT_EXECUTION_PRIORITY = 18,
    VT_ALLOW_DYNAMIC_DIMENSIONS = 20,
    VT_ALLOW_FP16_PRECISION_FOR_FP32 = 22
  };
  // String/pointer accessors return nullptr when the field is absent.
  const flatbuffers::String *accelerator_name() const {
    return GetPointer<const flatbuffers::String *>(VT_ACCELERATOR_NAME);
  }
  const flatbuffers::String *cache_directory() const {
    return GetPointer<const flatbuffers::String *>(VT_CACHE_DIRECTORY);
  }
  const flatbuffers::String *model_token() const {
    return GetPointer<const flatbuffers::String *>(VT_MODEL_TOKEN);
  }
  // Enums are stored as int32 with default 0 (the UNDEFINED value).
  tflite::NNAPIExecutionPreference execution_preference() const {
    return static_cast<tflite::NNAPIExecutionPreference>(GetField<int32_t>(VT_EXECUTION_PREFERENCE, 0));
  }
  int32_t no_of_nnapi_instances_to_cache() const {
    return GetField<int32_t>(VT_NO_OF_NNAPI_INSTANCES_TO_CACHE, 0);
  }
  const tflite::FallbackSettings *fallback_settings() const {
    return GetPointer<const tflite::FallbackSettings *>(VT_FALLBACK_SETTINGS);
  }
  // Booleans are stored as uint8; default 0 (false).
  bool allow_nnapi_cpu_on_android_10_plus() const {
    return GetField<uint8_t>(VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS, 0) != 0;
  }
  tflite::NNAPIExecutionPriority execution_priority() const {
    return static_cast<tflite::NNAPIExecutionPriority>(GetField<int32_t>(VT_EXECUTION_PRIORITY, 0));
  }
  bool allow_dynamic_dimensions() const {
    return GetField<uint8_t>(VT_ALLOW_DYNAMIC_DIMENSIONS, 0) != 0;
  }
  bool allow_fp16_precision_for_fp32() const {
    return GetField<uint8_t>(VT_ALLOW_FP16_PRECISION_FOR_FP32, 0) != 0;
  }
  // Structurally validates this table (fields, nested table, strings)
  // against the buffer bounds tracked by `verifier`.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_ACCELERATOR_NAME) &&
           verifier.VerifyString(accelerator_name()) &&
           VerifyOffset(verifier, VT_CACHE_DIRECTORY) &&
           verifier.VerifyString(cache_directory()) &&
           VerifyOffset(verifier, VT_MODEL_TOKEN) &&
           verifier.VerifyString(model_token()) &&
           VerifyField<int32_t>(verifier, VT_EXECUTION_PREFERENCE) &&
           VerifyField<int32_t>(verifier, VT_NO_OF_NNAPI_INSTANCES_TO_CACHE) &&
           VerifyOffset(verifier, VT_FALLBACK_SETTINGS) &&
           verifier.VerifyTable(fallback_settings()) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS) &&
           VerifyField<int32_t>(verifier, VT_EXECUTION_PRIORITY) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_DYNAMIC_DIMENSIONS) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_FP16_PRECISION_FOR_FP32) &&
           verifier.EndTable();
  }
  // Object-API conversions; definitions are emitted elsewhere in the
  // generated output.
  NNAPISettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(NNAPISettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<NNAPISettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Incremental builder for an NNAPISettings table: one add_* call per present
// field, then Finish(). Normally used via CreateNNAPISettings.
struct NNAPISettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_accelerator_name(flatbuffers::Offset<flatbuffers::String> accelerator_name) {
    fbb_.AddOffset(NNAPISettings::VT_ACCELERATOR_NAME, accelerator_name);
  }
  void add_cache_directory(flatbuffers::Offset<flatbuffers::String> cache_directory) {
    fbb_.AddOffset(NNAPISettings::VT_CACHE_DIRECTORY, cache_directory);
  }
  void add_model_token(flatbuffers::Offset<flatbuffers::String> model_token) {
    fbb_.AddOffset(NNAPISettings::VT_MODEL_TOKEN, model_token);
  }
  void add_execution_preference(tflite::NNAPIExecutionPreference execution_preference) {
    // Last argument is the schema default; AddElement skips writing it.
    fbb_.AddElement<int32_t>(NNAPISettings::VT_EXECUTION_PREFERENCE, static_cast<int32_t>(execution_preference), 0);
  }
  void add_no_of_nnapi_instances_to_cache(int32_t no_of_nnapi_instances_to_cache) {
    fbb_.AddElement<int32_t>(NNAPISettings::VT_NO_OF_NNAPI_INSTANCES_TO_CACHE, no_of_nnapi_instances_to_cache, 0);
  }
  void add_fallback_settings(flatbuffers::Offset<tflite::FallbackSettings> fallback_settings) {
    fbb_.AddOffset(NNAPISettings::VT_FALLBACK_SETTINGS, fallback_settings);
  }
  void add_allow_nnapi_cpu_on_android_10_plus(bool allow_nnapi_cpu_on_android_10_plus) {
    fbb_.AddElement<uint8_t>(NNAPISettings::VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS, static_cast<uint8_t>(allow_nnapi_cpu_on_android_10_plus), 0);
  }
  void add_execution_priority(tflite::NNAPIExecutionPriority execution_priority) {
    fbb_.AddElement<int32_t>(NNAPISettings::VT_EXECUTION_PRIORITY, static_cast<int32_t>(execution_priority), 0);
  }
  void add_allow_dynamic_dimensions(bool allow_dynamic_dimensions) {
    fbb_.AddElement<uint8_t>(NNAPISettings::VT_ALLOW_DYNAMIC_DIMENSIONS, static_cast<uint8_t>(allow_dynamic_dimensions), 0);
  }
  void add_allow_fp16_precision_for_fp32(bool allow_fp16_precision_for_fp32) {
    fbb_.AddElement<uint8_t>(NNAPISettings::VT_ALLOW_FP16_PRECISION_FOR_FP32, static_cast<uint8_t>(allow_fp16_precision_for_fp32), 0);
  }
  explicit NNAPISettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but intentionally not defined: builders are not assignable.
  NNAPISettingsBuilder &operator=(const NNAPISettingsBuilder &);
  flatbuffers::Offset<NNAPISettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<NNAPISettings>(end);
    return o;
  }
};
| |
// Convenience one-shot constructor for an NNAPISettings table. The add_*
// calls go 4-byte fields first, then 1-byte bools, each in descending field
// id — the order flatc emits for optimal packing; keep as generated.
inline flatbuffers::Offset<NNAPISettings> CreateNNAPISettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> accelerator_name = 0,
    flatbuffers::Offset<flatbuffers::String> cache_directory = 0,
    flatbuffers::Offset<flatbuffers::String> model_token = 0,
    tflite::NNAPIExecutionPreference execution_preference = tflite::NNAPIExecutionPreference_UNDEFINED,
    int32_t no_of_nnapi_instances_to_cache = 0,
    flatbuffers::Offset<tflite::FallbackSettings> fallback_settings = 0,
    bool allow_nnapi_cpu_on_android_10_plus = false,
    tflite::NNAPIExecutionPriority execution_priority = tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
    bool allow_dynamic_dimensions = false,
    bool allow_fp16_precision_for_fp32 = false) {
  NNAPISettingsBuilder builder_(_fbb);
  builder_.add_execution_priority(execution_priority);
  builder_.add_fallback_settings(fallback_settings);
  builder_.add_no_of_nnapi_instances_to_cache(no_of_nnapi_instances_to_cache);
  builder_.add_execution_preference(execution_preference);
  builder_.add_model_token(model_token);
  builder_.add_cache_directory(cache_directory);
  builder_.add_accelerator_name(accelerator_name);
  builder_.add_allow_fp16_precision_for_fp32(allow_fp16_precision_for_fp32);
  builder_.add_allow_dynamic_dimensions(allow_dynamic_dimensions);
  builder_.add_allow_nnapi_cpu_on_android_10_plus(allow_nnapi_cpu_on_android_10_plus);
  return builder_.Finish();
}
| |
// Like CreateNNAPISettings but takes C strings and serializes them into the
// buffer first; nullptr arguments become absent fields (offset 0).
inline flatbuffers::Offset<NNAPISettings> CreateNNAPISettingsDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *accelerator_name = nullptr,
    const char *cache_directory = nullptr,
    const char *model_token = nullptr,
    tflite::NNAPIExecutionPreference execution_preference = tflite::NNAPIExecutionPreference_UNDEFINED,
    int32_t no_of_nnapi_instances_to_cache = 0,
    flatbuffers::Offset<tflite::FallbackSettings> fallback_settings = 0,
    bool allow_nnapi_cpu_on_android_10_plus = false,
    tflite::NNAPIExecutionPriority execution_priority = tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
    bool allow_dynamic_dimensions = false,
    bool allow_fp16_precision_for_fp32 = false) {
  auto accelerator_name__ = accelerator_name ? _fbb.CreateString(accelerator_name) : 0;
  auto cache_directory__ = cache_directory ? _fbb.CreateString(cache_directory) : 0;
  auto model_token__ = model_token ? _fbb.CreateString(model_token) : 0;
  return tflite::CreateNNAPISettings(
      _fbb,
      accelerator_name__,
      cache_directory__,
      model_token__,
      execution_preference,
      no_of_nnapi_instances_to_cache,
      fallback_settings,
      allow_nnapi_cpu_on_android_10_plus,
      execution_priority,
      allow_dynamic_dimensions,
      allow_fp16_precision_for_fp32);
}

// Packs an object-API NNAPISettingsT back into a buffer; definition is
// emitted elsewhere in the generated output.
flatbuffers::Offset<NNAPISettings> CreateNNAPISettings(flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
| |
// Object-API (unpacked, heap-owning) mirror of the GPUSettings table.
struct GPUSettingsT : public flatbuffers::NativeTable {
  typedef GPUSettings TableType;
  bool is_precision_loss_allowed;    // default false
  bool enable_quantized_inference;   // default true — note the non-zero default
  tflite::GPUBackend force_backend;  // default UNSET (no forced backend)
  GPUSettingsT()
      : is_precision_loss_allowed(false),
        enable_quantized_inference(true),
        force_backend(tflite::GPUBackend_UNSET) {
  }
};
| |
// Flat (in-buffer) accessor for the GPUSettings table; a non-owning view
// into a FlatBuffer.
struct GPUSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef GPUSettingsT NativeTableType;
  // VTable byte offsets, fixed by the schema's field ids.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_IS_PRECISION_LOSS_ALLOWED = 4,
    VT_ENABLE_QUANTIZED_INFERENCE = 6,
    VT_FORCE_BACKEND = 8
  };
  bool is_precision_loss_allowed() const {
    return GetField<uint8_t>(VT_IS_PRECISION_LOSS_ALLOWED, 0) != 0;
  }
  // Schema default is true (GetField default of 1), unlike the other bools.
  bool enable_quantized_inference() const {
    return GetField<uint8_t>(VT_ENABLE_QUANTIZED_INFERENCE, 1) != 0;
  }
  // Stored as int32; defaults to UNSET (0) when absent.
  tflite::GPUBackend force_backend() const {
    return static_cast<tflite::GPUBackend>(GetField<int32_t>(VT_FORCE_BACKEND, 0));
  }
  // Structurally validates this table against the buffer bounds tracked by
  // `verifier`.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint8_t>(verifier, VT_IS_PRECISION_LOSS_ALLOWED) &&
           VerifyField<uint8_t>(verifier, VT_ENABLE_QUANTIZED_INFERENCE) &&
           VerifyField<int32_t>(verifier, VT_FORCE_BACKEND) &&
           verifier.EndTable();
  }
  // Object-API conversions; definitions are emitted elsewhere in the
  // generated output.
  GPUSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(GPUSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<GPUSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Incremental builder for a GPUSettings table: call add_* for each field,
// then Finish() to close the table and obtain its offset.
struct GPUSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_is_precision_loss_allowed(bool is_precision_loss_allowed) {
    fbb_.AddElement<uint8_t>(GPUSettings::VT_IS_PRECISION_LOSS_ALLOWED, static_cast<uint8_t>(is_precision_loss_allowed), 0);
  }
  void add_enable_quantized_inference(bool enable_quantized_inference) {
    fbb_.AddElement<uint8_t>(GPUSettings::VT_ENABLE_QUANTIZED_INFERENCE, static_cast<uint8_t>(enable_quantized_inference), 1);
  }
  void add_force_backend(tflite::GPUBackend force_backend) {
    fbb_.AddElement<int32_t>(GPUSettings::VT_FORCE_BACKEND, static_cast<int32_t>(force_backend), 0);
  }
  explicit GPUSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: disallows assignment (pre-C++11 idiom).
  GPUSettingsBuilder &operator=(const GPUSettingsBuilder &);
  flatbuffers::Offset<GPUSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<GPUSettings>(end);
    return o;
  }
};
| |
// Convenience helper that builds a complete GPUSettings table in one call.
// Fields are added in decreasing size order (int32 before bools), which
// determines the serialized layout; do not reorder.
inline flatbuffers::Offset<GPUSettings> CreateGPUSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    bool is_precision_loss_allowed = false,
    bool enable_quantized_inference = true,
    tflite::GPUBackend force_backend = tflite::GPUBackend_UNSET) {
  GPUSettingsBuilder builder_(_fbb);
  builder_.add_force_backend(force_backend);
  builder_.add_enable_quantized_inference(enable_quantized_inference);
  builder_.add_is_precision_loss_allowed(is_precision_loss_allowed);
  return builder_.Finish();
}
| |
| flatbuffers::Offset<GPUSettings> CreateGPUSettings(flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object (unpacked) form of the HexagonSettings table; owns its fields as
// plain C++ values. The constructor mirrors the schema defaults.
struct HexagonSettingsT : public flatbuffers::NativeTable {
  typedef HexagonSettings TableType;
  int32_t debug_level;        // default: 0
  int32_t powersave_level;    // default: 0
  bool print_graph_profile;   // default: false
  bool print_graph_debug;     // default: false
  HexagonSettingsT()
      : debug_level(0),
        powersave_level(0),
        print_graph_profile(false),
        print_graph_debug(false) {
  }
};
| |
// Read-only accessor for a serialized HexagonSettings table; points into the
// underlying flatbuffer and performs no copies.
struct HexagonSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef HexagonSettingsT NativeTableType;
  // vtable byte offsets for each field.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DEBUG_LEVEL = 4,
    VT_POWERSAVE_LEVEL = 6,
    VT_PRINT_GRAPH_PROFILE = 8,
    VT_PRINT_GRAPH_DEBUG = 10
  };
  int32_t debug_level() const {
    return GetField<int32_t>(VT_DEBUG_LEVEL, 0);
  }
  int32_t powersave_level() const {
    return GetField<int32_t>(VT_POWERSAVE_LEVEL, 0);
  }
  bool print_graph_profile() const {
    return GetField<uint8_t>(VT_PRINT_GRAPH_PROFILE, 0) != 0;
  }
  bool print_graph_debug() const {
    return GetField<uint8_t>(VT_PRINT_GRAPH_DEBUG, 0) != 0;
  }
  // Bounds-checks every field against the buffer.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_DEBUG_LEVEL) &&
           VerifyField<int32_t>(verifier, VT_POWERSAVE_LEVEL) &&
           VerifyField<uint8_t>(verifier, VT_PRINT_GRAPH_PROFILE) &&
           VerifyField<uint8_t>(verifier, VT_PRINT_GRAPH_DEBUG) &&
           verifier.EndTable();
  }
  HexagonSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(HexagonSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<HexagonSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Incremental builder for a HexagonSettings table: call add_* for each field,
// then Finish() to close the table and obtain its offset.
struct HexagonSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_debug_level(int32_t debug_level) {
    fbb_.AddElement<int32_t>(HexagonSettings::VT_DEBUG_LEVEL, debug_level, 0);
  }
  void add_powersave_level(int32_t powersave_level) {
    fbb_.AddElement<int32_t>(HexagonSettings::VT_POWERSAVE_LEVEL, powersave_level, 0);
  }
  void add_print_graph_profile(bool print_graph_profile) {
    fbb_.AddElement<uint8_t>(HexagonSettings::VT_PRINT_GRAPH_PROFILE, static_cast<uint8_t>(print_graph_profile), 0);
  }
  void add_print_graph_debug(bool print_graph_debug) {
    fbb_.AddElement<uint8_t>(HexagonSettings::VT_PRINT_GRAPH_DEBUG, static_cast<uint8_t>(print_graph_debug), 0);
  }
  explicit HexagonSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: disallows assignment (pre-C++11 idiom).
  HexagonSettingsBuilder &operator=(const HexagonSettingsBuilder &);
  flatbuffers::Offset<HexagonSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<HexagonSettings>(end);
    return o;
  }
};
| |
// Convenience helper that builds a complete HexagonSettings table in one
// call. Fields are added in decreasing size order (int32 before bools),
// which determines the serialized layout; do not reorder.
inline flatbuffers::Offset<HexagonSettings> CreateHexagonSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    int32_t debug_level = 0,
    int32_t powersave_level = 0,
    bool print_graph_profile = false,
    bool print_graph_debug = false) {
  HexagonSettingsBuilder builder_(_fbb);
  builder_.add_powersave_level(powersave_level);
  builder_.add_debug_level(debug_level);
  builder_.add_print_graph_debug(print_graph_debug);
  builder_.add_print_graph_profile(print_graph_profile);
  return builder_.Finish();
}
| |
| flatbuffers::Offset<HexagonSettings> CreateHexagonSettings(flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object (unpacked) form of the XNNPackSettings table; owns its single field
// as a plain C++ value.
struct XNNPackSettingsT : public flatbuffers::NativeTable {
  typedef XNNPackSettings TableType;
  int32_t num_threads;  // default: 0
  XNNPackSettingsT()
      : num_threads(0) {
  }
};
| |
// Read-only accessor for a serialized XNNPackSettings table.
struct XNNPackSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef XNNPackSettingsT NativeTableType;
  // vtable byte offset for the single field.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NUM_THREADS = 4
  };
  int32_t num_threads() const {
    return GetField<int32_t>(VT_NUM_THREADS, 0);
  }
  // Bounds-checks the field against the buffer.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_NUM_THREADS) &&
           verifier.EndTable();
  }
  XNNPackSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(XNNPackSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<XNNPackSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Incremental builder for an XNNPackSettings table.
struct XNNPackSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_num_threads(int32_t num_threads) {
    fbb_.AddElement<int32_t>(XNNPackSettings::VT_NUM_THREADS, num_threads, 0);
  }
  explicit XNNPackSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: disallows assignment (pre-C++11 idiom).
  XNNPackSettingsBuilder &operator=(const XNNPackSettingsBuilder &);
  flatbuffers::Offset<XNNPackSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<XNNPackSettings>(end);
    return o;
  }
};
| |
| inline flatbuffers::Offset<XNNPackSettings> CreateXNNPackSettings( |
| flatbuffers::FlatBufferBuilder &_fbb, |
| int32_t num_threads = 0) { |
| XNNPackSettingsBuilder builder_(_fbb); |
| builder_.add_num_threads(num_threads); |
| return builder_.Finish(); |
| } |
| |
| flatbuffers::Offset<XNNPackSettings> CreateXNNPackSettings(flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object (unpacked) form of the EdgeTpuDeviceSpec table; owns its fields,
// including the device-path strings, as plain C++ values.
struct EdgeTpuDeviceSpecT : public flatbuffers::NativeTable {
  typedef EdgeTpuDeviceSpec TableType;
  tflite::EdgeTpuDeviceSpec_::PlatformType platform_type;  // default: PlatformType_MMIO
  int32_t num_chips;                                       // default: 0
  std::vector<std::string> device_paths;                   // owned copies of the path strings
  int32_t chip_family;                                     // default: 0
  EdgeTpuDeviceSpecT()
      : platform_type(tflite::EdgeTpuDeviceSpec_::PlatformType_MMIO),
        num_chips(0),
        chip_family(0) {
  }
};
| |
// Read-only accessor for a serialized EdgeTpuDeviceSpec table; points into
// the underlying flatbuffer and performs no copies.
struct EdgeTpuDeviceSpec FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef EdgeTpuDeviceSpecT NativeTableType;
  // vtable byte offsets for each field.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_PLATFORM_TYPE = 4,
    VT_NUM_CHIPS = 6,
    VT_DEVICE_PATHS = 8,
    VT_CHIP_FAMILY = 10
  };
  tflite::EdgeTpuDeviceSpec_::PlatformType platform_type() const {
    return static_cast<tflite::EdgeTpuDeviceSpec_::PlatformType>(GetField<int32_t>(VT_PLATFORM_TYPE, 0));
  }
  int32_t num_chips() const {
    return GetField<int32_t>(VT_NUM_CHIPS, 0);
  }
  // Returns nullptr if the field is absent from the buffer.
  const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *device_paths() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DEVICE_PATHS);
  }
  int32_t chip_family() const {
    return GetField<int32_t>(VT_CHIP_FAMILY, 0);
  }
  // Bounds-checks every field; the string vector is verified element-wise.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_PLATFORM_TYPE) &&
           VerifyField<int32_t>(verifier, VT_NUM_CHIPS) &&
           VerifyOffset(verifier, VT_DEVICE_PATHS) &&
           verifier.VerifyVector(device_paths()) &&
           verifier.VerifyVectorOfStrings(device_paths()) &&
           VerifyField<int32_t>(verifier, VT_CHIP_FAMILY) &&
           verifier.EndTable();
  }
  EdgeTpuDeviceSpecT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(EdgeTpuDeviceSpecT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<EdgeTpuDeviceSpec> Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Incremental builder for an EdgeTpuDeviceSpec table: call add_* for each
// field, then Finish() to close the table and obtain its offset.
struct EdgeTpuDeviceSpecBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_platform_type(tflite::EdgeTpuDeviceSpec_::PlatformType platform_type) {
    fbb_.AddElement<int32_t>(EdgeTpuDeviceSpec::VT_PLATFORM_TYPE, static_cast<int32_t>(platform_type), 0);
  }
  void add_num_chips(int32_t num_chips) {
    fbb_.AddElement<int32_t>(EdgeTpuDeviceSpec::VT_NUM_CHIPS, num_chips, 0);
  }
  // The vector itself must already have been serialized; only its offset is
  // stored here.
  void add_device_paths(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> device_paths) {
    fbb_.AddOffset(EdgeTpuDeviceSpec::VT_DEVICE_PATHS, device_paths);
  }
  void add_chip_family(int32_t chip_family) {
    fbb_.AddElement<int32_t>(EdgeTpuDeviceSpec::VT_CHIP_FAMILY, chip_family, 0);
  }
  explicit EdgeTpuDeviceSpecBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: disallows assignment (pre-C++11 idiom).
  EdgeTpuDeviceSpecBuilder &operator=(const EdgeTpuDeviceSpecBuilder &);
  flatbuffers::Offset<EdgeTpuDeviceSpec> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<EdgeTpuDeviceSpec>(end);
    return o;
  }
};
| |
// Convenience helper that builds a complete EdgeTpuDeviceSpec table in one
// call. The device_paths vector must already be serialized into _fbb. The
// add_* order determines the serialized layout; do not reorder.
inline flatbuffers::Offset<EdgeTpuDeviceSpec> CreateEdgeTpuDeviceSpec(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuDeviceSpec_::PlatformType platform_type = tflite::EdgeTpuDeviceSpec_::PlatformType_MMIO,
    int32_t num_chips = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> device_paths = 0,
    int32_t chip_family = 0) {
  EdgeTpuDeviceSpecBuilder builder_(_fbb);
  builder_.add_chip_family(chip_family);
  builder_.add_device_paths(device_paths);
  builder_.add_num_chips(num_chips);
  builder_.add_platform_type(platform_type);
  return builder_.Finish();
}
| |
// "Direct" variant: takes a std::vector of already-serialized string offsets
// and copies it into the buffer before delegating to CreateEdgeTpuDeviceSpec.
// A null vector pointer serializes as an absent field (offset 0).
inline flatbuffers::Offset<EdgeTpuDeviceSpec> CreateEdgeTpuDeviceSpecDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuDeviceSpec_::PlatformType platform_type = tflite::EdgeTpuDeviceSpec_::PlatformType_MMIO,
    int32_t num_chips = 0,
    const std::vector<flatbuffers::Offset<flatbuffers::String>> *device_paths = nullptr,
    int32_t chip_family = 0) {
  auto device_paths__ = device_paths ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*device_paths) : 0;
  return tflite::CreateEdgeTpuDeviceSpec(
      _fbb,
      platform_type,
      num_chips,
      device_paths__,
      chip_family);
}
| |
| flatbuffers::Offset<EdgeTpuDeviceSpec> CreateEdgeTpuDeviceSpec(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object (unpacked) form of the EdgeTpuInactivePowerConfig table; owns its
// fields as plain C++ values.
struct EdgeTpuInactivePowerConfigT : public flatbuffers::NativeTable {
  typedef EdgeTpuInactivePowerConfig TableType;
  tflite::EdgeTpuPowerState inactive_power_state;  // default: UNDEFINED_POWERSTATE
  int64_t inactive_timeout_us;                     // default: 0
  EdgeTpuInactivePowerConfigT()
      : inactive_power_state(tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE),
        inactive_timeout_us(0) {
  }
};
| |
// Read-only accessor for a serialized EdgeTpuInactivePowerConfig table.
struct EdgeTpuInactivePowerConfig FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef EdgeTpuInactivePowerConfigT NativeTableType;
  // vtable byte offsets for each field.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INACTIVE_POWER_STATE = 4,
    VT_INACTIVE_TIMEOUT_US = 6
  };
  tflite::EdgeTpuPowerState inactive_power_state() const {
    return static_cast<tflite::EdgeTpuPowerState>(GetField<int32_t>(VT_INACTIVE_POWER_STATE, 0));
  }
  int64_t inactive_timeout_us() const {
    return GetField<int64_t>(VT_INACTIVE_TIMEOUT_US, 0);
  }
  // Bounds-checks every field against the buffer.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_INACTIVE_POWER_STATE) &&
           VerifyField<int64_t>(verifier, VT_INACTIVE_TIMEOUT_US) &&
           verifier.EndTable();
  }
  EdgeTpuInactivePowerConfigT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(EdgeTpuInactivePowerConfigT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<EdgeTpuInactivePowerConfig> Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Incremental builder for an EdgeTpuInactivePowerConfig table.
struct EdgeTpuInactivePowerConfigBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_inactive_power_state(tflite::EdgeTpuPowerState inactive_power_state) {
    fbb_.AddElement<int32_t>(EdgeTpuInactivePowerConfig::VT_INACTIVE_POWER_STATE, static_cast<int32_t>(inactive_power_state), 0);
  }
  void add_inactive_timeout_us(int64_t inactive_timeout_us) {
    fbb_.AddElement<int64_t>(EdgeTpuInactivePowerConfig::VT_INACTIVE_TIMEOUT_US, inactive_timeout_us, 0);
  }
  explicit EdgeTpuInactivePowerConfigBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: disallows assignment (pre-C++11 idiom).
  EdgeTpuInactivePowerConfigBuilder &operator=(const EdgeTpuInactivePowerConfigBuilder &);
  flatbuffers::Offset<EdgeTpuInactivePowerConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<EdgeTpuInactivePowerConfig>(end);
    return o;
  }
};
| |
// Convenience helper that builds a complete EdgeTpuInactivePowerConfig table
// in one call. Fields are added in decreasing size order (int64 before
// int32), which determines the serialized layout; do not reorder.
inline flatbuffers::Offset<EdgeTpuInactivePowerConfig> CreateEdgeTpuInactivePowerConfig(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuPowerState inactive_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE,
    int64_t inactive_timeout_us = 0) {
  EdgeTpuInactivePowerConfigBuilder builder_(_fbb);
  builder_.add_inactive_timeout_us(inactive_timeout_us);
  builder_.add_inactive_power_state(inactive_power_state);
  return builder_.Finish();
}
| |
| flatbuffers::Offset<EdgeTpuInactivePowerConfig> CreateEdgeTpuInactivePowerConfig(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object (unpacked) form of the EdgeTpuSettings table; owns its sub-tables
// through unique_ptr. Note inference_priority defaults to -1 (unset).
struct EdgeTpuSettingsT : public flatbuffers::NativeTable {
  typedef EdgeTpuSettings TableType;
  tflite::EdgeTpuPowerState inference_power_state;  // default: UNDEFINED_POWERSTATE
  std::vector<std::unique_ptr<tflite::EdgeTpuInactivePowerConfigT>> inactive_power_configs;  // owned sub-tables
  int32_t inference_priority;                       // default: -1
  std::unique_ptr<tflite::EdgeTpuDeviceSpecT> edgetpu_device_spec;  // owned; null if absent
  EdgeTpuSettingsT()
      : inference_power_state(tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE),
        inference_priority(-1) {
  }
};
| |
// Read-only accessor for a serialized EdgeTpuSettings table; points into the
// underlying flatbuffer and performs no copies.
struct EdgeTpuSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef EdgeTpuSettingsT NativeTableType;
  // vtable byte offsets for each field.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INFERENCE_POWER_STATE = 4,
    VT_INACTIVE_POWER_CONFIGS = 6,
    VT_INFERENCE_PRIORITY = 8,
    VT_EDGETPU_DEVICE_SPEC = 10
  };
  tflite::EdgeTpuPowerState inference_power_state() const {
    return static_cast<tflite::EdgeTpuPowerState>(GetField<int32_t>(VT_INFERENCE_POWER_STATE, 0));
  }
  // Returns nullptr if the field is absent from the buffer.
  const flatbuffers::Vector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>> *inactive_power_configs() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>> *>(VT_INACTIVE_POWER_CONFIGS);
  }
  // Defaults to -1 (unset) rather than 0.
  int32_t inference_priority() const {
    return GetField<int32_t>(VT_INFERENCE_PRIORITY, -1);
  }
  // Returns nullptr if the field is absent from the buffer.
  const tflite::EdgeTpuDeviceSpec *edgetpu_device_spec() const {
    return GetPointer<const tflite::EdgeTpuDeviceSpec *>(VT_EDGETPU_DEVICE_SPEC);
  }
  // Bounds-checks every field; nested tables/vectors are verified recursively.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_INFERENCE_POWER_STATE) &&
           VerifyOffset(verifier, VT_INACTIVE_POWER_CONFIGS) &&
           verifier.VerifyVector(inactive_power_configs()) &&
           verifier.VerifyVectorOfTables(inactive_power_configs()) &&
           VerifyField<int32_t>(verifier, VT_INFERENCE_PRIORITY) &&
           VerifyOffset(verifier, VT_EDGETPU_DEVICE_SPEC) &&
           verifier.VerifyTable(edgetpu_device_spec()) &&
           verifier.EndTable();
  }
  EdgeTpuSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(EdgeTpuSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<EdgeTpuSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Incremental builder for an EdgeTpuSettings table: call add_* for each
// field, then Finish() to close the table and obtain its offset.
struct EdgeTpuSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_inference_power_state(tflite::EdgeTpuPowerState inference_power_state) {
    fbb_.AddElement<int32_t>(EdgeTpuSettings::VT_INFERENCE_POWER_STATE, static_cast<int32_t>(inference_power_state), 0);
  }
  // The vector itself must already have been serialized; only its offset is
  // stored here.
  void add_inactive_power_configs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>>> inactive_power_configs) {
    fbb_.AddOffset(EdgeTpuSettings::VT_INACTIVE_POWER_CONFIGS, inactive_power_configs);
  }
  // Field default is -1; a value of -1 is therefore not written to the buffer.
  void add_inference_priority(int32_t inference_priority) {
    fbb_.AddElement<int32_t>(EdgeTpuSettings::VT_INFERENCE_PRIORITY, inference_priority, -1);
  }
  void add_edgetpu_device_spec(flatbuffers::Offset<tflite::EdgeTpuDeviceSpec> edgetpu_device_spec) {
    fbb_.AddOffset(EdgeTpuSettings::VT_EDGETPU_DEVICE_SPEC, edgetpu_device_spec);
  }
  explicit EdgeTpuSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: disallows assignment (pre-C++11 idiom).
  EdgeTpuSettingsBuilder &operator=(const EdgeTpuSettingsBuilder &);
  flatbuffers::Offset<EdgeTpuSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<EdgeTpuSettings>(end);
    return o;
  }
};
| |
// Convenience helper that builds a complete EdgeTpuSettings table in one
// call. Sub-tables/vectors must already be serialized into _fbb. The add_*
// order determines the serialized layout; do not reorder.
inline flatbuffers::Offset<EdgeTpuSettings> CreateEdgeTpuSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuPowerState inference_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>>> inactive_power_configs = 0,
    int32_t inference_priority = -1,
    flatbuffers::Offset<tflite::EdgeTpuDeviceSpec> edgetpu_device_spec = 0) {
  EdgeTpuSettingsBuilder builder_(_fbb);
  builder_.add_edgetpu_device_spec(edgetpu_device_spec);
  builder_.add_inference_priority(inference_priority);
  builder_.add_inactive_power_configs(inactive_power_configs);
  builder_.add_inference_power_state(inference_power_state);
  return builder_.Finish();
}
| |
// "Direct" variant: copies the given vector of sub-table offsets into the
// buffer before delegating to CreateEdgeTpuSettings. A null vector pointer
// serializes as an absent field (offset 0).
inline flatbuffers::Offset<EdgeTpuSettings> CreateEdgeTpuSettingsDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuPowerState inference_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE,
    const std::vector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>> *inactive_power_configs = nullptr,
    int32_t inference_priority = -1,
    flatbuffers::Offset<tflite::EdgeTpuDeviceSpec> edgetpu_device_spec = 0) {
  auto inactive_power_configs__ = inactive_power_configs ? _fbb.CreateVector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>>(*inactive_power_configs) : 0;
  return tflite::CreateEdgeTpuSettings(
      _fbb,
      inference_power_state,
      inactive_power_configs__,
      inference_priority,
      edgetpu_device_spec);
}
| |
| flatbuffers::Offset<EdgeTpuSettings> CreateEdgeTpuSettings(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object (unpacked) form of the CoralSettings table; owns its fields,
// including the device string, as plain C++ values.
struct CoralSettingsT : public flatbuffers::NativeTable {
  typedef CoralSettings TableType;
  std::string device;                              // owned copy; empty if absent
  tflite::CoralSettings_::Performance performance; // default: Performance_UNDEFINED
  bool usb_always_dfu;                             // default: false
  int32_t usb_max_bulk_in_queue_length;            // default: 0
  CoralSettingsT()
      : performance(tflite::CoralSettings_::Performance_UNDEFINED),
        usb_always_dfu(false),
        usb_max_bulk_in_queue_length(0) {
  }
};
| |
// Read-only accessor for a serialized CoralSettings table; points into the
// underlying flatbuffer and performs no copies.
struct CoralSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef CoralSettingsT NativeTableType;
  // vtable byte offsets for each field.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DEVICE = 4,
    VT_PERFORMANCE = 6,
    VT_USB_ALWAYS_DFU = 8,
    VT_USB_MAX_BULK_IN_QUEUE_LENGTH = 10
  };
  // Returns nullptr if the field is absent from the buffer.
  const flatbuffers::String *device() const {
    return GetPointer<const flatbuffers::String *>(VT_DEVICE);
  }
  tflite::CoralSettings_::Performance performance() const {
    return static_cast<tflite::CoralSettings_::Performance>(GetField<int32_t>(VT_PERFORMANCE, 0));
  }
  bool usb_always_dfu() const {
    return GetField<uint8_t>(VT_USB_ALWAYS_DFU, 0) != 0;
  }
  int32_t usb_max_bulk_in_queue_length() const {
    return GetField<int32_t>(VT_USB_MAX_BULK_IN_QUEUE_LENGTH, 0);
  }
  // Bounds-checks every field; the string is verified for valid termination.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_DEVICE) &&
           verifier.VerifyString(device()) &&
           VerifyField<int32_t>(verifier, VT_PERFORMANCE) &&
           VerifyField<uint8_t>(verifier, VT_USB_ALWAYS_DFU) &&
           VerifyField<int32_t>(verifier, VT_USB_MAX_BULK_IN_QUEUE_LENGTH) &&
           verifier.EndTable();
  }
  CoralSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(CoralSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<CoralSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Incremental builder for a CoralSettings table: call add_* for each field,
// then Finish() to close the table and obtain its offset.
struct CoralSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  // The string itself must already have been serialized; only its offset is
  // stored here.
  void add_device(flatbuffers::Offset<flatbuffers::String> device) {
    fbb_.AddOffset(CoralSettings::VT_DEVICE, device);
  }
  void add_performance(tflite::CoralSettings_::Performance performance) {
    fbb_.AddElement<int32_t>(CoralSettings::VT_PERFORMANCE, static_cast<int32_t>(performance), 0);
  }
  void add_usb_always_dfu(bool usb_always_dfu) {
    fbb_.AddElement<uint8_t>(CoralSettings::VT_USB_ALWAYS_DFU, static_cast<uint8_t>(usb_always_dfu), 0);
  }
  void add_usb_max_bulk_in_queue_length(int32_t usb_max_bulk_in_queue_length) {
    fbb_.AddElement<int32_t>(CoralSettings::VT_USB_MAX_BULK_IN_QUEUE_LENGTH, usb_max_bulk_in_queue_length, 0);
  }
  explicit CoralSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: disallows assignment (pre-C++11 idiom).
  CoralSettingsBuilder &operator=(const CoralSettingsBuilder &);
  flatbuffers::Offset<CoralSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<CoralSettings>(end);
    return o;
  }
};
| |
// Convenience helper that builds a complete CoralSettings table in one call.
// The device string must already be serialized into _fbb. Fields are added
// in decreasing size order (32-bit values before the bool), which determines
// the serialized layout; do not reorder.
inline flatbuffers::Offset<CoralSettings> CreateCoralSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> device = 0,
    tflite::CoralSettings_::Performance performance = tflite::CoralSettings_::Performance_UNDEFINED,
    bool usb_always_dfu = false,
    int32_t usb_max_bulk_in_queue_length = 0) {
  CoralSettingsBuilder builder_(_fbb);
  builder_.add_usb_max_bulk_in_queue_length(usb_max_bulk_in_queue_length);
  builder_.add_performance(performance);
  builder_.add_device(device);
  builder_.add_usb_always_dfu(usb_always_dfu);
  return builder_.Finish();
}
| |
// "Direct" variant: copies the C string into the buffer before delegating to
// CreateCoralSettings. A null pointer serializes as an absent field
// (offset 0).
inline flatbuffers::Offset<CoralSettings> CreateCoralSettingsDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *device = nullptr,
    tflite::CoralSettings_::Performance performance = tflite::CoralSettings_::Performance_UNDEFINED,
    bool usb_always_dfu = false,
    int32_t usb_max_bulk_in_queue_length = 0) {
  auto device__ = device ? _fbb.CreateString(device) : 0;
  return tflite::CreateCoralSettings(
      _fbb,
      device__,
      performance,
      usb_always_dfu,
      usb_max_bulk_in_queue_length);
}
| |
| flatbuffers::Offset<CoralSettings> CreateCoralSettings(flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object (unpacked) form of the CPUSettings table; owns its single field as
// a plain C++ value.
struct CPUSettingsT : public flatbuffers::NativeTable {
  typedef CPUSettings TableType;
  int32_t num_threads;  // default: 0
  CPUSettingsT()
      : num_threads(0) {
  }
};
| |
// Read-only accessor for a serialized CPUSettings table.
struct CPUSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef CPUSettingsT NativeTableType;
  // vtable byte offset for the single field.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NUM_THREADS = 4
  };
  int32_t num_threads() const {
    return GetField<int32_t>(VT_NUM_THREADS, 0);
  }
  // Bounds-checks the field against the buffer.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_NUM_THREADS) &&
           verifier.EndTable();
  }
  CPUSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(CPUSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<CPUSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Incremental builder for a CPUSettings table.
struct CPUSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_num_threads(int32_t num_threads) {
    fbb_.AddElement<int32_t>(CPUSettings::VT_NUM_THREADS, num_threads, 0);
  }
  explicit CPUSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: disallows assignment (pre-C++11 idiom).
  CPUSettingsBuilder &operator=(const CPUSettingsBuilder &);
  flatbuffers::Offset<CPUSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<CPUSettings>(end);
    return o;
  }
};
| |
| inline flatbuffers::Offset<CPUSettings> CreateCPUSettings( |
| flatbuffers::FlatBufferBuilder &_fbb, |
| int32_t num_threads = 0) { |
| CPUSettingsBuilder builder_(_fbb); |
| builder_.add_num_threads(num_threads); |
| return builder_.Finish(); |
| } |
| |
| flatbuffers::Offset<CPUSettings> CreateCPUSettings(flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object (unpacked) form of the top-level TFLiteSettings table; owns each
// per-delegate settings sub-table through unique_ptr (null when absent).
struct TFLiteSettingsT : public flatbuffers::NativeTable {
  typedef TFLiteSettings TableType;
  tflite::Delegate delegate;  // which delegate to use; default: Delegate_NONE
  std::unique_ptr<tflite::NNAPISettingsT> nnapi_settings;
  std::unique_ptr<tflite::GPUSettingsT> gpu_settings;
  std::unique_ptr<tflite::HexagonSettingsT> hexagon_settings;
  std::unique_ptr<tflite::XNNPackSettingsT> xnnpack_settings;
  std::unique_ptr<tflite::CPUSettingsT> cpu_settings;
  int32_t max_delegated_partitions;  // default: 0
  std::unique_ptr<tflite::EdgeTpuSettingsT> edgetpu_settings;
  std::unique_ptr<tflite::CoralSettingsT> coral_settings;
  std::unique_ptr<tflite::FallbackSettingsT> fallback_settings;
  TFLiteSettingsT()
      : delegate(tflite::Delegate_NONE),
        max_delegated_partitions(0) {
  }
};
| |
// Read-only accessor for a serialized TFLiteSettings table — the top-level
// acceleration configuration. Sub-table accessors return nullptr when the
// corresponding field is absent from the buffer.
struct TFLiteSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef TFLiteSettingsT NativeTableType;
  // vtable byte offsets for each field.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DELEGATE = 4,
    VT_NNAPI_SETTINGS = 6,
    VT_GPU_SETTINGS = 8,
    VT_HEXAGON_SETTINGS = 10,
    VT_XNNPACK_SETTINGS = 12,
    VT_CPU_SETTINGS = 14,
    VT_MAX_DELEGATED_PARTITIONS = 16,
    VT_EDGETPU_SETTINGS = 18,
    VT_CORAL_SETTINGS = 20,
    VT_FALLBACK_SETTINGS = 22
  };
  tflite::Delegate delegate() const {
    return static_cast<tflite::Delegate>(GetField<int32_t>(VT_DELEGATE, 0));
  }
  const tflite::NNAPISettings *nnapi_settings() const {
    return GetPointer<const tflite::NNAPISettings *>(VT_NNAPI_SETTINGS);
  }
  const tflite::GPUSettings *gpu_settings() const {
    return GetPointer<const tflite::GPUSettings *>(VT_GPU_SETTINGS);
  }
  const tflite::HexagonSettings *hexagon_settings() const {
    return GetPointer<const tflite::HexagonSettings *>(VT_HEXAGON_SETTINGS);
  }
  const tflite::XNNPackSettings *xnnpack_settings() const {
    return GetPointer<const tflite::XNNPackSettings *>(VT_XNNPACK_SETTINGS);
  }
  const tflite::CPUSettings *cpu_settings() const {
    return GetPointer<const tflite::CPUSettings *>(VT_CPU_SETTINGS);
  }
  int32_t max_delegated_partitions() const {
    return GetField<int32_t>(VT_MAX_DELEGATED_PARTITIONS, 0);
  }
  const tflite::EdgeTpuSettings *edgetpu_settings() const {
    return GetPointer<const tflite::EdgeTpuSettings *>(VT_EDGETPU_SETTINGS);
  }
  const tflite::CoralSettings *coral_settings() const {
    return GetPointer<const tflite::CoralSettings *>(VT_CORAL_SETTINGS);
  }
  const tflite::FallbackSettings *fallback_settings() const {
    return GetPointer<const tflite::FallbackSettings *>(VT_FALLBACK_SETTINGS);
  }
  // Bounds-checks every field; each nested table is verified recursively.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_DELEGATE) &&
           VerifyOffset(verifier, VT_NNAPI_SETTINGS) &&
           verifier.VerifyTable(nnapi_settings()) &&
           VerifyOffset(verifier, VT_GPU_SETTINGS) &&
           verifier.VerifyTable(gpu_settings()) &&
           VerifyOffset(verifier, VT_HEXAGON_SETTINGS) &&
           verifier.VerifyTable(hexagon_settings()) &&
           VerifyOffset(verifier, VT_XNNPACK_SETTINGS) &&
           verifier.VerifyTable(xnnpack_settings()) &&
           VerifyOffset(verifier, VT_CPU_SETTINGS) &&
           verifier.VerifyTable(cpu_settings()) &&
           VerifyField<int32_t>(verifier, VT_MAX_DELEGATED_PARTITIONS) &&
           VerifyOffset(verifier, VT_EDGETPU_SETTINGS) &&
           verifier.VerifyTable(edgetpu_settings()) &&
           VerifyOffset(verifier, VT_CORAL_SETTINGS) &&
           verifier.VerifyTable(coral_settings()) &&
           VerifyOffset(verifier, VT_FALLBACK_SETTINGS) &&
           verifier.VerifyTable(fallback_settings()) &&
           verifier.EndTable();
  }
  TFLiteSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(TFLiteSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<TFLiteSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Builder for incrementally writing a TFLiteSettings table into a
// FlatBufferBuilder: call add_*() for each field to store, then Finish().
// NOTE: flatc-generated; regenerate from the schema instead of hand-editing.
struct TFLiteSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;  // target buffer being written
  flatbuffers::uoffset_t start_;         // table start offset from StartTable()
  void add_delegate(tflite::Delegate delegate) {
    // Enum stored as int32; a value equal to the default (0) is elided.
    fbb_.AddElement<int32_t>(TFLiteSettings::VT_DELEGATE, static_cast<int32_t>(delegate), 0);
  }
  void add_nnapi_settings(flatbuffers::Offset<tflite::NNAPISettings> nnapi_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_NNAPI_SETTINGS, nnapi_settings);
  }
  void add_gpu_settings(flatbuffers::Offset<tflite::GPUSettings> gpu_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_GPU_SETTINGS, gpu_settings);
  }
  void add_hexagon_settings(flatbuffers::Offset<tflite::HexagonSettings> hexagon_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_HEXAGON_SETTINGS, hexagon_settings);
  }
  void add_xnnpack_settings(flatbuffers::Offset<tflite::XNNPackSettings> xnnpack_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_XNNPACK_SETTINGS, xnnpack_settings);
  }
  void add_cpu_settings(flatbuffers::Offset<tflite::CPUSettings> cpu_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_CPU_SETTINGS, cpu_settings);
  }
  void add_max_delegated_partitions(int32_t max_delegated_partitions) {
    fbb_.AddElement<int32_t>(TFLiteSettings::VT_MAX_DELEGATED_PARTITIONS, max_delegated_partitions, 0);
  }
  void add_edgetpu_settings(flatbuffers::Offset<tflite::EdgeTpuSettings> edgetpu_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_EDGETPU_SETTINGS, edgetpu_settings);
  }
  void add_coral_settings(flatbuffers::Offset<tflite::CoralSettings> coral_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_CORAL_SETTINGS, coral_settings);
  }
  void add_fallback_settings(flatbuffers::Offset<tflite::FallbackSettings> fallback_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_FALLBACK_SETTINGS, fallback_settings);
  }
  explicit TFLiteSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Copy-assignment declared but not defined: builders are not assignable.
  TFLiteSettingsBuilder &operator=(const TFLiteSettingsBuilder &);
  // Ends the table and returns its offset within the buffer.
  flatbuffers::Offset<TFLiteSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<TFLiteSettings>(end);
    return o;
  }
};
| |
// Convenience factory: writes a complete TFLiteSettings table in one call.
// Fields are added in reverse declaration order (flatc's size-sorted layout).
inline flatbuffers::Offset<TFLiteSettings> CreateTFLiteSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::Delegate delegate = tflite::Delegate_NONE,
    flatbuffers::Offset<tflite::NNAPISettings> nnapi_settings = 0,
    flatbuffers::Offset<tflite::GPUSettings> gpu_settings = 0,
    flatbuffers::Offset<tflite::HexagonSettings> hexagon_settings = 0,
    flatbuffers::Offset<tflite::XNNPackSettings> xnnpack_settings = 0,
    flatbuffers::Offset<tflite::CPUSettings> cpu_settings = 0,
    int32_t max_delegated_partitions = 0,
    flatbuffers::Offset<tflite::EdgeTpuSettings> edgetpu_settings = 0,
    flatbuffers::Offset<tflite::CoralSettings> coral_settings = 0,
    flatbuffers::Offset<tflite::FallbackSettings> fallback_settings = 0) {
  TFLiteSettingsBuilder builder_(_fbb);
  builder_.add_fallback_settings(fallback_settings);
  builder_.add_coral_settings(coral_settings);
  builder_.add_edgetpu_settings(edgetpu_settings);
  builder_.add_max_delegated_partitions(max_delegated_partitions);
  builder_.add_cpu_settings(cpu_settings);
  builder_.add_xnnpack_settings(xnnpack_settings);
  builder_.add_hexagon_settings(hexagon_settings);
  builder_.add_gpu_settings(gpu_settings);
  builder_.add_nnapi_settings(nnapi_settings);
  builder_.add_delegate(delegate);
  return builder_.Finish();
}
| |
| flatbuffers::Offset<TFLiteSettings> CreateTFLiteSettings(flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object-API (unpacked, mutable) representation of FallbackSettings.
// Both flags default to false, matching the serialized defaults.
struct FallbackSettingsT : public flatbuffers::NativeTable {
  typedef FallbackSettings TableType;
  bool allow_automatic_fallback_on_compilation_error;
  bool allow_automatic_fallback_on_execution_error;
  FallbackSettingsT()
      : allow_automatic_fallback_on_compilation_error(false),
        allow_automatic_fallback_on_execution_error(false) {
  }
};
| |
// In-buffer accessor for a FallbackSettings table; getters read directly
// from the serialized FlatBuffer. Bools are stored on the wire as uint8_t.
struct FallbackSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef FallbackSettingsT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR = 4,
    VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR = 6
  };
  bool allow_automatic_fallback_on_compilation_error() const {
    return GetField<uint8_t>(VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR, 0) != 0;
  }
  bool allow_automatic_fallback_on_execution_error() const {
    return GetField<uint8_t>(VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR, 0) != 0;
  }
  // Bounds/structure check used when verifying an untrusted buffer.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR) &&
           verifier.EndTable();
  }
  FallbackSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(FallbackSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<FallbackSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Builder for incrementally writing a FallbackSettings table.
struct FallbackSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;  // target buffer
  flatbuffers::uoffset_t start_;         // table start from StartTable()
  void add_allow_automatic_fallback_on_compilation_error(bool allow_automatic_fallback_on_compilation_error) {
    // Stored as uint8_t; default (0/false) values are elided.
    fbb_.AddElement<uint8_t>(FallbackSettings::VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR, static_cast<uint8_t>(allow_automatic_fallback_on_compilation_error), 0);
  }
  void add_allow_automatic_fallback_on_execution_error(bool allow_automatic_fallback_on_execution_error) {
    fbb_.AddElement<uint8_t>(FallbackSettings::VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR, static_cast<uint8_t>(allow_automatic_fallback_on_execution_error), 0);
  }
  explicit FallbackSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Not assignable (declared, never defined).
  FallbackSettingsBuilder &operator=(const FallbackSettingsBuilder &);
  flatbuffers::Offset<FallbackSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<FallbackSettings>(end);
    return o;
  }
};
| |
// One-call factory for a FallbackSettings table.
inline flatbuffers::Offset<FallbackSettings> CreateFallbackSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    bool allow_automatic_fallback_on_compilation_error = false,
    bool allow_automatic_fallback_on_execution_error = false) {
  FallbackSettingsBuilder builder_(_fbb);
  builder_.add_allow_automatic_fallback_on_execution_error(allow_automatic_fallback_on_execution_error);
  builder_.add_allow_automatic_fallback_on_compilation_error(allow_automatic_fallback_on_compilation_error);
  return builder_.Finish();
}
| |
| flatbuffers::Offset<FallbackSettings> CreateFallbackSettings(flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object-API representation of BenchmarkMetric: a named series of
// float values.
struct BenchmarkMetricT : public flatbuffers::NativeTable {
  typedef BenchmarkMetric TableType;
  std::string name;
  std::vector<float> values;
  BenchmarkMetricT() {
  }
};
| |
// In-buffer accessor for a BenchmarkMetric table (name + float values).
// Pointer accessors return nullptr when the field is absent.
struct BenchmarkMetric FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef BenchmarkMetricT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NAME = 4,
    VT_VALUES = 6
  };
  const flatbuffers::String *name() const {
    return GetPointer<const flatbuffers::String *>(VT_NAME);
  }
  const flatbuffers::Vector<float> *values() const {
    return GetPointer<const flatbuffers::Vector<float> *>(VT_VALUES);
  }
  // Verifies table structure, the string and the vector bounds.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_NAME) &&
           verifier.VerifyString(name()) &&
           VerifyOffset(verifier, VT_VALUES) &&
           verifier.VerifyVector(values()) &&
           verifier.EndTable();
  }
  BenchmarkMetricT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkMetricT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<BenchmarkMetric> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Builder for incrementally writing a BenchmarkMetric table.
struct BenchmarkMetricBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;  // target buffer
  flatbuffers::uoffset_t start_;         // table start from StartTable()
  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
    fbb_.AddOffset(BenchmarkMetric::VT_NAME, name);
  }
  void add_values(flatbuffers::Offset<flatbuffers::Vector<float>> values) {
    fbb_.AddOffset(BenchmarkMetric::VT_VALUES, values);
  }
  explicit BenchmarkMetricBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Not assignable (declared, never defined).
  BenchmarkMetricBuilder &operator=(const BenchmarkMetricBuilder &);
  flatbuffers::Offset<BenchmarkMetric> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BenchmarkMetric>(end);
    return o;
  }
};
| |
// One-call factory taking pre-serialized offsets for name and values.
inline flatbuffers::Offset<BenchmarkMetric> CreateBenchmarkMetric(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> name = 0,
    flatbuffers::Offset<flatbuffers::Vector<float>> values = 0) {
  BenchmarkMetricBuilder builder_(_fbb);
  builder_.add_values(values);
  builder_.add_name(name);
  return builder_.Finish();
}
| |
// Convenience overload taking plain C++ types; serializes the string and
// vector into the buffer first (null pointers become absent fields).
inline flatbuffers::Offset<BenchmarkMetric> CreateBenchmarkMetricDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *name = nullptr,
    const std::vector<float> *values = nullptr) {
  auto name__ = name ? _fbb.CreateString(name) : 0;
  auto values__ = values ? _fbb.CreateVector<float>(*values) : 0;
  return tflite::CreateBenchmarkMetric(
      _fbb,
      name__,
      values__);
}
| |
| flatbuffers::Offset<BenchmarkMetric> CreateBenchmarkMetric(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object-API representation of BenchmarkResult: timing samples (in
// microseconds), peak memory, success flag and per-metric results.
struct BenchmarkResultT : public flatbuffers::NativeTable {
  typedef BenchmarkResult TableType;
  std::vector<int64_t> initialization_time_us;
  std::vector<int64_t> inference_time_us;
  int32_t max_memory_kb;
  bool ok;
  std::vector<std::unique_ptr<tflite::BenchmarkMetricT>> metrics;  // owned
  BenchmarkResultT()
      : max_memory_kb(0),
        ok(false) {
  }
};
| |
// In-buffer accessor for a BenchmarkResult table. Pointer accessors return
// nullptr when the field is absent; scalars fall back to their defaults.
struct BenchmarkResult FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef BenchmarkResultT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INITIALIZATION_TIME_US = 4,
    VT_INFERENCE_TIME_US = 6,
    VT_MAX_MEMORY_KB = 8,
    VT_OK = 10,
    VT_METRICS = 12
  };
  const flatbuffers::Vector<int64_t> *initialization_time_us() const {
    return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_INITIALIZATION_TIME_US);
  }
  const flatbuffers::Vector<int64_t> *inference_time_us() const {
    return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_INFERENCE_TIME_US);
  }
  int32_t max_memory_kb() const {
    return GetField<int32_t>(VT_MAX_MEMORY_KB, 0);
  }
  bool ok() const {
    return GetField<uint8_t>(VT_OK, 0) != 0;
  }
  const flatbuffers::Vector<flatbuffers::Offset<tflite::BenchmarkMetric>> *metrics() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::BenchmarkMetric>> *>(VT_METRICS);
  }
  // Verifies structure, vector bounds, and each nested BenchmarkMetric table.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_INITIALIZATION_TIME_US) &&
           verifier.VerifyVector(initialization_time_us()) &&
           VerifyOffset(verifier, VT_INFERENCE_TIME_US) &&
           verifier.VerifyVector(inference_time_us()) &&
           VerifyField<int32_t>(verifier, VT_MAX_MEMORY_KB) &&
           VerifyField<uint8_t>(verifier, VT_OK) &&
           VerifyOffset(verifier, VT_METRICS) &&
           verifier.VerifyVector(metrics()) &&
           verifier.VerifyVectorOfTables(metrics()) &&
           verifier.EndTable();
  }
  BenchmarkResultT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkResultT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<BenchmarkResult> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Builder for incrementally writing a BenchmarkResult table.
struct BenchmarkResultBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;  // target buffer
  flatbuffers::uoffset_t start_;         // table start from StartTable()
  void add_initialization_time_us(flatbuffers::Offset<flatbuffers::Vector<int64_t>> initialization_time_us) {
    fbb_.AddOffset(BenchmarkResult::VT_INITIALIZATION_TIME_US, initialization_time_us);
  }
  void add_inference_time_us(flatbuffers::Offset<flatbuffers::Vector<int64_t>> inference_time_us) {
    fbb_.AddOffset(BenchmarkResult::VT_INFERENCE_TIME_US, inference_time_us);
  }
  void add_max_memory_kb(int32_t max_memory_kb) {
    fbb_.AddElement<int32_t>(BenchmarkResult::VT_MAX_MEMORY_KB, max_memory_kb, 0);
  }
  void add_ok(bool ok) {
    // Bool stored as uint8_t; default (false) values are elided.
    fbb_.AddElement<uint8_t>(BenchmarkResult::VT_OK, static_cast<uint8_t>(ok), 0);
  }
  void add_metrics(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::BenchmarkMetric>>> metrics) {
    fbb_.AddOffset(BenchmarkResult::VT_METRICS, metrics);
  }
  explicit BenchmarkResultBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Not assignable (declared, never defined).
  BenchmarkResultBuilder &operator=(const BenchmarkResultBuilder &);
  flatbuffers::Offset<BenchmarkResult> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BenchmarkResult>(end);
    return o;
  }
};
| |
// One-call factory; fields added largest-alignment-first (1-byte `ok` last).
inline flatbuffers::Offset<BenchmarkResult> CreateBenchmarkResult(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::Vector<int64_t>> initialization_time_us = 0,
    flatbuffers::Offset<flatbuffers::Vector<int64_t>> inference_time_us = 0,
    int32_t max_memory_kb = 0,
    bool ok = false,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::BenchmarkMetric>>> metrics = 0) {
  BenchmarkResultBuilder builder_(_fbb);
  builder_.add_metrics(metrics);
  builder_.add_max_memory_kb(max_memory_kb);
  builder_.add_inference_time_us(inference_time_us);
  builder_.add_initialization_time_us(initialization_time_us);
  builder_.add_ok(ok);
  return builder_.Finish();
}
| |
// Convenience overload taking plain C++ vectors; serializes them first
// (null pointers become absent fields).
inline flatbuffers::Offset<BenchmarkResult> CreateBenchmarkResultDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const std::vector<int64_t> *initialization_time_us = nullptr,
    const std::vector<int64_t> *inference_time_us = nullptr,
    int32_t max_memory_kb = 0,
    bool ok = false,
    const std::vector<flatbuffers::Offset<tflite::BenchmarkMetric>> *metrics = nullptr) {
  auto initialization_time_us__ = initialization_time_us ? _fbb.CreateVector<int64_t>(*initialization_time_us) : 0;
  auto inference_time_us__ = inference_time_us ? _fbb.CreateVector<int64_t>(*inference_time_us) : 0;
  auto metrics__ = metrics ? _fbb.CreateVector<flatbuffers::Offset<tflite::BenchmarkMetric>>(*metrics) : 0;
  return tflite::CreateBenchmarkResult(
      _fbb,
      initialization_time_us__,
      inference_time_us__,
      max_memory_kb,
      ok,
      metrics__);
}
| |
| flatbuffers::Offset<BenchmarkResult> CreateBenchmarkResult(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object-API representation of ErrorCode: which delegate produced the
// error, the TFLite-level code, and the underlying API's own error code.
struct ErrorCodeT : public flatbuffers::NativeTable {
  typedef ErrorCode TableType;
  tflite::Delegate source;
  int32_t tflite_error;
  int64_t underlying_api_error;
  ErrorCodeT()
      : source(tflite::Delegate_NONE),
        tflite_error(0),
        underlying_api_error(0) {
  }
};
| |
// In-buffer accessor for an ErrorCode table (all scalar fields).
struct ErrorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef ErrorCodeT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_SOURCE = 4,
    VT_TFLITE_ERROR = 6,
    VT_UNDERLYING_API_ERROR = 8
  };
  tflite::Delegate source() const {
    // Enum stored on the wire as int32.
    return static_cast<tflite::Delegate>(GetField<int32_t>(VT_SOURCE, 0));
  }
  int32_t tflite_error() const {
    return GetField<int32_t>(VT_TFLITE_ERROR, 0);
  }
  int64_t underlying_api_error() const {
    return GetField<int64_t>(VT_UNDERLYING_API_ERROR, 0);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_SOURCE) &&
           VerifyField<int32_t>(verifier, VT_TFLITE_ERROR) &&
           VerifyField<int64_t>(verifier, VT_UNDERLYING_API_ERROR) &&
           verifier.EndTable();
  }
  ErrorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(ErrorCodeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<ErrorCode> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Builder for incrementally writing an ErrorCode table.
struct ErrorCodeBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;  // target buffer
  flatbuffers::uoffset_t start_;         // table start from StartTable()
  void add_source(tflite::Delegate source) {
    fbb_.AddElement<int32_t>(ErrorCode::VT_SOURCE, static_cast<int32_t>(source), 0);
  }
  void add_tflite_error(int32_t tflite_error) {
    fbb_.AddElement<int32_t>(ErrorCode::VT_TFLITE_ERROR, tflite_error, 0);
  }
  void add_underlying_api_error(int64_t underlying_api_error) {
    fbb_.AddElement<int64_t>(ErrorCode::VT_UNDERLYING_API_ERROR, underlying_api_error, 0);
  }
  explicit ErrorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Not assignable (declared, never defined).
  ErrorCodeBuilder &operator=(const ErrorCodeBuilder &);
  flatbuffers::Offset<ErrorCode> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ErrorCode>(end);
    return o;
  }
};
| |
// One-call factory; the 8-byte field is added first for alignment.
inline flatbuffers::Offset<ErrorCode> CreateErrorCode(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::Delegate source = tflite::Delegate_NONE,
    int32_t tflite_error = 0,
    int64_t underlying_api_error = 0) {
  ErrorCodeBuilder builder_(_fbb);
  builder_.add_underlying_api_error(underlying_api_error);
  builder_.add_tflite_error(tflite_error);
  builder_.add_source(source);
  return builder_.Finish();
}
| |
| flatbuffers::Offset<ErrorCode> CreateErrorCode(flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object-API representation of BenchmarkError: the benchmark stage that
// failed, process exit code/signal, and the collected error codes.
struct BenchmarkErrorT : public flatbuffers::NativeTable {
  typedef BenchmarkError TableType;
  tflite::BenchmarkStage stage;
  int32_t exit_code;
  int32_t signal;
  std::vector<std::unique_ptr<tflite::ErrorCodeT>> error_code;  // owned
  BenchmarkErrorT()
      : stage(tflite::BenchmarkStage_UNKNOWN),
        exit_code(0),
        signal(0) {
  }
};
| |
// In-buffer accessor for a BenchmarkError table.
struct BenchmarkError FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef BenchmarkErrorT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_STAGE = 4,
    VT_EXIT_CODE = 6,
    VT_SIGNAL = 8,
    VT_ERROR_CODE = 10
  };
  tflite::BenchmarkStage stage() const {
    // Enum stored on the wire as int32.
    return static_cast<tflite::BenchmarkStage>(GetField<int32_t>(VT_STAGE, 0));
  }
  int32_t exit_code() const {
    return GetField<int32_t>(VT_EXIT_CODE, 0);
  }
  int32_t signal() const {
    return GetField<int32_t>(VT_SIGNAL, 0);
  }
  const flatbuffers::Vector<flatbuffers::Offset<tflite::ErrorCode>> *error_code() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::ErrorCode>> *>(VT_ERROR_CODE);
  }
  // Verifies scalars plus each nested ErrorCode table in the vector.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_STAGE) &&
           VerifyField<int32_t>(verifier, VT_EXIT_CODE) &&
           VerifyField<int32_t>(verifier, VT_SIGNAL) &&
           VerifyOffset(verifier, VT_ERROR_CODE) &&
           verifier.VerifyVector(error_code()) &&
           verifier.VerifyVectorOfTables(error_code()) &&
           verifier.EndTable();
  }
  BenchmarkErrorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkErrorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<BenchmarkError> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Builder for incrementally writing a BenchmarkError table.
struct BenchmarkErrorBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;  // target buffer
  flatbuffers::uoffset_t start_;         // table start from StartTable()
  void add_stage(tflite::BenchmarkStage stage) {
    fbb_.AddElement<int32_t>(BenchmarkError::VT_STAGE, static_cast<int32_t>(stage), 0);
  }
  void add_exit_code(int32_t exit_code) {
    fbb_.AddElement<int32_t>(BenchmarkError::VT_EXIT_CODE, exit_code, 0);
  }
  void add_signal(int32_t signal) {
    fbb_.AddElement<int32_t>(BenchmarkError::VT_SIGNAL, signal, 0);
  }
  void add_error_code(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::ErrorCode>>> error_code) {
    fbb_.AddOffset(BenchmarkError::VT_ERROR_CODE, error_code);
  }
  explicit BenchmarkErrorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Not assignable (declared, never defined).
  BenchmarkErrorBuilder &operator=(const BenchmarkErrorBuilder &);
  flatbuffers::Offset<BenchmarkError> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BenchmarkError>(end);
    return o;
  }
};
| |
// One-call factory for a BenchmarkError table.
inline flatbuffers::Offset<BenchmarkError> CreateBenchmarkError(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::BenchmarkStage stage = tflite::BenchmarkStage_UNKNOWN,
    int32_t exit_code = 0,
    int32_t signal = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::ErrorCode>>> error_code = 0) {
  BenchmarkErrorBuilder builder_(_fbb);
  builder_.add_error_code(error_code);
  builder_.add_signal(signal);
  builder_.add_exit_code(exit_code);
  builder_.add_stage(stage);
  return builder_.Finish();
}
| |
// Convenience overload taking a plain std::vector of nested-table offsets;
// serializes the vector first (null pointer becomes an absent field).
inline flatbuffers::Offset<BenchmarkError> CreateBenchmarkErrorDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::BenchmarkStage stage = tflite::BenchmarkStage_UNKNOWN,
    int32_t exit_code = 0,
    int32_t signal = 0,
    const std::vector<flatbuffers::Offset<tflite::ErrorCode>> *error_code = nullptr) {
  auto error_code__ = error_code ? _fbb.CreateVector<flatbuffers::Offset<tflite::ErrorCode>>(*error_code) : 0;
  return tflite::CreateBenchmarkError(
      _fbb,
      stage,
      exit_code,
      signal,
      error_code__);
}
| |
| flatbuffers::Offset<BenchmarkError> CreateBenchmarkError(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
// Object-API representation of BenchmarkEvent: the settings the benchmark
// ran with, the event type, and either a result or an error payload,
// plus boot-time and wall-clock timestamps in microseconds.
struct BenchmarkEventT : public flatbuffers::NativeTable {
  typedef BenchmarkEvent TableType;
  std::unique_ptr<tflite::TFLiteSettingsT> tflite_settings;  // owned
  tflite::BenchmarkEventType event_type;
  std::unique_ptr<tflite::BenchmarkResultT> result;          // owned
  std::unique_ptr<tflite::BenchmarkErrorT> error;            // owned
  int64_t boottime_us;
  int64_t wallclock_us;
  BenchmarkEventT()
      : event_type(tflite::BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE),
        boottime_us(0),
        wallclock_us(0) {
  }
};
| |
// In-buffer accessor for a BenchmarkEvent table. Nested-table accessors
// return nullptr when the field is absent.
struct BenchmarkEvent FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef BenchmarkEventT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_TFLITE_SETTINGS = 4,
    VT_EVENT_TYPE = 6,
    VT_RESULT = 8,
    VT_ERROR = 10,
    VT_BOOTTIME_US = 12,
    VT_WALLCLOCK_US = 14
  };
  const tflite::TFLiteSettings *tflite_settings() const {
    return GetPointer<const tflite::TFLiteSettings *>(VT_TFLITE_SETTINGS);
  }
  tflite::BenchmarkEventType event_type() const {
    // Enum stored on the wire as int32.
    return static_cast<tflite::BenchmarkEventType>(GetField<int32_t>(VT_EVENT_TYPE, 0));
  }
  const tflite::BenchmarkResult *result() const {
    return GetPointer<const tflite::BenchmarkResult *>(VT_RESULT);
  }
  const tflite::BenchmarkError *error() const {
    return GetPointer<const tflite::BenchmarkError *>(VT_ERROR);
  }
  int64_t boottime_us() const {
    return GetField<int64_t>(VT_BOOTTIME_US, 0);
  }
  int64_t wallclock_us() const {
    return GetField<int64_t>(VT_WALLCLOCK_US, 0);
  }
  // Verifies scalars and recursively verifies each nested table.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_TFLITE_SETTINGS) &&
           verifier.VerifyTable(tflite_settings()) &&
           VerifyField<int32_t>(verifier, VT_EVENT_TYPE) &&
           VerifyOffset(verifier, VT_RESULT) &&
           verifier.VerifyTable(result()) &&
           VerifyOffset(verifier, VT_ERROR) &&
           verifier.VerifyTable(error()) &&
           VerifyField<int64_t>(verifier, VT_BOOTTIME_US) &&
           VerifyField<int64_t>(verifier, VT_WALLCLOCK_US) &&
           verifier.EndTable();
  }
  BenchmarkEventT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkEventT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<BenchmarkEvent> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
| |
// Builder for incrementally writing a BenchmarkEvent table.
struct BenchmarkEventBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;  // target buffer
  flatbuffers::uoffset_t start_;         // table start from StartTable()
  void add_tflite_settings(flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings) {
    fbb_.AddOffset(BenchmarkEvent::VT_TFLITE_SETTINGS, tflite_settings);
  }
  void add_event_type(tflite::BenchmarkEventType event_type) {
    fbb_.AddElement<int32_t>(BenchmarkEvent::VT_EVENT_TYPE, static_cast<int32_t>(event_type), 0);
  }
  void add_result(flatbuffers::Offset<tflite::BenchmarkResult> result) {
    fbb_.AddOffset(BenchmarkEvent::VT_RESULT, result);
  }
  void add_error(flatbuffers::Offset<tflite::BenchmarkError> error) {
    fbb_.AddOffset(BenchmarkEvent::VT_ERROR, error);
  }
  void add_boottime_us(int64_t boottime_us) {
    fbb_.AddElement<int64_t>(BenchmarkEvent::VT_BOOTTIME_US, boottime_us, 0);
  }
  void add_wallclock_us(int64_t wallclock_us) {
    fbb_.AddElement<int64_t>(BenchmarkEvent::VT_WALLCLOCK_US, wallclock_us, 0);
  }
  explicit BenchmarkEventBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Not assignable (declared, never defined).
  BenchmarkEventBuilder &operator=(const BenchmarkEventBuilder &);
  flatbuffers::Offset<BenchmarkEvent> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BenchmarkEvent>(end);
    return o;
  }
};
| |
// One-call factory; 8-byte timestamp fields are added first for alignment.
inline flatbuffers::Offset<BenchmarkEvent> CreateBenchmarkEvent(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings = 0,
    tflite::BenchmarkEventType event_type = tflite::BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE,
    flatbuffers::Offset<tflite::BenchmarkResult> result = 0,
    flatbuffers::Offset<tflite::BenchmarkError> error = 0,
    int64_t boottime_us = 0,
    int64_t wallclock_us = 0) {
  BenchmarkEventBuilder builder_(_fbb);
  builder_.add_wallclock_us(wallclock_us);
  builder_.add_boottime_us(boottime_us);
  builder_.add_error(error);
  builder_.add_result(result);
  builder_.add_event_type(event_type);
  builder_.add_tflite_settings(tflite_settings);
  return builder_.Finish();
}
| |
| flatbuffers::Offset<BenchmarkEvent> CreateBenchmarkEvent(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); |
| |
| |
// Field-wise equality for the object API. For the unique_ptr member, two
// values compare equal when both are null, both point to equal objects, or
// one is null and the other equals a default-constructed object (i.e. an
// absent table and a default table are treated as the same).
inline bool operator==(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs) {
  return
      (lhs.preference == rhs.preference) &&
      ((!lhs.tflite_settings && !rhs.tflite_settings) || (lhs.tflite_settings && rhs.tflite_settings && *lhs.tflite_settings == *rhs.tflite_settings) || (lhs.tflite_settings && !rhs.tflite_settings && *lhs.tflite_settings == decltype(lhs.tflite_settings)::element_type()) || (rhs.tflite_settings && !lhs.tflite_settings && *rhs.tflite_settings == decltype(rhs.tflite_settings)::element_type())) &&
      (lhs.model_namespace_for_statistics == rhs.model_namespace_for_statistics) &&
      (lhs.model_identifier_for_statistics == rhs.model_identifier_for_statistics);
}
| |
// Defined as the negation of operator==.
inline bool operator!=(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs) {
  return !(lhs == rhs);
}
| |
| |
// Allocates a new object-API copy of this table; the caller takes
// ownership of the returned raw pointer.
inline ComputeSettingsT *ComputeSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new ComputeSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}
| |
// Copies each field of this flat table into an existing object-API struct.
// Absent pointer fields leave the destination members untouched.
inline void ComputeSettings::UnPackTo(ComputeSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = preference(); _o->preference = _e; }
  // Nested table is deep-copied via its own UnPack.
  { auto _e = tflite_settings(); if (_e) _o->tflite_settings = std::unique_ptr<tflite::TFLiteSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = model_namespace_for_statistics(); if (_e) _o->model_namespace_for_statistics = _e->str(); }
  { auto _e = model_identifier_for_statistics(); if (_e) _o->model_identifier_for_statistics = _e->str(); }
}
| |
// Thin forwarder to the free CreateComputeSettings(object-API) overload.
inline flatbuffers::Offset<ComputeSettings> ComputeSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateComputeSettings(_fbb, _o, _rehasher);
}
| |
// Serializes an object-API ComputeSettingsT into _fbb. Empty strings and
// null nested tables are written as absent fields (offset 0).
inline flatbuffers::Offset<ComputeSettings> CreateComputeSettings(flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _va bundles the arguments for vector-packing lambdas flatc may emit.
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ComputeSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _preference = _o->preference;
  auto _tflite_settings = _o->tflite_settings ? CreateTFLiteSettings(_fbb, _o->tflite_settings.get(), _rehasher) : 0;
  auto _model_namespace_for_statistics = _o->model_namespace_for_statistics.empty() ? 0 : _fbb.CreateString(_o->model_namespace_for_statistics);
  auto _model_identifier_for_statistics = _o->model_identifier_for_statistics.empty() ? 0 : _fbb.CreateString(_o->model_identifier_for_statistics);
  return tflite::CreateComputeSettings(
      _fbb,
      _preference,
      _tflite_settings,
      _model_namespace_for_statistics,
      _model_identifier_for_statistics);
}
| |
| |
// Field-wise equality for native NNAPISettingsT objects. The unique_ptr
// member fallback_settings compares equal when both sides are null, when both
// point to equal values, or when one side is null and the other equals a
// value-initialized (default) FallbackSettingsT.
inline bool operator==(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs) {
  return
      (lhs.accelerator_name == rhs.accelerator_name) &&
      (lhs.cache_directory == rhs.cache_directory) &&
      (lhs.model_token == rhs.model_token) &&
      (lhs.execution_preference == rhs.execution_preference) &&
      (lhs.no_of_nnapi_instances_to_cache == rhs.no_of_nnapi_instances_to_cache) &&
      ((!lhs.fallback_settings && !rhs.fallback_settings) || (lhs.fallback_settings && rhs.fallback_settings && *lhs.fallback_settings == *rhs.fallback_settings) || (lhs.fallback_settings && !rhs.fallback_settings && *lhs.fallback_settings == decltype(lhs.fallback_settings)::element_type()) || (rhs.fallback_settings && !lhs.fallback_settings && *rhs.fallback_settings == decltype(rhs.fallback_settings)::element_type())) &&
      (lhs.allow_nnapi_cpu_on_android_10_plus == rhs.allow_nnapi_cpu_on_android_10_plus) &&
      (lhs.execution_priority == rhs.execution_priority) &&
      (lhs.allow_dynamic_dimensions == rhs.allow_dynamic_dimensions) &&
      (lhs.allow_fp16_precision_for_fp32 == rhs.allow_fp16_precision_for_fp32);
}

// Inequality, defined as the negation of operator== above.
inline bool operator!=(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs) {
  return !(lhs == rhs);
}


// Unpacks this NNAPISettings table into a heap-allocated native object;
// the caller owns the result.
inline NNAPISettingsT *NNAPISettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new NNAPISettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each field into *_o; strings are copied only when present, and the
// nested fallback_settings table is recursively unpacked into a unique_ptr.
inline void NNAPISettings::UnPackTo(NNAPISettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = accelerator_name(); if (_e) _o->accelerator_name = _e->str(); }
  { auto _e = cache_directory(); if (_e) _o->cache_directory = _e->str(); }
  { auto _e = model_token(); if (_e) _o->model_token = _e->str(); }
  { auto _e = execution_preference(); _o->execution_preference = _e; }
  { auto _e = no_of_nnapi_instances_to_cache(); _o->no_of_nnapi_instances_to_cache = _e; }
  { auto _e = fallback_settings(); if (_e) _o->fallback_settings = std::unique_ptr<tflite::FallbackSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = allow_nnapi_cpu_on_android_10_plus(); _o->allow_nnapi_cpu_on_android_10_plus = _e; }
  { auto _e = execution_priority(); _o->execution_priority = _e; }
  { auto _e = allow_dynamic_dimensions(); _o->allow_dynamic_dimensions = _e; }
  { auto _e = allow_fp16_precision_for_fp32(); _o->allow_fp16_precision_for_fp32 = _e; }
}

// Serializes a native NNAPISettingsT back into a flatbuffer table.
inline flatbuffers::Offset<NNAPISettings> NNAPISettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateNNAPISettings(_fbb, _o, _rehasher);
}

// Builds an NNAPISettings table in _fbb from *_o. Empty strings and a null
// fallback_settings pointer are encoded as offset 0 (field omitted).
inline flatbuffers::Offset<NNAPISettings> CreateNNAPISettings(flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NNAPISettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _accelerator_name = _o->accelerator_name.empty() ? 0 : _fbb.CreateString(_o->accelerator_name);
  auto _cache_directory = _o->cache_directory.empty() ? 0 : _fbb.CreateString(_o->cache_directory);
  auto _model_token = _o->model_token.empty() ? 0 : _fbb.CreateString(_o->model_token);
  auto _execution_preference = _o->execution_preference;
  auto _no_of_nnapi_instances_to_cache = _o->no_of_nnapi_instances_to_cache;
  auto _fallback_settings = _o->fallback_settings ? CreateFallbackSettings(_fbb, _o->fallback_settings.get(), _rehasher) : 0;
  auto _allow_nnapi_cpu_on_android_10_plus = _o->allow_nnapi_cpu_on_android_10_plus;
  auto _execution_priority = _o->execution_priority;
  auto _allow_dynamic_dimensions = _o->allow_dynamic_dimensions;
  auto _allow_fp16_precision_for_fp32 = _o->allow_fp16_precision_for_fp32;
  return tflite::CreateNNAPISettings(
      _fbb,
      _accelerator_name,
      _cache_directory,
      _model_token,
      _execution_preference,
      _no_of_nnapi_instances_to_cache,
      _fallback_settings,
      _allow_nnapi_cpu_on_android_10_plus,
      _execution_priority,
      _allow_dynamic_dimensions,
      _allow_fp16_precision_for_fp32);
}
| |
| |
// Field-wise equality for native GPUSettingsT objects.
inline bool operator==(const GPUSettingsT &lhs, const GPUSettingsT &rhs) {
  return
      (lhs.is_precision_loss_allowed == rhs.is_precision_loss_allowed) &&
      (lhs.enable_quantized_inference == rhs.enable_quantized_inference) &&
      (lhs.force_backend == rhs.force_backend);
}

// Inequality, defined as the negation of operator== above.
inline bool operator!=(const GPUSettingsT &lhs, const GPUSettingsT &rhs) {
  return !(lhs == rhs);
}


// Unpacks this GPUSettings table into a heap-allocated native object;
// the caller owns the result.
inline GPUSettingsT *GPUSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new GPUSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each scalar field of this table into *_o.
inline void GPUSettings::UnPackTo(GPUSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = is_precision_loss_allowed(); _o->is_precision_loss_allowed = _e; }
  { auto _e = enable_quantized_inference(); _o->enable_quantized_inference = _e; }
  { auto _e = force_backend(); _o->force_backend = _e; }
}

// Serializes a native GPUSettingsT back into a flatbuffer table.
inline flatbuffers::Offset<GPUSettings> GPUSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateGPUSettings(_fbb, _o, _rehasher);
}

// Builds a GPUSettings table in _fbb from the scalar fields of *_o.
inline flatbuffers::Offset<GPUSettings> CreateGPUSettings(flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GPUSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _is_precision_loss_allowed = _o->is_precision_loss_allowed;
  auto _enable_quantized_inference = _o->enable_quantized_inference;
  auto _force_backend = _o->force_backend;
  return tflite::CreateGPUSettings(
      _fbb,
      _is_precision_loss_allowed,
      _enable_quantized_inference,
      _force_backend);
}
| |
| |
// Field-wise equality for native HexagonSettingsT objects.
inline bool operator==(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs) {
  return
      (lhs.debug_level == rhs.debug_level) &&
      (lhs.powersave_level == rhs.powersave_level) &&
      (lhs.print_graph_profile == rhs.print_graph_profile) &&
      (lhs.print_graph_debug == rhs.print_graph_debug);
}

// Inequality, defined as the negation of operator== above.
inline bool operator!=(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs) {
  return !(lhs == rhs);
}


// Unpacks this HexagonSettings table into a heap-allocated native object;
// the caller owns the result.
inline HexagonSettingsT *HexagonSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new HexagonSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each scalar field of this table into *_o.
inline void HexagonSettings::UnPackTo(HexagonSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = debug_level(); _o->debug_level = _e; }
  { auto _e = powersave_level(); _o->powersave_level = _e; }
  { auto _e = print_graph_profile(); _o->print_graph_profile = _e; }
  { auto _e = print_graph_debug(); _o->print_graph_debug = _e; }
}

// Serializes a native HexagonSettingsT back into a flatbuffer table.
inline flatbuffers::Offset<HexagonSettings> HexagonSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateHexagonSettings(_fbb, _o, _rehasher);
}

// Builds a HexagonSettings table in _fbb from the scalar fields of *_o.
inline flatbuffers::Offset<HexagonSettings> CreateHexagonSettings(flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HexagonSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _debug_level = _o->debug_level;
  auto _powersave_level = _o->powersave_level;
  auto _print_graph_profile = _o->print_graph_profile;
  auto _print_graph_debug = _o->print_graph_debug;
  return tflite::CreateHexagonSettings(
      _fbb,
      _debug_level,
      _powersave_level,
      _print_graph_profile,
      _print_graph_debug);
}
| |
| |
// Field-wise equality for native XNNPackSettingsT objects.
inline bool operator==(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs) {
  return
      (lhs.num_threads == rhs.num_threads);
}

// Inequality, defined as the negation of operator== above.
inline bool operator!=(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs) {
  return !(lhs == rhs);
}


// Unpacks this XNNPackSettings table into a heap-allocated native object;
// the caller owns the result.
inline XNNPackSettingsT *XNNPackSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new XNNPackSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies the num_threads field of this table into *_o.
inline void XNNPackSettings::UnPackTo(XNNPackSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = num_threads(); _o->num_threads = _e; }
}

// Serializes a native XNNPackSettingsT back into a flatbuffer table.
inline flatbuffers::Offset<XNNPackSettings> XNNPackSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateXNNPackSettings(_fbb, _o, _rehasher);
}

// Builds an XNNPackSettings table in _fbb from the num_threads field of *_o.
inline flatbuffers::Offset<XNNPackSettings> CreateXNNPackSettings(flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const XNNPackSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _num_threads = _o->num_threads;
  return tflite::CreateXNNPackSettings(
      _fbb,
      _num_threads);
}
| |
| |
// Field-wise equality for native EdgeTpuDeviceSpecT objects; device_paths
// is compared element-wise via std::vector::operator==.
inline bool operator==(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs) {
  return
      (lhs.platform_type == rhs.platform_type) &&
      (lhs.num_chips == rhs.num_chips) &&
      (lhs.device_paths == rhs.device_paths) &&
      (lhs.chip_family == rhs.chip_family);
}

// Inequality, defined as the negation of operator== above.
inline bool operator!=(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs) {
  return !(lhs == rhs);
}


// Unpacks this EdgeTpuDeviceSpec table into a heap-allocated native object;
// the caller owns the result.
inline EdgeTpuDeviceSpecT *EdgeTpuDeviceSpec::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new EdgeTpuDeviceSpecT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each field into *_o; the device_paths string vector is copied
// element-by-element when present in the buffer.
inline void EdgeTpuDeviceSpec::UnPackTo(EdgeTpuDeviceSpecT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = platform_type(); _o->platform_type = _e; }
  { auto _e = num_chips(); _o->num_chips = _e; }
  { auto _e = device_paths(); if (_e) { _o->device_paths.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->device_paths[_i] = _e->Get(_i)->str(); } } }
  { auto _e = chip_family(); _o->chip_family = _e; }
}

// Serializes a native EdgeTpuDeviceSpecT back into a flatbuffer table.
inline flatbuffers::Offset<EdgeTpuDeviceSpec> EdgeTpuDeviceSpec::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateEdgeTpuDeviceSpec(_fbb, _o, _rehasher);
}

// Builds an EdgeTpuDeviceSpec table in _fbb from *_o. An empty device_paths
// vector is encoded as offset 0 (field omitted).
inline flatbuffers::Offset<EdgeTpuDeviceSpec> CreateEdgeTpuDeviceSpec(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EdgeTpuDeviceSpecT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _platform_type = _o->platform_type;
  auto _num_chips = _o->num_chips;
  auto _device_paths = _o->device_paths.size() ? _fbb.CreateVectorOfStrings(_o->device_paths) : 0;
  auto _chip_family = _o->chip_family;
  return tflite::CreateEdgeTpuDeviceSpec(
      _fbb,
      _platform_type,
      _num_chips,
      _device_paths,
      _chip_family);
}
| |
| |
// Field-wise equality for native EdgeTpuInactivePowerConfigT objects.
inline bool operator==(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs) {
  return
      (lhs.inactive_power_state == rhs.inactive_power_state) &&
      (lhs.inactive_timeout_us == rhs.inactive_timeout_us);
}

// Inequality, defined as the negation of operator== above.
inline bool operator!=(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs) {
  return !(lhs == rhs);
}


// Unpacks this EdgeTpuInactivePowerConfig table into a heap-allocated native
// object; the caller owns the result.
inline EdgeTpuInactivePowerConfigT *EdgeTpuInactivePowerConfig::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new EdgeTpuInactivePowerConfigT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each scalar field of this table into *_o.
inline void EdgeTpuInactivePowerConfig::UnPackTo(EdgeTpuInactivePowerConfigT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = inactive_power_state(); _o->inactive_power_state = _e; }
  { auto _e = inactive_timeout_us(); _o->inactive_timeout_us = _e; }
}

// Serializes a native EdgeTpuInactivePowerConfigT back into a flatbuffer table.
inline flatbuffers::Offset<EdgeTpuInactivePowerConfig> EdgeTpuInactivePowerConfig::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateEdgeTpuInactivePowerConfig(_fbb, _o, _rehasher);
}

// Builds an EdgeTpuInactivePowerConfig table in _fbb from the scalar fields
// of *_o.
inline flatbuffers::Offset<EdgeTpuInactivePowerConfig> CreateEdgeTpuInactivePowerConfig(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EdgeTpuInactivePowerConfigT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _inactive_power_state = _o->inactive_power_state;
  auto _inactive_timeout_us = _o->inactive_timeout_us;
  return tflite::CreateEdgeTpuInactivePowerConfig(
      _fbb,
      _inactive_power_state,
      _inactive_timeout_us);
}
| |
| |
// Field-wise equality for native EdgeTpuSettingsT objects. NOTE(review):
// inactive_power_configs holds unique_ptrs, so the vector comparison here
// compares pointer values, not pointed-to configs — two deep-equal but
// separately allocated vectors compare unequal. This matches the flatc
// generator's output for vectors of tables. The unique_ptr member
// edgetpu_device_spec compares equal when both sides are null, both point to
// equal values, or one is null and the other equals a default-constructed
// value.
inline bool operator==(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs) {
  return
      (lhs.inference_power_state == rhs.inference_power_state) &&
      (lhs.inactive_power_configs == rhs.inactive_power_configs) &&
      (lhs.inference_priority == rhs.inference_priority) &&
      ((!lhs.edgetpu_device_spec && !rhs.edgetpu_device_spec) || (lhs.edgetpu_device_spec && rhs.edgetpu_device_spec && *lhs.edgetpu_device_spec == *rhs.edgetpu_device_spec) || (lhs.edgetpu_device_spec && !rhs.edgetpu_device_spec && *lhs.edgetpu_device_spec == decltype(lhs.edgetpu_device_spec)::element_type()) || (rhs.edgetpu_device_spec && !lhs.edgetpu_device_spec && *rhs.edgetpu_device_spec == decltype(rhs.edgetpu_device_spec)::element_type()));
}

// Inequality, defined as the negation of operator== above.
inline bool operator!=(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs) {
  return !(lhs == rhs);
}


// Unpacks this EdgeTpuSettings table into a heap-allocated native object;
// the caller owns the result.
inline EdgeTpuSettingsT *EdgeTpuSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new EdgeTpuSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each field into *_o; the inactive_power_configs vector and the
// edgetpu_device_spec table are recursively unpacked into unique_ptrs.
inline void EdgeTpuSettings::UnPackTo(EdgeTpuSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = inference_power_state(); _o->inference_power_state = _e; }
  { auto _e = inactive_power_configs(); if (_e) { _o->inactive_power_configs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inactive_power_configs[_i] = std::unique_ptr<tflite::EdgeTpuInactivePowerConfigT>(_e->Get(_i)->UnPack(_resolver)); } } }
  { auto _e = inference_priority(); _o->inference_priority = _e; }
  { auto _e = edgetpu_device_spec(); if (_e) _o->edgetpu_device_spec = std::unique_ptr<tflite::EdgeTpuDeviceSpecT>(_e->UnPack(_resolver)); }
}

// Serializes a native EdgeTpuSettingsT back into a flatbuffer table.
inline flatbuffers::Offset<EdgeTpuSettings> EdgeTpuSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateEdgeTpuSettings(_fbb, _o, _rehasher);
}

// Builds an EdgeTpuSettings table in _fbb from *_o. Each element of
// inactive_power_configs is packed through a lambda over _VectorArgs; empty
// vectors and null sub-tables are encoded as offset 0 (field omitted).
inline flatbuffers::Offset<EdgeTpuSettings> CreateEdgeTpuSettings(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EdgeTpuSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _inference_power_state = _o->inference_power_state;
  auto _inactive_power_configs = _o->inactive_power_configs.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>> (_o->inactive_power_configs.size(), [](size_t i, _VectorArgs *__va) { return CreateEdgeTpuInactivePowerConfig(*__va->__fbb, __va->__o->inactive_power_configs[i].get(), __va->__rehasher); }, &_va ) : 0;
  auto _inference_priority = _o->inference_priority;
  auto _edgetpu_device_spec = _o->edgetpu_device_spec ? CreateEdgeTpuDeviceSpec(_fbb, _o->edgetpu_device_spec.get(), _rehasher) : 0;
  return tflite::CreateEdgeTpuSettings(
      _fbb,
      _inference_power_state,
      _inactive_power_configs,
      _inference_priority,
      _edgetpu_device_spec);
}
| |
| |
// Field-wise equality for native CoralSettingsT objects.
inline bool operator==(const CoralSettingsT &lhs, const CoralSettingsT &rhs) {
  return
      (lhs.device == rhs.device) &&
      (lhs.performance == rhs.performance) &&
      (lhs.usb_always_dfu == rhs.usb_always_dfu) &&
      (lhs.usb_max_bulk_in_queue_length == rhs.usb_max_bulk_in_queue_length);
}

// Inequality, defined as the negation of operator== above.
inline bool operator!=(const CoralSettingsT &lhs, const CoralSettingsT &rhs) {
  return !(lhs == rhs);
}


// Unpacks this CoralSettings table into a heap-allocated native object;
// the caller owns the result.
inline CoralSettingsT *CoralSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new CoralSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each field into *_o; the device string is copied only when present
// in the buffer.
inline void CoralSettings::UnPackTo(CoralSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = device(); if (_e) _o->device = _e->str(); }
  { auto _e = performance(); _o->performance = _e; }
  { auto _e = usb_always_dfu(); _o->usb_always_dfu = _e; }
  { auto _e = usb_max_bulk_in_queue_length(); _o->usb_max_bulk_in_queue_length = _e; }
}

// Serializes a native CoralSettingsT back into a flatbuffer table.
inline flatbuffers::Offset<CoralSettings> CoralSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateCoralSettings(_fbb, _o, _rehasher);
}

// Builds a CoralSettings table in _fbb from *_o. An empty device string is
// encoded as offset 0 (field omitted).
inline flatbuffers::Offset<CoralSettings> CreateCoralSettings(flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CoralSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _device = _o->device.empty() ? 0 : _fbb.CreateString(_o->device);
  auto _performance = _o->performance;
  auto _usb_always_dfu = _o->usb_always_dfu;
  auto _usb_max_bulk_in_queue_length = _o->usb_max_bulk_in_queue_length;
  return tflite::CreateCoralSettings(
      _fbb,
      _device,
      _performance,
      _usb_always_dfu,
      _usb_max_bulk_in_queue_length);
}
| |
| |
// Field-wise equality for native CPUSettingsT objects.
inline bool operator==(const CPUSettingsT &lhs, const CPUSettingsT &rhs) {
  return
      (lhs.num_threads == rhs.num_threads);
}

// Inequality, defined as the negation of operator== above.
inline bool operator!=(const CPUSettingsT &lhs, const CPUSettingsT &rhs) {
  return !(lhs == rhs);
}


// Unpacks this CPUSettings table into a heap-allocated native object;
// the caller owns the result.
inline CPUSettingsT *CPUSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new CPUSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies the num_threads field of this table into *_o.
inline void CPUSettings::UnPackTo(CPUSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = num_threads(); _o->num_threads = _e; }
}

// Serializes a native CPUSettingsT back into a flatbuffer table.
inline flatbuffers::Offset<CPUSettings> CPUSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateCPUSettings(_fbb, _o, _rehasher);
}

// Builds a CPUSettings table in _fbb from the num_threads field of *_o.
inline flatbuffers::Offset<CPUSettings> CreateCPUSettings(flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CPUSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _num_threads = _o->num_threads;
  return tflite::CreateCPUSettings(
      _fbb,
      _num_threads);
}
| |
| |
// Field-wise equality for native TFLiteSettingsT objects. Each unique_ptr
// sub-table member compares equal when both sides are null, when both point
// to equal values, or when one side is null and the other equals a
// value-initialized (default) object of the pointed-to type.
inline bool operator==(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs) {
  return
      (lhs.delegate == rhs.delegate) &&
      ((!lhs.nnapi_settings && !rhs.nnapi_settings) || (lhs.nnapi_settings && rhs.nnapi_settings && *lhs.nnapi_settings == *rhs.nnapi_settings) || (lhs.nnapi_settings && !rhs.nnapi_settings && *lhs.nnapi_settings == decltype(lhs.nnapi_settings)::element_type()) || (rhs.nnapi_settings && !lhs.nnapi_settings && *rhs.nnapi_settings == decltype(rhs.nnapi_settings)::element_type())) &&
      ((!lhs.gpu_settings && !rhs.gpu_settings) || (lhs.gpu_settings && rhs.gpu_settings && *lhs.gpu_settings == *rhs.gpu_settings) || (lhs.gpu_settings && !rhs.gpu_settings && *lhs.gpu_settings == decltype(lhs.gpu_settings)::element_type()) || (rhs.gpu_settings && !lhs.gpu_settings && *rhs.gpu_settings == decltype(rhs.gpu_settings)::element_type())) &&
      ((!lhs.hexagon_settings && !rhs.hexagon_settings) || (lhs.hexagon_settings && rhs.hexagon_settings && *lhs.hexagon_settings == *rhs.hexagon_settings) || (lhs.hexagon_settings && !rhs.hexagon_settings && *lhs.hexagon_settings == decltype(lhs.hexagon_settings)::element_type()) || (rhs.hexagon_settings && !lhs.hexagon_settings && *rhs.hexagon_settings == decltype(rhs.hexagon_settings)::element_type())) &&
      ((!lhs.xnnpack_settings && !rhs.xnnpack_settings) || (lhs.xnnpack_settings && rhs.xnnpack_settings && *lhs.xnnpack_settings == *rhs.xnnpack_settings) || (lhs.xnnpack_settings && !rhs.xnnpack_settings && *lhs.xnnpack_settings == decltype(lhs.xnnpack_settings)::element_type()) || (rhs.xnnpack_settings && !lhs.xnnpack_settings && *rhs.xnnpack_settings == decltype(rhs.xnnpack_settings)::element_type())) &&
      ((!lhs.cpu_settings && !rhs.cpu_settings) || (lhs.cpu_settings && rhs.cpu_settings && *lhs.cpu_settings == *rhs.cpu_settings) || (lhs.cpu_settings && !rhs.cpu_settings && *lhs.cpu_settings == decltype(lhs.cpu_settings)::element_type()) || (rhs.cpu_settings && !lhs.cpu_settings && *rhs.cpu_settings == decltype(rhs.cpu_settings)::element_type())) &&
      (lhs.max_delegated_partitions == rhs.max_delegated_partitions) &&
      ((!lhs.edgetpu_settings && !rhs.edgetpu_settings) || (lhs.edgetpu_settings && rhs.edgetpu_settings && *lhs.edgetpu_settings == *rhs.edgetpu_settings) || (lhs.edgetpu_settings && !rhs.edgetpu_settings && *lhs.edgetpu_settings == decltype(lhs.edgetpu_settings)::element_type()) || (rhs.edgetpu_settings && !lhs.edgetpu_settings && *rhs.edgetpu_settings == decltype(rhs.edgetpu_settings)::element_type())) &&
      ((!lhs.coral_settings && !rhs.coral_settings) || (lhs.coral_settings && rhs.coral_settings && *lhs.coral_settings == *rhs.coral_settings) || (lhs.coral_settings && !rhs.coral_settings && *lhs.coral_settings == decltype(lhs.coral_settings)::element_type()) || (rhs.coral_settings && !lhs.coral_settings && *rhs.coral_settings == decltype(rhs.coral_settings)::element_type())) &&
      ((!lhs.fallback_settings && !rhs.fallback_settings) || (lhs.fallback_settings && rhs.fallback_settings && *lhs.fallback_settings == *rhs.fallback_settings) || (lhs.fallback_settings && !rhs.fallback_settings && *lhs.fallback_settings == decltype(lhs.fallback_settings)::element_type()) || (rhs.fallback_settings && !lhs.fallback_settings && *rhs.fallback_settings == decltype(rhs.fallback_settings)::element_type()));
}

// Inequality, defined as the negation of operator== above.
inline bool operator!=(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs) {
  return !(lhs == rhs);
}


// Unpacks this TFLiteSettings table into a heap-allocated native object;
// the caller owns the result.
inline TFLiteSettingsT *TFLiteSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new TFLiteSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each field into *_o; every per-delegate sub-table present in the
// buffer is recursively unpacked into a unique_ptr.
inline void TFLiteSettings::UnPackTo(TFLiteSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = delegate(); _o->delegate = _e; }
  { auto _e = nnapi_settings(); if (_e) _o->nnapi_settings = std::unique_ptr<tflite::NNAPISettingsT>(_e->UnPack(_resolver)); }
  { auto _e = gpu_settings(); if (_e) _o->gpu_settings = std::unique_ptr<tflite::GPUSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = hexagon_settings(); if (_e) _o->hexagon_settings = std::unique_ptr<tflite::HexagonSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = xnnpack_settings(); if (_e) _o->xnnpack_settings = std::unique_ptr<tflite::XNNPackSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = cpu_settings(); if (_e) _o->cpu_settings = std::unique_ptr<tflite::CPUSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = max_delegated_partitions(); _o->max_delegated_partitions = _e; }
  { auto _e = edgetpu_settings(); if (_e) _o->edgetpu_settings = std::unique_ptr<tflite::EdgeTpuSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = coral_settings(); if (_e) _o->coral_settings = std::unique_ptr<tflite::CoralSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = fallback_settings(); if (_e) _o->fallback_settings = std::unique_ptr<tflite::FallbackSettingsT>(_e->UnPack(_resolver)); }
}

// Serializes a native TFLiteSettingsT back into a flatbuffer table.
inline flatbuffers::Offset<TFLiteSettings> TFLiteSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateTFLiteSettings(_fbb, _o, _rehasher);
}

// Builds a TFLiteSettings table in _fbb from *_o. Null sub-table pointers
// are encoded as offset 0, i.e. the corresponding field is omitted.
inline flatbuffers::Offset<TFLiteSettings> CreateTFLiteSettings(flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TFLiteSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _delegate = _o->delegate;
  auto _nnapi_settings = _o->nnapi_settings ? CreateNNAPISettings(_fbb, _o->nnapi_settings.get(), _rehasher) : 0;
  auto _gpu_settings = _o->gpu_settings ? CreateGPUSettings(_fbb, _o->gpu_settings.get(), _rehasher) : 0;
  auto _hexagon_settings = _o->hexagon_settings ? CreateHexagonSettings(_fbb, _o->hexagon_settings.get(), _rehasher) : 0;
  auto _xnnpack_settings = _o->xnnpack_settings ? CreateXNNPackSettings(_fbb, _o->xnnpack_settings.get(), _rehasher) : 0;
  auto _cpu_settings = _o->cpu_settings ? CreateCPUSettings(_fbb, _o->cpu_settings.get(), _rehasher) : 0;
  auto _max_delegated_partitions = _o->max_delegated_partitions;
  auto _edgetpu_settings = _o->edgetpu_settings ? CreateEdgeTpuSettings(_fbb, _o->edgetpu_settings.get(), _rehasher) : 0;
  auto _coral_settings = _o->coral_settings ? CreateCoralSettings(_fbb, _o->coral_settings.get(), _rehasher) : 0;
  auto _fallback_settings = _o->fallback_settings ? CreateFallbackSettings(_fbb, _o->fallback_settings.get(), _rehasher) : 0;
  return tflite::CreateTFLiteSettings(
      _fbb,
      _delegate,
      _nnapi_settings,
      _gpu_settings,
      _hexagon_settings,
      _xnnpack_settings,
      _cpu_settings,
      _max_delegated_partitions,
      _edgetpu_settings,
      _coral_settings,
      _fallback_settings);
}
| |
| |
// Field-wise equality for native FallbackSettingsT objects.
inline bool operator==(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs) {
  return
      (lhs.allow_automatic_fallback_on_compilation_error == rhs.allow_automatic_fallback_on_compilation_error) &&
      (lhs.allow_automatic_fallback_on_execution_error == rhs.allow_automatic_fallback_on_execution_error);
}

// Inequality, defined as the negation of operator== above.
inline bool operator!=(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs) {
  return !(lhs == rhs);
}


// Unpacks this FallbackSettings table into a heap-allocated native object;
// the caller owns the result.
inline FallbackSettingsT *FallbackSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new FallbackSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each scalar field of this table into *_o.
inline void FallbackSettings::UnPackTo(FallbackSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = allow_automatic_fallback_on_compilation_error(); _o->allow_automatic_fallback_on_compilation_error = _e; }
  { auto _e = allow_automatic_fallback_on_execution_error(); _o->allow_automatic_fallback_on_execution_error = _e; }
}

// Serializes a native FallbackSettingsT back into a flatbuffer table.
inline flatbuffers::Offset<FallbackSettings> FallbackSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateFallbackSettings(_fbb, _o, _rehasher);
}

// Builds a FallbackSettings table in _fbb from the scalar fields of *_o.
inline flatbuffers::Offset<FallbackSettings> CreateFallbackSettings(flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FallbackSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _allow_automatic_fallback_on_compilation_error = _o->allow_automatic_fallback_on_compilation_error;
  auto _allow_automatic_fallback_on_execution_error = _o->allow_automatic_fallback_on_execution_error;
  return tflite::CreateFallbackSettings(
      _fbb,
      _allow_automatic_fallback_on_compilation_error,
      _allow_automatic_fallback_on_execution_error);
}
| |
| |
// Field-wise equality for native BenchmarkMetricT objects; values is
// compared element-wise via std::vector::operator==.
inline bool operator==(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs) {
  return
      (lhs.name == rhs.name) &&
      (lhs.values == rhs.values);
}

// Inequality, defined as the negation of operator== above.
inline bool operator!=(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs) {
  return !(lhs == rhs);
}


// Unpacks this BenchmarkMetric table into a heap-allocated native object;
// the caller owns the result.
inline BenchmarkMetricT *BenchmarkMetric::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new BenchmarkMetricT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies the name string (when present) and the values vector
// element-by-element into *_o.
inline void BenchmarkMetric::UnPackTo(BenchmarkMetricT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = name(); if (_e) _o->name = _e->str(); }
  { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } }
}

// Serializes a native BenchmarkMetricT back into a flatbuffer table.
inline flatbuffers::Offset<BenchmarkMetric> BenchmarkMetric::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkMetric(_fbb, _o, _rehasher);
}

// Builds a BenchmarkMetric table in _fbb from *_o. An empty name string or
// empty values vector is encoded as offset 0 (field omitted).
inline flatbuffers::Offset<BenchmarkMetric> CreateBenchmarkMetric(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkMetricT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
  auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0;
  return tflite::CreateBenchmarkMetric(
      _fbb,
      _name,
      _values);
}
| |
| |
| inline bool operator==(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs) { |
| return |
| (lhs.initialization_time_us == rhs.initialization_time_us) && |
| (lhs.inference_time_us == rhs.inference_time_us) && |
| (lhs.max_memory_kb == rhs.max_memory_kb) && |
| (lhs.ok == rhs.ok) && |
| (lhs.metrics == rhs.metrics); |
| } |
| |
// Inequality is defined as the negation of operator==.
inline bool operator!=(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs) {
  return !(lhs == rhs);
}
| |
| |
// Heap-allocates a new object-API table and fills it from this flatbuffer.
// Caller takes ownership of the returned pointer.
inline BenchmarkResultT *BenchmarkResult::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new BenchmarkResultT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each field of this flatbuffer into `_o`. Scalar fields are assigned
// unconditionally; vector fields are only copied when present in the buffer.
// Each `metrics` element is recursively unpacked into a fresh unique_ptr.
inline void BenchmarkResult::UnPackTo(BenchmarkResultT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = initialization_time_us(); if (_e) { _o->initialization_time_us.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->initialization_time_us[_i] = _e->Get(_i); } } }
  { auto _e = inference_time_us(); if (_e) { _o->inference_time_us.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inference_time_us[_i] = _e->Get(_i); } } }
  { auto _e = max_memory_kb(); _o->max_memory_kb = _e; }
  { auto _e = ok(); _o->ok = _e; }
  { auto _e = metrics(); if (_e) { _o->metrics.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metrics[_i] = std::unique_ptr<tflite::BenchmarkMetricT>(_e->Get(_i)->UnPack(_resolver)); } } }
}

// Serializes an object-API table into `_fbb`; thin wrapper over
// CreateBenchmarkResult below.
inline flatbuffers::Offset<BenchmarkResult> BenchmarkResult::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkResult(_fbb, _o, _rehasher);
}

// Builds the BenchmarkResult table from `_o`. Empty vectors are written as
// absent fields (offset 0). Each metrics element is packed via a stateless
// lambda that reaches the builder/object/rehasher through `_va`.
inline flatbuffers::Offset<BenchmarkResult> CreateBenchmarkResult(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkResultT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _initialization_time_us = _o->initialization_time_us.size() ? _fbb.CreateVector(_o->initialization_time_us) : 0;
  auto _inference_time_us = _o->inference_time_us.size() ? _fbb.CreateVector(_o->inference_time_us) : 0;
  auto _max_memory_kb = _o->max_memory_kb;
  auto _ok = _o->ok;
  auto _metrics = _o->metrics.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::BenchmarkMetric>> (_o->metrics.size(), [](size_t i, _VectorArgs *__va) { return CreateBenchmarkMetric(*__va->__fbb, __va->__o->metrics[i].get(), __va->__rehasher); }, &_va ) : 0;
  return tflite::CreateBenchmarkResult(
      _fbb,
      _initialization_time_us,
      _inference_time_us,
      _max_memory_kb,
      _ok,
      _metrics);
}
| |
| |
// Field-wise equality for the object-API type ErrorCodeT. All members are
// scalar/enum values, so direct comparison is sufficient.
inline bool operator==(const ErrorCodeT &lhs, const ErrorCodeT &rhs) {
  return
      (lhs.source == rhs.source) &&
      (lhs.tflite_error == rhs.tflite_error) &&
      (lhs.underlying_api_error == rhs.underlying_api_error);
}

// Inequality is defined as the negation of operator==.
inline bool operator!=(const ErrorCodeT &lhs, const ErrorCodeT &rhs) {
  return !(lhs == rhs);
}
| |
| |
// Heap-allocates a new object-API table and fills it from this flatbuffer.
// Caller takes ownership of the returned pointer.
inline ErrorCodeT *ErrorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new ErrorCodeT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each (scalar/enum) field of this flatbuffer into `_o`.
inline void ErrorCode::UnPackTo(ErrorCodeT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = source(); _o->source = _e; }
  { auto _e = tflite_error(); _o->tflite_error = _e; }
  { auto _e = underlying_api_error(); _o->underlying_api_error = _e; }
}

// Serializes an object-API table into `_fbb`; thin wrapper over
// CreateErrorCode below.
inline flatbuffers::Offset<ErrorCode> ErrorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateErrorCode(_fbb, _o, _rehasher);
}

// Builds the ErrorCode table from `_o`. Only scalar fields, so `_va` is
// declared (generator boilerplate) but unused.
inline flatbuffers::Offset<ErrorCode> CreateErrorCode(flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ErrorCodeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _source = _o->source;
  auto _tflite_error = _o->tflite_error;
  auto _underlying_api_error = _o->underlying_api_error;
  return tflite::CreateErrorCode(
      _fbb,
      _source,
      _tflite_error,
      _underlying_api_error);
}
| |
| |
| inline bool operator==(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs) { |
| return |
| (lhs.stage == rhs.stage) && |
| (lhs.exit_code == rhs.exit_code) && |
| (lhs.signal == rhs.signal) && |
| (lhs.error_code == rhs.error_code); |
| } |
| |
// Inequality is defined as the negation of operator==.
inline bool operator!=(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs) {
  return !(lhs == rhs);
}
| |
| |
// Heap-allocates a new object-API table and fills it from this flatbuffer.
// Caller takes ownership of the returned pointer.
inline BenchmarkErrorT *BenchmarkError::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new BenchmarkErrorT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each field of this flatbuffer into `_o`. Scalars are assigned
// directly; each `error_code` element is recursively unpacked into a fresh
// unique_ptr when the vector is present in the buffer.
inline void BenchmarkError::UnPackTo(BenchmarkErrorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = stage(); _o->stage = _e; }
  { auto _e = exit_code(); _o->exit_code = _e; }
  { auto _e = signal(); _o->signal = _e; }
  { auto _e = error_code(); if (_e) { _o->error_code.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->error_code[_i] = std::unique_ptr<tflite::ErrorCodeT>(_e->Get(_i)->UnPack(_resolver)); } } }
}

// Serializes an object-API table into `_fbb`; thin wrapper over
// CreateBenchmarkError below.
inline flatbuffers::Offset<BenchmarkError> BenchmarkError::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkError(_fbb, _o, _rehasher);
}

// Builds the BenchmarkError table from `_o`. An empty `error_code` vector is
// written as an absent field (offset 0); its elements are packed via a
// stateless lambda that reaches the builder/object/rehasher through `_va`.
inline flatbuffers::Offset<BenchmarkError> CreateBenchmarkError(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkErrorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _stage = _o->stage;
  auto _exit_code = _o->exit_code;
  auto _signal = _o->signal;
  auto _error_code = _o->error_code.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::ErrorCode>> (_o->error_code.size(), [](size_t i, _VectorArgs *__va) { return CreateErrorCode(*__va->__fbb, __va->__o->error_code[i].get(), __va->__rehasher); }, &_va ) : 0;
  return tflite::CreateBenchmarkError(
      _fbb,
      _stage,
      _exit_code,
      _signal,
      _error_code);
}
| |
| |
// Field-wise equality for the object-API type BenchmarkEventT.
// The unique_ptr sub-table members (tflite_settings, result, error) are
// compared by pointee value: two nulls are equal, two non-nulls compare the
// pointed-to tables, and a null on one side equals a non-null on the other
// only when the pointee equals a default-constructed table.
inline bool operator==(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs) {
  return
      ((!lhs.tflite_settings && !rhs.tflite_settings) || (lhs.tflite_settings && rhs.tflite_settings && *lhs.tflite_settings == *rhs.tflite_settings) || (lhs.tflite_settings && !rhs.tflite_settings && *lhs.tflite_settings == decltype(lhs.tflite_settings)::element_type()) || (rhs.tflite_settings && !lhs.tflite_settings && *rhs.tflite_settings == decltype(rhs.tflite_settings)::element_type())) &&
      (lhs.event_type == rhs.event_type) &&
      ((!lhs.result && !rhs.result) || (lhs.result && rhs.result && *lhs.result == *rhs.result) || (lhs.result && !rhs.result && *lhs.result == decltype(lhs.result)::element_type()) || (rhs.result && !lhs.result && *rhs.result == decltype(rhs.result)::element_type())) &&
      ((!lhs.error && !rhs.error) || (lhs.error && rhs.error && *lhs.error == *rhs.error) || (lhs.error && !rhs.error && *lhs.error == decltype(lhs.error)::element_type()) || (rhs.error && !lhs.error && *rhs.error == decltype(rhs.error)::element_type())) &&
      (lhs.boottime_us == rhs.boottime_us) &&
      (lhs.wallclock_us == rhs.wallclock_us);
}

// Inequality is defined as the negation of operator==.
inline bool operator!=(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs) {
  return !(lhs == rhs);
}
| |
| |
// Heap-allocates a new object-API table and fills it from this flatbuffer.
// Caller takes ownership of the returned pointer.
inline BenchmarkEventT *BenchmarkEvent::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new BenchmarkEventT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies each field of this flatbuffer into `_o`. Sub-tables present in the
// buffer are recursively unpacked into fresh unique_ptrs; absent sub-tables
// leave the members null. Scalars are assigned directly.
inline void BenchmarkEvent::UnPackTo(BenchmarkEventT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = tflite_settings(); if (_e) _o->tflite_settings = std::unique_ptr<tflite::TFLiteSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = event_type(); _o->event_type = _e; }
  { auto _e = result(); if (_e) _o->result = std::unique_ptr<tflite::BenchmarkResultT>(_e->UnPack(_resolver)); }
  { auto _e = error(); if (_e) _o->error = std::unique_ptr<tflite::BenchmarkErrorT>(_e->UnPack(_resolver)); }
  { auto _e = boottime_us(); _o->boottime_us = _e; }
  { auto _e = wallclock_us(); _o->wallclock_us = _e; }
}

// Serializes an object-API table into `_fbb`; thin wrapper over
// CreateBenchmarkEvent below.
inline flatbuffers::Offset<BenchmarkEvent> BenchmarkEvent::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkEvent(_fbb, _o, _rehasher);
}

// Builds the BenchmarkEvent table from `_o`. Null sub-table members are
// written as absent fields (offset 0); non-null ones are packed recursively.
inline flatbuffers::Offset<BenchmarkEvent> CreateBenchmarkEvent(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkEventT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _tflite_settings = _o->tflite_settings ? CreateTFLiteSettings(_fbb, _o->tflite_settings.get(), _rehasher) : 0;
  auto _event_type = _o->event_type;
  auto _result = _o->result ? CreateBenchmarkResult(_fbb, _o->result.get(), _rehasher) : 0;
  auto _error = _o->error ? CreateBenchmarkError(_fbb, _o->error.get(), _rehasher) : 0;
  auto _boottime_us = _o->boottime_us;
  auto _wallclock_us = _o->wallclock_us;
  return tflite::CreateBenchmarkEvent(
      _fbb,
      _tflite_settings,
      _event_type,
      _result,
      _error,
      _boottime_us,
      _wallclock_us);
}
| |
| } // namespace tflite |
| |
| #endif // FLATBUFFERS_GENERATED_CONFIGURATION_TFLITE_H_ |
| |