Merge "Do Not Pull Metric Data If Not Needed"
diff --git a/statsd/src/external/PullDataReceiver.h b/statsd/src/external/PullDataReceiver.h
index 8e3a8a1..47e1377 100644
--- a/statsd/src/external/PullDataReceiver.h
+++ b/statsd/src/external/PullDataReceiver.h
@@ -23,17 +23,23 @@
 namespace os {
 namespace statsd {
 
+// Indicates whether a pull was needed and, if so, whether it succeeded.
+enum PullResult { PULL_RESULT_SUCCESS = 1, PULL_RESULT_FAIL = 2, PULL_NOT_NEEDED = 3 };
+
 class PullDataReceiver : virtual public RefBase{
  public:
   virtual ~PullDataReceiver() {}
   /**
    * @param data The pulled data.
-   * @param pullSuccess Whether the pull succeeded. If the pull does not succeed, the data for the
-   * bucket should be invalidated.
+   * @param pullResult Whether the pull was needed and, if so, whether it succeeded. If the
+   * pull fails, the data for the bucket should be invalidated.
    * @param originalPullTimeNs This is when all the pulls have been initiated (elapsed time).
    */
-  virtual void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data, bool pullSuccess,
-                            int64_t originalPullTimeNs) = 0;
+  virtual void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data,
+                            PullResult pullResult, int64_t originalPullTimeNs) = 0;
+
+  // Whether this receiver currently needs pulled data; consulted before a pull is started.
+  virtual bool isPullNeeded() const = 0;
 };
 
 }  // namespace statsd
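
For illustration only (not part of this change): a minimal sketch of how a receiver might implement the extended interface, mirroring what the metric producers below do. The class, its members, and the include path are hypothetical.

    // Hypothetical receiver; sketches the PullResult / isPullNeeded() contract.
    #include <cstdint>
    #include <memory>
    #include <vector>

    #include "external/PullDataReceiver.h"  // assumed include path

    namespace android {
    namespace os {
    namespace statsd {

    class ExampleReceiver : public PullDataReceiver {
      public:
        void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data,
                          PullResult pullResult, int64_t originalPullTimeNs) override {
            if (pullResult == PullResult::PULL_RESULT_FAIL) {
                // A pull was attempted and failed: the current bucket can no longer be trusted.
                mBucketValid = false;
            } else if (pullResult == PullResult::PULL_RESULT_SUCCESS) {
                mEventCount += data.size();
                mLastPullTimeNs = originalPullTimeNs;
            }
            // PULL_NOT_NEEDED: nothing was pulled for this receiver; leave state untouched.
        }

        // Consulted by StatsPullerManager before pulling when pulls are limited.
        bool isPullNeeded() const override {
            return mActive;
        }

      private:
        bool mActive = true;
        bool mBucketValid = true;
        int64_t mLastPullTimeNs = 0;
        size_t mEventCount = 0;
    };

    }  // namespace statsd
    }  // namespace os
    }  // namespace android
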
diff --git a/statsd/src/external/StatsPullerManager.cpp b/statsd/src/external/StatsPullerManager.cpp
index 90e1cd0..ca5ee5b 100644
--- a/statsd/src/external/StatsPullerManager.cpp
+++ b/statsd/src/external/StatsPullerManager.cpp
@@ -32,6 +32,7 @@
 #include "../statscompanion_util.h"
 #include "StatsCallbackPuller.h"
 #include "TrainInfoPuller.h"
+#include "flags/FlagProvider.h"
 #include "statslog_statsd.h"
 
 using std::shared_ptr;
@@ -49,7 +50,8 @@
               // TrainInfo.
               {{.atomTag = util::TRAIN_INFO, .uid = AID_STATSD}, new TrainInfoPuller()},
       }),
-      mNextPullTimeNs(NO_ALARM_UPDATE) {
+      mNextPullTimeNs(NO_ALARM_UPDATE),
+      mLimitPull(FlagProvider::getInstance().getBootFlagBool(LIMIT_PULL_FLAG, FLAG_FALSE)) {
 }
 
 bool StatsPullerManager::Pull(int tagId, const ConfigKey& configKey, const int64_t eventTimeNs,
@@ -227,11 +229,34 @@
         vector<ReceiverInfo*> receivers;
         if (pair.second.size() != 0) {
             for (ReceiverInfo& receiverInfo : pair.second) {
-                if (receiverInfo.nextPullTimeNs <= elapsedTimeNs) {
-                    receivers.push_back(&receiverInfo);
+                // If mLimitPull is true, ask the receiver whether it currently needs pulled
+                // data (pullNecessary). If so and the bucket boundary has passed, add the
+                // receiver to the list that pulls on this alarm. Otherwise, if the boundary
+                // has passed, report PULL_NOT_NEEDED and advance the receiver's next pull time.
+                if (mLimitPull) {
+                    sp<PullDataReceiver> receiverPtr = receiverInfo.receiver.promote();
+                    const bool pullNecessary =
+                            receiverPtr != nullptr && receiverPtr->isPullNeeded();
+                    if (receiverInfo.nextPullTimeNs <= elapsedTimeNs && pullNecessary) {
+                        receivers.push_back(&receiverInfo);
+                    } else {
+                        if (receiverInfo.nextPullTimeNs <= elapsedTimeNs) {
+                            if (receiverPtr != nullptr) {
+                                receiverPtr->onDataPulled({}, PullResult::PULL_NOT_NEEDED,
+                                                          elapsedTimeNs);
+                            }
+                            int numBucketsAhead = (elapsedTimeNs - receiverInfo.nextPullTimeNs) /
+                                                  receiverInfo.intervalNs;
+                            receiverInfo.nextPullTimeNs +=
+                                    (numBucketsAhead + 1) * receiverInfo.intervalNs;
+                        }
+                        minNextPullTimeNs = min(receiverInfo.nextPullTimeNs, minNextPullTimeNs);
+                    }
                 } else {
-                    if (receiverInfo.nextPullTimeNs < minNextPullTimeNs) {
-                        minNextPullTimeNs = receiverInfo.nextPullTimeNs;
+                    if (receiverInfo.nextPullTimeNs <= elapsedTimeNs) {
+                        receivers.push_back(&receiverInfo);
+                    } else {
+                        minNextPullTimeNs = min(receiverInfo.nextPullTimeNs, minNextPullTimeNs);
                     }
                 }
             }
@@ -242,9 +265,11 @@
     }
     for (const auto& pullInfo : needToPull) {
         vector<shared_ptr<LogEvent>> data;
-        bool pullSuccess = PullLocked(pullInfo.first->atomTag, pullInfo.first->configKey,
-                                      elapsedTimeNs, &data);
-        if (!pullSuccess) {
+        PullResult pullResult =
+                PullLocked(pullInfo.first->atomTag, pullInfo.first->configKey, elapsedTimeNs, &data)
+                        ? PullResult::PULL_RESULT_SUCCESS
+                        : PullResult::PULL_RESULT_FAIL;
+        if (pullResult == PullResult::PULL_RESULT_FAIL) {
             VLOG("pull failed at %lld, will try again later", (long long)elapsedTimeNs);
         }
 
@@ -263,14 +288,12 @@
         for (const auto& receiverInfo : pullInfo.second) {
             sp<PullDataReceiver> receiverPtr = receiverInfo->receiver.promote();
             if (receiverPtr != nullptr) {
-                receiverPtr->onDataPulled(data, pullSuccess, elapsedTimeNs);
+                receiverPtr->onDataPulled(data, pullResult, elapsedTimeNs);
                 // We may have just come out of a coma, compute next pull time.
                 int numBucketsAhead =
                         (elapsedTimeNs - receiverInfo->nextPullTimeNs) / receiverInfo->intervalNs;
                 receiverInfo->nextPullTimeNs += (numBucketsAhead + 1) * receiverInfo->intervalNs;
-                if (receiverInfo->nextPullTimeNs < minNextPullTimeNs) {
-                    minNextPullTimeNs = receiverInfo->nextPullTimeNs;
-                }
+                minNextPullTimeNs = min(receiverInfo->nextPullTimeNs, minNextPullTimeNs);
             } else {
                 VLOG("receiver already gone.");
             }
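
Both the skip path and the post-pull path above use the same catch-up arithmetic to realign a receiver's schedule after missed bucket boundaries (the "come out of a coma" case). A standalone sketch of that computation, with illustrative numbers:

    // Standalone illustration of the next-pull-time catch-up used by the alarm handler above.
    #include <cstdint>
    #include <cstdio>

    int64_t catchUpNextPullTimeNs(int64_t nextPullTimeNs, int64_t elapsedTimeNs,
                                  int64_t intervalNs) {
        // Whole bucket intervals by which the alarm overshot the scheduled pull time.
        const int64_t numBucketsAhead = (elapsedTimeNs - nextPullTimeNs) / intervalNs;
        // Skip every missed boundary and land on the first one strictly in the future.
        return nextPullTimeNs + (numBucketsAhead + 1) * intervalNs;
    }

    int main() {
        const int64_t kMinuteNs = 60LL * 1000000000LL;
        const int64_t intervalNs = 5 * kMinuteNs;  // 5-minute buckets
        // Scheduled pull at t = 0, alarm delivered at t = 12 min: two boundaries were missed,
        // so the next pull is scheduled for t = 15 min.
        const int64_t next = catchUpNextPullTimeNs(0, 12 * kMinuteNs, intervalNs);
        std::printf("next pull at %lld min\n", (long long)(next / kMinuteNs));  // prints 15
        return 0;
    }
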
diff --git a/statsd/src/external/StatsPullerManager.h b/statsd/src/external/StatsPullerManager.h
index b503aa8..6e65f31 100644
--- a/statsd/src/external/StatsPullerManager.h
+++ b/statsd/src/external/StatsPullerManager.h
@@ -164,6 +164,8 @@
 
     int64_t mNextPullTimeNs;
 
+    const bool mLimitPull;
+
     FRIEND_TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvents);
     FRIEND_TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvent_LateAlarm);
     FRIEND_TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsWithActivation);
diff --git a/statsd/src/flags/FlagProvider.h b/statsd/src/flags/FlagProvider.h
index 6684ba4..e948911 100644
--- a/statsd/src/flags/FlagProvider.h
+++ b/statsd/src/flags/FlagProvider.h
@@ -39,6 +39,8 @@
 
 const std::string OPTIMIZATION_ATOM_MATCHER_MAP_FLAG = "optimization_atom_matcher_map";
 
+const std::string LIMIT_PULL_FLAG = "limit_pull";
+
 const std::string FLAG_TRUE = "true";
 const std::string FLAG_FALSE = "false";
 const std::string FLAG_EMPTY = "";
@@ -103,6 +105,7 @@
     friend class ConfigUpdateE2eTest;
     friend class ConfigUpdateTest;
     friend class EventMetricE2eTest;
+    friend class ValueMetricE2eTest;
     friend class GaugeMetricE2ePulledTest;
     friend class GaugeMetricE2ePushedTest;
     friend class EventMetricProducerTest;
diff --git a/statsd/src/main.cpp b/statsd/src/main.cpp
index cd64fe7..02dbbe8 100644
--- a/statsd/src/main.cpp
+++ b/statsd/src/main.cpp
@@ -76,7 +76,8 @@
             std::make_shared<LogEventQueue>(4000 /*buffer limit. Buffer is NOT pre-allocated*/);
 
     // Initialize boot flags
-    FlagProvider::getInstance().initBootFlags({OPTIMIZATION_ATOM_MATCHER_MAP_FLAG});
+    FlagProvider::getInstance().initBootFlags(
+            {OPTIMIZATION_ATOM_MATCHER_MAP_FLAG, LIMIT_PULL_FLAG});
 
     sp<UidMap> uidMap = UidMap::getInstance();
 
diff --git a/statsd/src/metrics/GaugeMetricProducer.cpp b/statsd/src/metrics/GaugeMetricProducer.cpp
index e306e05..cb61512 100644
--- a/statsd/src/metrics/GaugeMetricProducer.cpp
+++ b/statsd/src/metrics/GaugeMetricProducer.cpp
@@ -483,9 +483,9 @@
 }
 
 void GaugeMetricProducer::onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& allData,
-                                       bool pullSuccess, int64_t originalPullTimeNs) {
+                                       PullResult pullResult, int64_t originalPullTimeNs) {
     std::lock_guard<std::mutex> lock(mMutex);
-    if (!pullSuccess || allData.size() == 0) {
+    if (pullResult != PullResult::PULL_RESULT_SUCCESS || allData.size() == 0) {
         return;
     }
     const int64_t pullDelayNs = getElapsedRealtimeNs() - originalPullTimeNs;
diff --git a/statsd/src/metrics/GaugeMetricProducer.h b/statsd/src/metrics/GaugeMetricProducer.h
index eb0ee37..f0a59ce 100644
--- a/statsd/src/metrics/GaugeMetricProducer.h
+++ b/statsd/src/metrics/GaugeMetricProducer.h
@@ -76,8 +76,14 @@
     virtual ~GaugeMetricProducer();
 
     // Handles when the pulled data arrives.
-    void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data,
-                      bool pullSuccess, int64_t originalPullTimeNs) override;
+    void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data, PullResult pullResult,
+                      int64_t originalPullTimeNs) override;
+
+    // Determine whether the metric currently needs to pull data.
+    bool isPullNeeded() const override {
+        std::lock_guard<std::mutex> lock(mMutex);
+        return mIsActive && (mCondition == ConditionState::kTrue);
+    }
 
     // GaugeMetric needs to immediately trigger another pull when we create the partial bucket.
     void notifyAppUpgradeInternalLocked(const int64_t eventTimeNs) override {
diff --git a/statsd/src/metrics/NumericValueMetricProducer.cpp b/statsd/src/metrics/NumericValueMetricProducer.cpp
index 9d054da..12c8c04 100644
--- a/statsd/src/metrics/NumericValueMetricProducer.cpp
+++ b/statsd/src/metrics/NumericValueMetricProducer.cpp
@@ -183,14 +183,14 @@
 // By design, statsd pulls data at bucket boundaries using AlarmManager. These pulls are likely
 // to be delayed. Other events like condition changes or app upgrade which are not based on
 // AlarmManager might have arrived earlier and close the bucket.
-void NumericValueMetricProducer::onDataPulled(const vector<shared_ptr<LogEvent>>& allData,
-                                              bool pullSuccess, int64_t originalPullTimeNs) {
+void NumericValueMetricProducer::onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& allData,
+                                              PullResult pullResult, int64_t originalPullTimeNs) {
     lock_guard<mutex> lock(mMutex);
     if (mCondition == ConditionState::kTrue) {
         // If the pull failed, we won't be able to compute a diff.
-        if (!pullSuccess) {
+        if (pullResult == PullResult::PULL_RESULT_FAIL) {
             invalidateCurrentBucket(originalPullTimeNs, BucketDropReason::PULL_FAILED);
-        } else {
+        } else if (pullResult == PullResult::PULL_RESULT_SUCCESS) {
             bool isEventLate = originalPullTimeNs < getCurrentBucketEndTimeNs();
             if (isEventLate) {
                 // If the event is late, we are in the middle of a bucket. Just
diff --git a/statsd/src/metrics/NumericValueMetricProducer.h b/statsd/src/metrics/NumericValueMetricProducer.h
index 8c883d5..78c414f 100644
--- a/statsd/src/metrics/NumericValueMetricProducer.h
+++ b/statsd/src/metrics/NumericValueMetricProducer.h
@@ -39,9 +39,15 @@
                                const GuardrailOptions& guardrailOptions);
 
     // Process data pulled on bucket boundary.
-    void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data, bool pullSuccess,
+    void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& allData, PullResult pullResult,
                       int64_t originalPullTimeNs) override;
 
+    // Determine whether the metric currently needs to pull data.
+    bool isPullNeeded() const override {
+        std::lock_guard<std::mutex> lock(mMutex);
+        return mIsActive && (mCondition == ConditionState::kTrue);
+    }
+
     inline MetricType getMetricType() const override {
         return METRIC_TYPE_VALUE;
     }
diff --git a/statsd/src/metrics/ValueMetricProducer.h b/statsd/src/metrics/ValueMetricProducer.h
index e8fe7e5..1c974b5 100644
--- a/statsd/src/metrics/ValueMetricProducer.h
+++ b/statsd/src/metrics/ValueMetricProducer.h
@@ -118,10 +118,14 @@
     virtual ~ValueMetricProducer();
 
     // Process data pulled on bucket boundary.
-    virtual void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data, bool pullSuccess,
-                              int64_t originalPullTimeNs) override {
+    virtual void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data,
+                              PullResult pullResult, int64_t originalPullTimeNs) override {
     }
 
+    // Determine whether the metric currently needs to pull data.
+    virtual bool isPullNeeded() const override {
+        return false;
+    }
 
     // ValueMetric needs special logic if it's a pulled atom.
     void onStatsdInitCompleted(const int64_t& eventTimeNs) override;
diff --git a/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp b/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp
index 984b72e..fc1db28 100644
--- a/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp
+++ b/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp
@@ -69,9 +69,11 @@
 }  // namespaces
 
 // Setup for test fixture.
-class GaugeMetricE2ePulledTest : public ::testing::Test {
+class GaugeMetricE2ePulledTest : public ::testing::TestWithParam<string> {
     void SetUp() override {
         FlagProvider::getInstance().overrideFuncs(&isAtLeastSFuncTrue);
+        FlagProvider::getInstance().overrideFlag(LIMIT_PULL_FLAG, GetParam(),
+                                                 /*isBootFlag=*/true);
     }
 
     void TearDown() override {
@@ -79,7 +81,10 @@
     }
 };
 
-TEST_F(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvents) {
+INSTANTIATE_TEST_SUITE_P(LimitPull, GaugeMetricE2ePulledTest,
+                         testing::Values(FLAG_FALSE, FLAG_TRUE));
+
+TEST_P(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvents) {
     auto config = CreateStatsdConfig(GaugeMetric::RANDOM_ONE_SAMPLE);
     int64_t baseTimeNs = getElapsedRealtimeNs();
     int64_t configAddedTimeNs = 10 * 60 * NS_PER_SEC + baseTimeNs;
@@ -219,7 +224,7 @@
     EXPECT_GT(data.bucket_info(5).atom(0).subsystem_sleep_state().time_millis(), 0);
 }
 
-TEST_F(GaugeMetricE2ePulledTest, TestConditionChangeToTrueSamplePulledEvents) {
+TEST_P(GaugeMetricE2ePulledTest, TestConditionChangeToTrueSamplePulledEvents) {
     auto config = CreateStatsdConfig(GaugeMetric::CONDITION_CHANGE_TO_TRUE);
     int64_t baseTimeNs = getElapsedRealtimeNs();
     int64_t configAddedTimeNs = 10 * 60 * NS_PER_SEC + baseTimeNs;
@@ -317,7 +322,7 @@
     EXPECT_GT(data.bucket_info(2).atom(1).subsystem_sleep_state().time_millis(), 0);
 }
 
-TEST_F(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvent_LateAlarm) {
+TEST_P(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvent_LateAlarm) {
     auto config = CreateStatsdConfig(GaugeMetric::RANDOM_ONE_SAMPLE);
     int64_t baseTimeNs = getElapsedRealtimeNs();
     int64_t configAddedTimeNs = 10 * 60 * NS_PER_SEC + baseTimeNs;
@@ -415,7 +420,7 @@
     EXPECT_GT(data.bucket_info(2).atom(0).subsystem_sleep_state().time_millis(), 0);
 }
 
-TEST_F(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsWithActivation) {
+TEST_P(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsWithActivation) {
     auto config = CreateStatsdConfig(GaugeMetric::RANDOM_ONE_SAMPLE, /*useCondition=*/false);
 
     int64_t baseTimeNs = getElapsedRealtimeNs();
@@ -460,14 +465,15 @@
     // Check no pull occurred on metric initialization when it's not active.
     const int64_t metricInitTimeNs = configAddedTimeNs + 1;  // 10 mins + 1 ns.
     processor->onStatsdInitCompleted(metricInitTimeNs);
-    StatsdStatsReport_PulledAtomStats pulledAtomStats = getPulledAtomStats();
+    StatsdStatsReport_PulledAtomStats pulledAtomStats =
+            getPulledAtomStats(util::SUBSYSTEM_SLEEP_STATE);
     EXPECT_EQ(pulledAtomStats.atom_id(), ATOM_TAG);
     EXPECT_EQ(pulledAtomStats.total_pull(), 0);
 
     // Check no pull occurred on app upgrade when metric is not active.
     const int64_t appUpgradeTimeNs = metricInitTimeNs + 1;  // 10 mins + 2 ns.
     processor->notifyAppUpgrade(appUpgradeTimeNs, "appName", 1000 /* uid */, 2 /* version */);
-    pulledAtomStats = getPulledAtomStats();
+    pulledAtomStats = getPulledAtomStats(util::SUBSYSTEM_SLEEP_STATE);
     EXPECT_EQ(pulledAtomStats.atom_id(), ATOM_TAG);
     EXPECT_EQ(pulledAtomStats.total_pull(), 0);
 
@@ -593,7 +599,7 @@
     EXPECT_EQ(gaugeMetrics.skipped_size(), 0);
 }
 
-TEST_F(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsNoCondition) {
+TEST_P(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsNoCondition) {
     auto config = CreateStatsdConfig(GaugeMetric::RANDOM_ONE_SAMPLE, /*useCondition=*/false);
 
     int64_t baseTimeNs = getElapsedRealtimeNs();
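
The fixture change above turns the gauge e2e tests into gtest value-parameterized tests, so each test body runs once with limit_pull off and once with it on. A self-contained illustration of that pattern (class and test names here are illustrative):

    // Minimal value-parameterized gtest mirroring the fixtures in this change.
    #include <gtest/gtest.h>

    #include <string>

    class FlagParamTest : public testing::TestWithParam<std::string> {
      protected:
        void SetUp() override {
            // The real fixtures call FlagProvider::getInstance().overrideFlag(LIMIT_PULL_FLAG,
            // GetParam(), /*isBootFlag=*/true) here.
            mFlagValue = GetParam();
        }
        std::string mFlagValue;
    };

    // Each TEST_P body runs once per listed value, just as the fixtures above use
    // testing::Values(FLAG_FALSE, FLAG_TRUE).
    INSTANTIATE_TEST_SUITE_P(LimitPull, FlagParamTest, testing::Values("false", "true"));

    TEST_P(FlagParamTest, RunsUnderBothFlagValues) {
        EXPECT_TRUE(mFlagValue == "false" || mFlagValue == "true");
    }
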
diff --git a/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp b/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp
index 385a473..810b550 100644
--- a/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp
+++ b/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp
@@ -15,12 +15,13 @@
 #include <android/binder_interface_utils.h>
 #include <gtest/gtest.h>
 
+#include <vector>
+
+#include "flags/FlagProvider.h"
 #include "src/StatsLogProcessor.h"
 #include "src/stats_log_util.h"
 #include "tests/statsd_test_util.h"
 
-#include <vector>
-
 using ::ndk::SharedRefBase;
 
 namespace android {
@@ -143,6 +144,20 @@
 
 }  // namespace
 
+// Setup for test fixture.
+class ValueMetricE2eTest : public testing::TestWithParam<string> {
+    void SetUp() override {
+        FlagProvider::getInstance().overrideFlag(LIMIT_PULL_FLAG, GetParam(),
+                                                 /*isBootFlag=*/true);
+    }
+
+    void TearDown() override {
+        FlagProvider::getInstance().resetOverrides();
+    }
+};
+
+INSTANTIATE_TEST_SUITE_P(LimitPull, ValueMetricE2eTest, testing::Values(FLAG_FALSE, FLAG_TRUE));
+
 /**
  * Tests the initial condition and condition after the first log events for
  * value metrics with either a combination condition or simple condition.
@@ -202,7 +217,7 @@
     EXPECT_EQ(ConditionState::kTrue, metricProducer2->mCondition);
 }
 
-TEST(ValueMetricE2eTest, TestPulledEvents) {
+TEST_P(ValueMetricE2eTest, TestPulledEvents) {
     auto config = CreateStatsdConfig();
     int64_t baseTimeNs = getElapsedRealtimeNs();
     int64_t configAddedTimeNs = 10 * 60 * NS_PER_SEC + baseTimeNs;
@@ -303,9 +318,26 @@
     EXPECT_EQ(baseTimeNs + 7 * bucketSizeNs, data.bucket_info(3).start_bucket_elapsed_nanos());
     EXPECT_EQ(baseTimeNs + 8 * bucketSizeNs, data.bucket_info(3).end_bucket_elapsed_nanos());
     ASSERT_EQ(1, data.bucket_info(3).values_size());
+
+    valueMetrics = reports.reports(0).metrics(0).value_metrics();
+    ASSERT_EQ(2, valueMetrics.skipped_size());
+
+    StatsLogReport::SkippedBuckets skipped = valueMetrics.skipped(0);
+    EXPECT_EQ(BucketDropReason::CONDITION_UNKNOWN, skipped.drop_event(0).drop_reason());
+    EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 2 * bucketSizeNs)),
+              skipped.start_bucket_elapsed_nanos());
+    EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 3 * bucketSizeNs)),
+              skipped.end_bucket_elapsed_nanos());
+
+    skipped = valueMetrics.skipped(1);
+    EXPECT_EQ(BucketDropReason::NO_DATA, skipped.drop_event(0).drop_reason());
+    EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 5 * bucketSizeNs)),
+              skipped.start_bucket_elapsed_nanos());
+    EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 6 * bucketSizeNs)),
+              skipped.end_bucket_elapsed_nanos());
 }
 
-TEST(ValueMetricE2eTest, TestPulledEvents_LateAlarm) {
+TEST_P(ValueMetricE2eTest, TestPulledEvents_LateAlarm) {
     auto config = CreateStatsdConfig();
     int64_t baseTimeNs = getElapsedRealtimeNs();
     // 10 mins == 2 bucket durations.
@@ -406,9 +438,33 @@
     EXPECT_EQ(baseTimeNs + 9 * bucketSizeNs, data.bucket_info(2).start_bucket_elapsed_nanos());
     EXPECT_EQ(baseTimeNs + 10 * bucketSizeNs, data.bucket_info(2).end_bucket_elapsed_nanos());
     ASSERT_EQ(1, data.bucket_info(2).values_size());
+
+    valueMetrics = reports.reports(0).metrics(0).value_metrics();
+    ASSERT_EQ(3, valueMetrics.skipped_size());
+
+    StatsLogReport::SkippedBuckets skipped = valueMetrics.skipped(0);
+    EXPECT_EQ(BucketDropReason::CONDITION_UNKNOWN, skipped.drop_event(0).drop_reason());
+    EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 2 * bucketSizeNs)),
+              skipped.start_bucket_elapsed_nanos());
+    EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 5 * bucketSizeNs)),
+              skipped.end_bucket_elapsed_nanos());
+
+    skipped = valueMetrics.skipped(1);
+    EXPECT_EQ(BucketDropReason::NO_DATA, skipped.drop_event(0).drop_reason());
+    EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 6 * bucketSizeNs)),
+              skipped.start_bucket_elapsed_nanos());
+    EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 7 * bucketSizeNs)),
+              skipped.end_bucket_elapsed_nanos());
+
+    skipped = valueMetrics.skipped(2);
+    EXPECT_EQ(BucketDropReason::NO_DATA, skipped.drop_event(0).drop_reason());
+    EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 7 * bucketSizeNs)),
+              skipped.start_bucket_elapsed_nanos());
+    EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 8 * bucketSizeNs)),
+              skipped.end_bucket_elapsed_nanos());
 }
 
-TEST(ValueMetricE2eTest, TestPulledEvents_WithActivation) {
+TEST_P(ValueMetricE2eTest, TestPulledEvents_WithActivation) {
     auto config = CreateStatsdConfig(false);
     int64_t baseTimeNs = getElapsedRealtimeNs();
     int64_t configAddedTimeNs = 10 * 60 * NS_PER_SEC + baseTimeNs;
@@ -454,7 +510,8 @@
     processor->onStatsdInitCompleted(metricInitTimeNs);
 
     // Check no pull occurred since metric not active.
-    StatsdStatsReport_PulledAtomStats pulledAtomStats = getPulledAtomStats();
+    StatsdStatsReport_PulledAtomStats pulledAtomStats =
+            getPulledAtomStats(util::SUBSYSTEM_SLEEP_STATE);
     EXPECT_EQ(pulledAtomStats.atom_id(), util::SUBSYSTEM_SLEEP_STATE);
     EXPECT_EQ(pulledAtomStats.total_pull(), 0);
 
@@ -477,7 +534,7 @@
     processor->notifyAppUpgrade(appUpgradeTimeNs, "appName", 1000 /* uid */, 2 /* version */);
 
     // Check no pull occurred since metric not active.
-    pulledAtomStats = getPulledAtomStats();
+    pulledAtomStats = getPulledAtomStats(util::SUBSYSTEM_SLEEP_STATE);
     EXPECT_EQ(pulledAtomStats.atom_id(), util::SUBSYSTEM_SLEEP_STATE);
     EXPECT_EQ(pulledAtomStats.total_pull(), 0);
 
@@ -498,7 +555,7 @@
     buffer.clear();
     processor->onDumpReport(cfgKey, dumpReportTimeNs, true /* include_current_partial_bucket */,
                             true /* erase_data */, ADB_DUMP, NO_TIME_CONSTRAINTS, &buffer);
-    pulledAtomStats = getPulledAtomStats();
+    pulledAtomStats = getPulledAtomStats(util::SUBSYSTEM_SLEEP_STATE);
     EXPECT_EQ(pulledAtomStats.atom_id(), util::SUBSYSTEM_SLEEP_STATE);
     EXPECT_EQ(pulledAtomStats.total_pull(), 0);
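
The new skipped-bucket expectations above wrap timestamps in MillisToNano(NanoToMillis(...)) before comparing them to the report. A small sketch of the effect; the helper bodies below are reconstructions under the assumption that the real helpers (from the included stats_log_util) convert by integer division/multiplication by 1,000,000:

    // Illustrates the millisecond round-trip the expectations above rely on: skipped-bucket
    // boundary times lose sub-millisecond precision, so the expected values are truncated too.
    #include <cassert>
    #include <cstdint>

    int64_t NanoToMillisSketch(int64_t ns) { return ns / 1000000; }  // assumed behavior
    int64_t MillisToNanoSketch(int64_t ms) { return ms * 1000000; }  // assumed behavior

    int main() {
        const int64_t boundaryNs = 1000000999;  // 1 s + 999 ns
        // The round trip truncates the trailing 999 ns; the EXPECT_EQs compare truncated values.
        assert(MillisToNanoSketch(NanoToMillisSketch(boundaryNs)) == 1000000000);
        return 0;
    }
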
 
diff --git a/statsd/tests/metrics/GaugeMetricProducer_test.cpp b/statsd/tests/metrics/GaugeMetricProducer_test.cpp
index 190c5fb..a378393 100644
--- a/statsd/tests/metrics/GaugeMetricProducer_test.cpp
+++ b/statsd/tests/metrics/GaugeMetricProducer_test.cpp
@@ -150,7 +150,7 @@
     allData.clear();
     allData.push_back(makeLogEvent(tagId, bucket2StartTimeNs + 1, 10, "some value", 11));
 
-    gaugeProducer.onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
     auto it = gaugeProducer.mCurrentSlicedBucket->begin()->second.front().mFields->begin();
     EXPECT_EQ(INT, it->mValue.getType());
@@ -168,7 +168,7 @@
 
     allData.clear();
     allData.push_back(makeLogEvent(tagId, bucket3StartTimeNs + 10, 24, "some value", 25));
-    gaugeProducer.onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
     ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
     it = gaugeProducer.mCurrentSlicedBucket->begin()->second.front().mFields->begin();
     EXPECT_EQ(INT, it->mValue.getType());
@@ -331,7 +331,7 @@
 
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 1, 1));
-    gaugeProducer.onDataPulled(allData, /** succeed */ true, bucketStartTimeNs);
+    gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs);
     ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
     EXPECT_EQ(1, gaugeProducer.mCurrentSlicedBucket->begin()
                          ->second.front()
@@ -361,7 +361,8 @@
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + bucketSizeNs + 1, 3));
-    gaugeProducer.onDataPulled(allData, /** succeed */ true, bucketStartTimeNs + bucketSizeNs);
+    gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                               bucketStartTimeNs + bucketSizeNs);
     ASSERT_EQ(2UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
     ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
     EXPECT_EQ(3, gaugeProducer.mCurrentSlicedBucket->begin()
@@ -399,7 +400,7 @@
 
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 1, 1));
-    gaugeProducer.onDataPulled(allData, /** succeed */ true, bucketStartTimeNs);
+    gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs);
     ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
     EXPECT_EQ(1, gaugeProducer.mCurrentSlicedBucket->begin()
                          ->second.front()
@@ -462,7 +463,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 110));
-    gaugeProducer.onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
     EXPECT_EQ(110, gaugeProducer.mCurrentSlicedBucket->begin()
@@ -551,7 +552,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 1000, 110));
-    gaugeProducer.onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
     ASSERT_EQ(1UL, gaugeProducer.mPastBuckets.size());
@@ -598,7 +599,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 1, 13));
-    gaugeProducer.onDataPulled(allData, /** succeed */ true, bucketStartTimeNs);
+    gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs);
     ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
     EXPECT_EQ(13L, gaugeProducer.mCurrentSlicedBucket->begin()
                            ->second.front()
@@ -611,7 +612,8 @@
 
     allData.clear();
     allData.push_back(event2);
-    gaugeProducer.onDataPulled(allData, /** succeed */ true, bucketStartTimeNs + bucketSizeNs);
+    gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                               bucketStartTimeNs + bucketSizeNs);
     ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
     EXPECT_EQ(15L, gaugeProducer.mCurrentSlicedBucket->begin()
                            ->second.front()
@@ -623,7 +625,8 @@
     allData.clear();
     allData.push_back(
             CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 2 * bucketSizeNs + 10, 26));
-    gaugeProducer.onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + 2 * bucketSizeNs);
+    gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                               bucket2StartTimeNs + 2 * bucketSizeNs);
     ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
     EXPECT_EQ(26L, gaugeProducer.mCurrentSlicedBucket->begin()
                            ->second.front()
@@ -635,7 +638,8 @@
     // This event does not have the gauge field. Thus the current bucket value is 0.
     allData.clear();
     allData.push_back(CreateNoValuesLogEvent(tagId, bucketStartTimeNs + 3 * bucketSizeNs + 10));
-    gaugeProducer.onDataPulled(allData, /** succeed */ true, bucketStartTimeNs + 3 * bucketSizeNs);
+    gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                               bucketStartTimeNs + 3 * bucketSizeNs);
     ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
     EXPECT_TRUE(gaugeProducer.mCurrentSlicedBucket->begin()->second.front().mFields->empty());
 }
diff --git a/statsd/tests/metrics/NumericValueMetricProducer_test.cpp b/statsd/tests/metrics/NumericValueMetricProducer_test.cpp
index 644b331..f2b50c3 100644
--- a/statsd/tests/metrics/NumericValueMetricProducer_test.cpp
+++ b/statsd/tests/metrics/NumericValueMetricProducer_test.cpp
@@ -365,7 +365,7 @@
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 11));
 
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     // empty since bucket is flushed
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     // dimInfos holds the base
@@ -379,7 +379,7 @@
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 23));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
     // empty since bucket is cleared
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     // dimInfos holds the base
@@ -394,7 +394,7 @@
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs + 1, 36));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
     // empty since bucket is cleared
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     // dimInfos holds the base
@@ -441,7 +441,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 10, 2));
-    valueProducer->onDataPulled(allData, /** success */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     // Partial buckets created in 2nd bucket.
     switch (GetParam()) {
@@ -490,7 +490,7 @@
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 3, 11));
 
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     // empty since bucket is cleared
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     // dimInfos holds the base
@@ -504,14 +504,14 @@
 
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket3StartTimeNs + 1, 4, 23));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
     // No new data seen, so data has been cleared.
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(0UL, valueProducer->mDimInfos.size());
 
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket4StartTimeNs + 1, 3, 36));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
     curBase = valueProducer->mDimInfos.begin()->second.dimExtras[0];
@@ -541,7 +541,7 @@
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 11));
 
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     // empty since bucket is cleared
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     // dimInfos holds the base
@@ -555,7 +555,7 @@
     allData.clear();
     // 10 is less than 11, so we reset and keep 10 as the value.
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
     // empty since the bucket is flushed.
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -567,7 +567,7 @@
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs + 1, 36));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
     curBase = valueProducer->mDimInfos.begin()->second.dimExtras[0];
@@ -594,7 +594,7 @@
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 11));
 
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     // empty since bucket is cleared
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     // mDimInfos holds the base
@@ -608,7 +608,7 @@
     allData.clear();
     // 10 is less than 11, so we reset. 10 only updates the base.
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
     curBase = valueProducer->mDimInfos.begin()->second.dimExtras[0];
@@ -618,7 +618,7 @@
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs + 1, 36));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
     curBase = valueProducer->mDimInfos.begin()->second.dimExtras[0];
@@ -680,7 +680,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 110));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {10}, {bucketSizeNs - 8}, {0},
                                     {bucketStartTimeNs}, {bucket2StartTimeNs});
 
@@ -793,7 +793,7 @@
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 100));
 
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
 
     switch (GetParam()) {
@@ -811,7 +811,7 @@
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 150));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
     EXPECT_EQ(bucket3StartTimeNs, valueProducer->mCurrentBucketStartTimeNs);
     EXPECT_EQ(2, valueProducer->getCurrentBucketNum());
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20, 30},
@@ -839,7 +839,7 @@
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 100));
 
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
 
@@ -1157,7 +1157,7 @@
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 11));
 
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     // empty since bucket is finished
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -1171,7 +1171,7 @@
     // pull 2 at correct time
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 23));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
     // empty since bucket is finished
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -1188,7 +1188,7 @@
     // The new bucket is back to normal.
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket6StartTimeNs + 1, 36));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket6StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket6StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
     curBase = valueProducer->mDimInfos.begin()->second.dimExtras[0];
@@ -1269,7 +1269,7 @@
     // since the condition turned to off before this pull finish, it has no effect
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 110));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {bucketSizeNs - 8}, {1},
                                     {bucketStartTimeNs}, {bucket2StartTimeNs});
@@ -1355,7 +1355,7 @@
     // for the new bucket since it was just pulled.
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 50, 140));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + 50);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 50);
 
     ASSERT_EQ(1UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -1370,7 +1370,7 @@
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 160));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     assertPastBucketValuesSingleKey(
             valueProducer->mPastBuckets, {20, 30}, {bucketSizeNs - 8, bucketSizeNs - 24}, {1, -1},
@@ -1759,7 +1759,7 @@
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 2, 4));
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 1, 11));
 
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
     EXPECT_EQ(true, base1.has_value());
@@ -1825,7 +1825,7 @@
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 2, 4));
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 1, 11));
 
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
     EXPECT_EQ(true, base1.has_value());
@@ -1848,7 +1848,7 @@
     // This pull is incomplete since it's missing dimension 1. Will cause mDimInfos to be trimmed
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket4StartTimeNs + 1, 2, 5));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
 
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -1862,7 +1862,7 @@
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket5StartTimeNs + 1, 2, 13));
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket5StartTimeNs + 1, 1, 5));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket5StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket5StartTimeNs);
 
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
@@ -1915,7 +1915,7 @@
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 2, 4));
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 1, 11));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
@@ -1941,7 +1941,7 @@
     // next pull somehow did not happen, skip to end of bucket 3
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket4StartTimeNs + 1, 2, 5));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
     // Only one dimension left. One was trimmed.
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -1956,7 +1956,7 @@
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket5StartTimeNs + 1, 2, 14));
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket5StartTimeNs + 1, 1, 14));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket5StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket5StartTimeNs);
 
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
@@ -1964,7 +1964,7 @@
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket6StartTimeNs + 1, 1, 19));
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket6StartTimeNs + 1, 2, 20));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket6StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket6StartTimeNs);
 
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
@@ -2026,7 +2026,7 @@
     EXPECT_EQ(0, curInterval.sampleSize);
 
     vector<shared_ptr<LogEvent>> allData;
-    valueProducer->onDataPulled(allData, /** succeed */ false, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_FAIL, bucket2StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
     EXPECT_EQ(false, curBase.has_value());
@@ -2214,7 +2214,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 1, 110));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucketStartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs);
 
     // This will fail and should invalidate the whole bucket since we do not have all the data
     // needed to compute the metric value when the screen was on.
@@ -2224,7 +2224,7 @@
     // Bucket end.
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 140));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     valueProducer->flushIfNeededLocked(bucket2StartTimeNs + 1);
 
@@ -2292,7 +2292,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 1, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     // First bucket added to mSkippedBuckets after flush.
     ASSERT_EQ(1UL, valueProducer->mSkippedBuckets.size());
@@ -2352,7 +2352,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 1, 110));
-    valueProducer->onDataPulled(allData, /** succeed */ false, bucketStartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_FAIL, bucketStartTimeNs);
 
     valueProducer->onConditionChanged(false, bucketStartTimeNs + 2);
     valueProducer->onConditionChanged(true, bucketStartTimeNs + 3);
@@ -2360,7 +2360,7 @@
     // Bucket end.
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 140));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     valueProducer->flushIfNeededLocked(bucket2StartTimeNs + 1);
 
@@ -2429,7 +2429,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 1, 110));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucketStartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs);
 
     valueProducer->onConditionChanged(false, bucketStartTimeNs + 2);
     valueProducer->onConditionChanged(true, bucketStartTimeNs + 3);
@@ -2437,7 +2437,7 @@
     // Bucket end.
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 140));
-    valueProducer->onDataPulled(allData, /** succeed */ false, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_FAIL, bucket2StartTimeNs);
 
     valueProducer->flushIfNeededLocked(bucket2StartTimeNs + 1);
 
@@ -2491,7 +2491,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 110));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
     EXPECT_EQ(valueProducer->mDimInfos.begin()->second.seenNewData, false);
@@ -2501,7 +2501,7 @@
     // Bucket 3 empty.
     allData.clear();
     allData.push_back(CreateNoValuesLogEvent(tagId, bucket3StartTimeNs + 1));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
     // Data has been trimmed.
     ASSERT_EQ(1UL, valueProducer->mPastBuckets.size());
     ASSERT_EQ(1UL, valueProducer->mSkippedBuckets.size());
@@ -2511,7 +2511,7 @@
     // Bucket 4 start.
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs + 1, 150));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
     ASSERT_EQ(1UL, valueProducer->mPastBuckets.size());
     ASSERT_EQ(2UL, valueProducer->mSkippedBuckets.size());
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
@@ -2520,7 +2520,7 @@
     // Bucket 5 start.
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket5StartTimeNs + 1, 170));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket5StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket5StartTimeNs);
     assertPastBucketValuesSingleKey(
             valueProducer->mPastBuckets, {107, 20}, {bucketSizeNs, bucketSizeNs}, {0, 0},
             {bucketStartTimeNs, bucket4StartTimeNs}, {bucket2StartTimeNs, bucket5StartTimeNs});
@@ -2591,7 +2591,7 @@
 
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 120));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
     curBase = valueProducer->mDimInfos.begin()->second.dimExtras[0];
@@ -2649,7 +2649,7 @@
     // End of bucket
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(0UL, valueProducer->mDimInfos.size());
 
@@ -2686,7 +2686,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 2));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     // Key 1 should be removed from mDimInfos since it is not present in the most recent pull.
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
@@ -2754,7 +2754,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 4));
     // Pull fails and arrives late.
-    valueProducer->onDataPulled(allData, /** fails */ false, bucket3StartTimeNs + 1);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_FAIL, bucket3StartTimeNs + 1);
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {9},
                                     {partialBucketSplitTimeNs - bucketStartTimeNs}, {0},
                                     {bucketStartTimeNs}, {partialBucketSplitTimeNs});
@@ -2798,7 +2798,7 @@
     // End of first bucket
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 4));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + 1);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
 
     valueProducer->onConditionChanged(true, bucket2StartTimeNs + 10);
@@ -2830,11 +2830,11 @@
 
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 30, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucketStartTimeNs + 30);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs + 30);
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 20));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     // Bucket should have been completed.
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {30}, {bucketSizeNs}, {0},
@@ -2860,11 +2860,11 @@
 
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 30, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucketStartTimeNs + 30);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs + 30);
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 20));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     // Bucket should have been completed.
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {19}, {bucketSizeNs}, {0},
@@ -2951,7 +2951,7 @@
 
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + 1);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
 
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {2}, {2}, {0}, {bucketStartTimeNs},
                                     {bucket2StartTimeNs});
@@ -2995,18 +2995,18 @@
 
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 3, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ false, bucketStartTimeNs + 3);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_FAIL, bucketStartTimeNs + 3);
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 20));
-    valueProducer->onDataPulled(allData, /** succeed */ false, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_FAIL, bucket2StartTimeNs);
 
     valueProducer->onConditionChanged(false, bucket2StartTimeNs + 8);
     valueProducer->onConditionChanged(true, bucket2StartTimeNs + 10);
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 30));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     // There was no global base available so all buckets are invalid.
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {}, {}, {}, {}, {});
@@ -3035,7 +3035,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateThreeValueLogEvent(tagId, bucket2StartTimeNs + 1, tagId, 2, 2));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     ProtoOutputStream output;
     std::set<string> strSet;
@@ -3100,7 +3100,7 @@
 
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + 30);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 30);
 
     // Bucket should have been completed.
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {10}, {bucketSizeNs}, {30},
@@ -3151,7 +3151,7 @@
     // Now the alarm is delivered. Condition is off though.
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 110));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {50 - 8}, {0},
                                     {bucketStartTimeNs}, {bucket2StartTimeNs});
@@ -3183,7 +3183,7 @@
     // Now the alarm is delivered. Condition is on.
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 30));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {30}, {bucketSizeNs - 8}, {0},
                                     {bucketStartTimeNs}, {bucket2StartTimeNs});
@@ -3205,7 +3205,7 @@
     // Now the alarm is delivered. Condition is off though.
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 30));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     // Condition was always false.
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {}, {}, {}, {}, {});
@@ -3239,7 +3239,7 @@
     // Now the alarm is delivered. Condition is off though.
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 30));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(0UL, valueProducer->mDimInfos.size());
 
@@ -3328,7 +3328,7 @@
     // Bucket boundary pull.
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 15));
-    valueProducer->onDataPulled(allData, /** succeeds */ true, bucket2StartTimeNs + 1);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
 
     // Late condition change event.
     valueProducer->onConditionChanged(false, bucket2StartTimeNs - 100);
@@ -3395,7 +3395,7 @@
     // Bucket boundary pull.
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 15));
-    valueProducer->onDataPulled(allData, /** succeeds */ true, bucket2StartTimeNs + 1);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs - 100, 20));
@@ -3739,7 +3739,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 3));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     // This bucket is also dropped due to condition unknown.
     int64_t conditionChangeTimeNs = bucket2StartTimeNs + 10 * NS_PER_SEC;
@@ -4737,7 +4737,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs, 1 /*uid*/, 10));
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs, 2 /*uid*/, 15));
-    valueProducer->onDataPulled(allData, /** succeeds */ true, bucket2StartTimeNs + 1);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
 
     // Ensure the MetricDimensionKeys for the current state are kept.
     ASSERT_EQ(2UL, valueProducer->mCurrentSlicedBucket.size());
@@ -5773,7 +5773,7 @@
             CreateThreeValueLogEvent(tagId, bucket2StartTimeNs, 1 /*uid*/, 13, 16 /* tag */));
     allData.push_back(
             CreateThreeValueLogEvent(tagId, bucket2StartTimeNs, 2 /*uid*/, 13, 8 /*tag*/));
-    valueProducer->onDataPulled(allData, /** succeeds */ true, bucket2StartTimeNs + 1);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
 
     // Buckets flushed. MetricDimensionKeys not corresponding to the current state are removed.
     ASSERT_EQ(3UL, valueProducer->mCurrentSlicedBucket.size());
@@ -5983,7 +5983,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 11));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     ASSERT_EQ(2UL, valueProducer->mPastBuckets.size());
     ASSERT_EQ(1UL, valueProducer->mCurrentSlicedBucket.size());
@@ -6147,7 +6147,7 @@
     // Pull at end of first bucket.
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 11));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     ASSERT_EQ(2UL, valueProducer->mPastBuckets.size());
     ASSERT_EQ(1UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -6160,7 +6160,7 @@
     // Pull at end of second bucket. Since no new data is seen, mDimInfos will be cleared.
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 15));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
     ASSERT_EQ(2UL, valueProducer->mPastBuckets.size());
     ASSERT_EQ(1UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(0UL, valueProducer->mDimInfos.size());
@@ -6356,7 +6356,7 @@
             CreateThreeValueLogEvent(tagId, bucket2StartTimeNs, 1 /*uid*/, 13, 14 /* tag */));
     allData.push_back(
             CreateThreeValueLogEvent(tagId, bucket2StartTimeNs, 1 /*uid*/, 13, 16 /* tag */));
-    valueProducer->onDataPulled(allData, /** succeeds */ true, bucket2StartTimeNs + 1);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
 
     // Buckets flushed. MetricDimensionKeys not corresponding to the current state are removed.
     ASSERT_EQ(2UL, valueProducer->mCurrentSlicedBucket.size());
@@ -6544,7 +6544,7 @@
     allData.clear();
     allData.push_back(CreateThreeValueLogEvent(tagId, bucket2StartTimeNs + 1, 1 /*uid*/, 21, 21));
     allData.push_back(CreateThreeValueLogEvent(tagId, bucket2StartTimeNs + 1, 2 /*uid*/, 20, 5));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     // Check dump report.
     ProtoOutputStream output;
@@ -6613,7 +6613,8 @@
     // first delayed pull on the bucket #1 edge
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                                bucket2StartTimeNs + pullDelayNs);
 
     // the delayed pull did close the first bucket with condition duration == bucketSizeNs
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs}, {pullDelayNs},
@@ -6622,7 +6623,7 @@
     // second pull on the bucket #2 boundary on time
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 15));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
 
     // the second pull did close the second bucket with condition duration == bucketSizeNs
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5, 5},
@@ -6663,7 +6664,8 @@
 
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + delayNs, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + delayNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                                bucket2StartTimeNs + delayNs);
 
     // first delayed pull on the bucket #1 edge
     // the delayed pull did close the first bucket with condition duration == conditionDurationNs
@@ -6677,7 +6679,7 @@
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
 
     // second pull on the bucket #2 edge is on time
     assertPastBucketValuesSingleKey(
@@ -6733,7 +6735,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 30));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
 
     // second pull on the bucket #2 edge is on time
     // the pull did close the second bucket with condition where
@@ -6836,7 +6838,7 @@
     vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket1LatePullNs, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket1LatePullNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket1LatePullNs);
 
     // first delayed pull on the bucket #1 edge
     // the delayed pull did close the first bucket with condition duration == bucketSizeNs
@@ -6850,7 +6852,7 @@
     // will force delayed pull & bucket #2 close
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2LatePullNs, 25));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2LatePullNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2LatePullNs);
 
     // second delayed pull on the bucket #2 edge
     // the pull did close the second bucket with condition true
@@ -6867,7 +6869,7 @@
     // will force pull on time & bucket #3 close
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs, 40));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
 
     // the pull did close the third bucket with condition true
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5, 10, 15},
@@ -6904,7 +6906,8 @@
     // first delayed pull on the bucket #1 edge
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                                bucket2StartTimeNs + pullDelayNs);
 
     // the delayed pull did close the first bucket with condition duration == bucketSizeNs
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs}, {pullDelayNs},
@@ -6913,7 +6916,7 @@
     // second pull on the bucket #2 boundary on time
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 15));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
 
     // the second pull did close the second bucket with condition duration == bucketSizeNs
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5, 5},
@@ -6924,7 +6927,7 @@
     // third pull on the bucket #3 boundary on time
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs, 20));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
 
     // the third pull did close the third bucket with condition duration == bucketSizeNs
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5, 5, 5},
@@ -6956,7 +6959,8 @@
     // first delayed pull on the bucket #1 edge with delay
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                                bucket2StartTimeNs + pullDelayNs);
 
     // the delayed pull did close the first bucket which is skipped
     // skipped due to bucket does not contains any value
@@ -6966,7 +6970,7 @@
     // second pull on the bucket #2 boundary on time
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 15));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
 
     // the second pull did close the second bucket with condition duration == bucketSizeNs
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs},
@@ -6975,7 +6979,7 @@
     // third pull on the bucket #3 boundary on time
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs, 20));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
 
     // the third pull did close the third bucket with condition duration == bucketSizeNs
     assertPastBucketValuesSingleKey(
@@ -7016,7 +7020,8 @@
     // first delayed pull on the bucket #1 edge
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                                bucket2StartTimeNs + pullDelayNs);
 
     // the delayed pull did close the first bucket with condition duration == bucketSizeNs
     // and the condition correction == pull delay
@@ -7072,7 +7077,8 @@
     // first delayed pull on the bucket #1 edge
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                                bucket2StartTimeNs + pullDelayNs);
 
     // the delayed pull did close the first bucket with condition duration == bucketSizeNs
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs}, {pullDelayNs},
@@ -7129,7 +7135,8 @@
     // first delayed pull on the bucket #1 edge
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                                bucket2StartTimeNs + pullDelayNs);
 
     // the delayed pull did close the first bucket with condition duration == bucketSizeNs
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs}, {pullDelayNs},
@@ -7138,7 +7145,7 @@
     // second pull on the bucket #2 boundary on time
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 15));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
 
     // the second pull did close the second bucket with condition duration == bucketSizeNs
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5, 5},
@@ -7199,7 +7206,8 @@
     // first delayed pull on the bucket #1 edge
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                                bucket2StartTimeNs + pullDelayNs);
 
     // the delayed pull did close the first bucket with condition duration == bucketSizeNs
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs}, {pullDelayNs},
@@ -7256,7 +7264,8 @@
     // first delayed pull on the bucket #1 edge
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+                                bucket2StartTimeNs + pullDelayNs);
 
     // the delayed pull did close the first bucket with condition duration == bucketSizeNs
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs}, {pullDelayNs},
@@ -7439,7 +7448,7 @@
     allData.push_back(CreateThreeValueLogEvent(tagId, bucket2StartTimeNs + 1, 1 /*uid*/, 11, 7));
     allData.push_back(CreateThreeValueLogEvent(tagId, bucket2StartTimeNs + 1, 2 /*uid*/, 8, 5));
     allData.push_back(CreateThreeValueLogEvent(tagId, bucket2StartTimeNs + 1, 2 /*uid*/, 9, 7));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
     ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
 
@@ -7518,7 +7527,7 @@
     allData.clear();
     allData.push_back(makeRepeatedUidLogEvent(tagId, bucket2StartTimeNs + 1, {1, 10}, 5, {5, 7}));
     allData.push_back(makeRepeatedUidLogEvent(tagId, bucket2StartTimeNs + 1, {2, 10}, 5, {7, 5}));
-    valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+    valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
 
     // Check dump report.
     ProtoOutputStream output;
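
The test hunks above are mechanical: every call site that previously passed `/** succeed */ true` or `false` now passes `PullResult::PULL_RESULT_SUCCESS` or `PullResult::PULL_RESULT_FAIL`, with the timestamp argument unchanged. As a minimal sketch (the `FakeReceiver` class below is illustrative only and not part of this patch; include path and namespaces follow the usual statsd test layout but may differ), a receiver test double could consume the three-valued `PullResult` where a boolean used to be:

    // Sketch only. FakeReceiver is hypothetical; it is not added by this patch.
    #include "src/external/PullDataReceiver.h"

    namespace android {
    namespace os {
    namespace statsd {

    class FakeReceiver : public PullDataReceiver {
    public:
        void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data,
                          PullResult pullResult, int64_t originalPullTimeNs) override {
            switch (pullResult) {
                case PULL_RESULT_SUCCESS:
                    mLastData = data;   // pulled events arrived for this boundary
                    break;
                case PULL_RESULT_FAIL:
                    mLastData.clear();  // the pull did not succeed
                    break;
                case PULL_NOT_NEEDED:
                    break;              // no pull was performed for this receiver
            }
            mLastPullTimeNs = originalPullTimeNs;
        }

        bool isPullNeeded() const override { return mPullNeeded; }

        std::vector<std::shared_ptr<LogEvent>> mLastData;
        int64_t mLastPullTimeNs = 0;
        bool mPullNeeded = true;
    };

    }  // namespace statsd
    }  // namespace os
    }  // namespace android
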
diff --git a/statsd/tests/statsd_test_util.cpp b/statsd/tests/statsd_test_util.cpp
index fae6993..9b6f5b2 100644
--- a/statsd/tests/statsd_test_util.cpp
+++ b/statsd/tests/statsd_test_util.cpp
@@ -2068,7 +2068,7 @@
     return packageInfos;
 }
 
-StatsdStatsReport_PulledAtomStats getPulledAtomStats() {
+StatsdStatsReport_PulledAtomStats getPulledAtomStats(int32_t atom_id) {
     vector<uint8_t> statsBuffer;
     StatsdStats::getInstance().dumpStats(&statsBuffer, false /*reset stats*/);
     StatsdStatsReport statsReport;
@@ -2079,7 +2079,12 @@
     if (statsReport.pulled_atom_stats_size() == 0) {
         return pulledAtomStats;
     }
-    return statsReport.pulled_atom_stats(0);
+    for (int i = 0; i < statsReport.pulled_atom_stats_size(); i++) {
+        if (statsReport.pulled_atom_stats(i).atom_id() == atom_id) {
+            return statsReport.pulled_atom_stats(i);
+        }
+    }
+    return pulledAtomStats;
 }
 
 }  // namespace statsd
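
With `getPulledAtomStats` now keyed by atom id, a test that exercises several pulled atoms can inspect the stats entry for just one of them; when no pulls were recorded for that id, the default-constructed `pulledAtomStats` is returned. A short usage sketch follows (the atom id and surrounding test are illustrative, not taken from this patch):

    // Sketch only: assumes this runs inside a statsd test after some pulls have happened.
    const int32_t kAtomId = 10005;  // illustrative pulled-atom id, not from this patch
    StatsdStatsReport_PulledAtomStats pulledStats = getPulledAtomStats(kAtomId);
    if (pulledStats.atom_id() == kAtomId) {
        // An entry exists: counters in pulledStats refer only to kAtomId.
    } else {
        // No pulls were recorded for kAtomId: fields keep their proto defaults.
    }
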
diff --git a/statsd/tests/statsd_test_util.h b/statsd/tests/statsd_test_util.h
index af5ce3e..ea473c4 100644
--- a/statsd/tests/statsd_test_util.h
+++ b/statsd/tests/statsd_test_util.h
@@ -726,7 +726,7 @@
     return result;
 }
 
-StatsdStatsReport_PulledAtomStats getPulledAtomStats();
+StatsdStatsReport_PulledAtomStats getPulledAtomStats(int32_t atom_id);
 }  // namespace statsd
 }  // namespace os
 }  // namespace android