// GaugeMetricProducer.cpp revision ec3c7a3e58757a23afbb494b5fd9f1a1081fa495
1/* 2* Copyright (C) 2017 The Android Open Source Project 3* 4* Licensed under the Apache License, Version 2.0 (the "License"); 5* you may not use this file except in compliance with the License. 6* You may obtain a copy of the License at 7* 8* http://www.apache.org/licenses/LICENSE-2.0 9* 10* Unless required by applicable law or agreed to in writing, software 11* distributed under the License is distributed on an "AS IS" BASIS, 12* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13* See the License for the specific language governing permissions and 14* limitations under the License. 15*/ 16 17#define DEBUG false // STOPSHIP if true 18#include "Log.h" 19 20#include "../guardrail/StatsdStats.h" 21#include "GaugeMetricProducer.h" 22#include "../stats_log_util.h" 23 24#include <cutils/log.h> 25 26using android::util::FIELD_COUNT_REPEATED; 27using android::util::FIELD_TYPE_BOOL; 28using android::util::FIELD_TYPE_FLOAT; 29using android::util::FIELD_TYPE_INT32; 30using android::util::FIELD_TYPE_INT64; 31using android::util::FIELD_TYPE_MESSAGE; 32using android::util::FIELD_TYPE_STRING; 33using android::util::ProtoOutputStream; 34using std::map; 35using std::string; 36using std::unordered_map; 37using std::vector; 38using std::make_shared; 39using std::shared_ptr; 40 41namespace android { 42namespace os { 43namespace statsd { 44 45// for StatsLogReport 46const int FIELD_ID_ID = 1; 47const int FIELD_ID_GAUGE_METRICS = 8; 48const int FIELD_ID_TIME_BASE = 9; 49const int FIELD_ID_BUCKET_SIZE = 10; 50const int FIELD_ID_DIMENSION_PATH_IN_WHAT = 11; 51const int FIELD_ID_DIMENSION_PATH_IN_CONDITION = 12; 52// for GaugeMetricDataWrapper 53const int FIELD_ID_DATA = 1; 54const int FIELD_ID_SKIPPED = 2; 55const int FIELD_ID_SKIPPED_START_MILLIS = 3; 56const int FIELD_ID_SKIPPED_END_MILLIS = 4; 57// for GaugeMetricData 58const int FIELD_ID_DIMENSION_IN_WHAT = 1; 59const int FIELD_ID_DIMENSION_IN_CONDITION = 2; 60const int FIELD_ID_BUCKET_INFO = 3; 61const int 
FIELD_ID_DIMENSION_LEAF_IN_WHAT = 4; 62const int FIELD_ID_DIMENSION_LEAF_IN_CONDITION = 5; 63// for GaugeBucketInfo 64const int FIELD_ID_ATOM = 3; 65const int FIELD_ID_ELAPSED_ATOM_TIMESTAMP = 4; 66const int FIELD_ID_WALL_CLOCK_ATOM_TIMESTAMP = 5; 67const int FIELD_ID_BUCKET_NUM = 6; 68const int FIELD_ID_START_BUCKET_ELAPSED_MILLIS = 7; 69const int FIELD_ID_END_BUCKET_ELAPSED_MILLIS = 8; 70 71GaugeMetricProducer::GaugeMetricProducer(const ConfigKey& key, const GaugeMetric& metric, 72 const int conditionIndex, 73 const sp<ConditionWizard>& wizard, const int pullTagId, 74 const int64_t timeBaseNs, const int64_t startTimeNs, 75 shared_ptr<StatsPullerManager> statsPullerManager) 76 : MetricProducer(metric.id(), key, timeBaseNs, conditionIndex, wizard), 77 mStatsPullerManager(statsPullerManager), 78 mPullTagId(pullTagId), 79 mMinBucketSizeNs(metric.min_bucket_size_nanos()), 80 mDimensionSoftLimit(StatsdStats::kAtomDimensionKeySizeLimitMap.find(pullTagId) != 81 StatsdStats::kAtomDimensionKeySizeLimitMap.end() 82 ? StatsdStats::kAtomDimensionKeySizeLimitMap.at(pullTagId).first 83 : StatsdStats::kDimensionKeySizeSoftLimit), 84 mDimensionHardLimit(StatsdStats::kAtomDimensionKeySizeLimitMap.find(pullTagId) != 85 StatsdStats::kAtomDimensionKeySizeLimitMap.end() 86 ? 
StatsdStats::kAtomDimensionKeySizeLimitMap.at(pullTagId).second 87 : StatsdStats::kDimensionKeySizeHardLimit) { 88 mCurrentSlicedBucket = std::make_shared<DimToGaugeAtomsMap>(); 89 mCurrentSlicedBucketForAnomaly = std::make_shared<DimToValMap>(); 90 int64_t bucketSizeMills = 0; 91 if (metric.has_bucket()) { 92 bucketSizeMills = TimeUnitToBucketSizeInMillisGuardrailed(key.GetUid(), metric.bucket()); 93 } else { 94 bucketSizeMills = TimeUnitToBucketSizeInMillis(ONE_HOUR); 95 } 96 mBucketSizeNs = bucketSizeMills * 1000000; 97 98 mSamplingType = metric.sampling_type(); 99 if (!metric.gauge_fields_filter().include_all()) { 100 translateFieldMatcher(metric.gauge_fields_filter().fields(), &mFieldMatchers); 101 } 102 103 // TODO: use UidMap if uid->pkg_name is required 104 if (metric.has_dimensions_in_what()) { 105 translateFieldMatcher(metric.dimensions_in_what(), &mDimensionsInWhat); 106 mContainANYPositionInDimensionsInWhat = HasPositionANY(metric.dimensions_in_what()); 107 } 108 109 if (metric.has_dimensions_in_condition()) { 110 translateFieldMatcher(metric.dimensions_in_condition(), &mDimensionsInCondition); 111 } 112 113 if (metric.links().size() > 0) { 114 for (const auto& link : metric.links()) { 115 Metric2Condition mc; 116 mc.conditionId = link.condition(); 117 translateFieldMatcher(link.fields_in_what(), &mc.metricFields); 118 translateFieldMatcher(link.fields_in_condition(), &mc.conditionFields); 119 mMetric2ConditionLinks.push_back(mc); 120 } 121 } 122 mConditionSliced = (metric.links().size() > 0) || (mDimensionsInCondition.size() > 0); 123 mSliceByPositionALL = HasPositionALL(metric.dimensions_in_what()) || 124 HasPositionALL(metric.dimensions_in_condition()); 125 126 flushIfNeededLocked(startTimeNs); 127 // Kicks off the puller immediately. 
128 if (mPullTagId != -1 && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) { 129 mStatsPullerManager->RegisterReceiver( 130 mPullTagId, this, getCurrentBucketEndTimeNs(), mBucketSizeNs); 131 } 132 133 VLOG("Gauge metric %lld created. bucket size %lld start_time: %lld sliced %d", 134 (long long)metric.id(), (long long)mBucketSizeNs, (long long)mTimeBaseNs, 135 mConditionSliced); 136} 137 138// for testing 139GaugeMetricProducer::GaugeMetricProducer(const ConfigKey& key, const GaugeMetric& metric, 140 const int conditionIndex, 141 const sp<ConditionWizard>& wizard, const int pullTagId, 142 const int64_t timeBaseNs, const int64_t startTimeNs) 143 : GaugeMetricProducer(key, metric, conditionIndex, wizard, pullTagId, timeBaseNs, startTimeNs, 144 make_shared<StatsPullerManager>()) { 145} 146 147GaugeMetricProducer::~GaugeMetricProducer() { 148 VLOG("~GaugeMetricProducer() called"); 149 if (mPullTagId != -1 && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) { 150 mStatsPullerManager->UnRegisterReceiver(mPullTagId, this); 151 } 152} 153 154void GaugeMetricProducer::dumpStatesLocked(FILE* out, bool verbose) const { 155 if (mCurrentSlicedBucket == nullptr || 156 mCurrentSlicedBucket->size() == 0) { 157 return; 158 } 159 160 fprintf(out, "GaugeMetric %lld dimension size %lu\n", (long long)mMetricId, 161 (unsigned long)mCurrentSlicedBucket->size()); 162 if (verbose) { 163 for (const auto& it : *mCurrentSlicedBucket) { 164 fprintf(out, "\t(what)%s\t(condition)%s %d atoms\n", 165 it.first.getDimensionKeyInWhat().toString().c_str(), 166 it.first.getDimensionKeyInCondition().toString().c_str(), 167 (int)it.second.size()); 168 } 169 } 170} 171 172void GaugeMetricProducer::clearPastBucketsLocked(const int64_t dumpTimeNs) { 173 flushIfNeededLocked(dumpTimeNs); 174 mPastBuckets.clear(); 175 mSkippedBuckets.clear(); 176} 177 178void GaugeMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs, 179 const bool include_current_partial_bucket, 180 std::set<string> *str_set, 181 
ProtoOutputStream* protoOutput) { 182 VLOG("Gauge metric %lld report now...", (long long)mMetricId); 183 if (include_current_partial_bucket) { 184 flushLocked(dumpTimeNs); 185 } else { 186 flushIfNeededLocked(dumpTimeNs); 187 } 188 189 flushIfNeededLocked(dumpTimeNs); 190 if (mPastBuckets.empty()) { 191 return; 192 } 193 194 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ID, (long long)mMetricId); 195 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_TIME_BASE, (long long)mTimeBaseNs); 196 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_BUCKET_SIZE, (long long)mBucketSizeNs); 197 198 // Fills the dimension path if not slicing by ALL. 199 if (!mSliceByPositionALL) { 200 if (!mDimensionsInWhat.empty()) { 201 uint64_t dimenPathToken = protoOutput->start( 202 FIELD_TYPE_MESSAGE | FIELD_ID_DIMENSION_PATH_IN_WHAT); 203 writeDimensionPathToProto(mDimensionsInWhat, protoOutput); 204 protoOutput->end(dimenPathToken); 205 } 206 if (!mDimensionsInCondition.empty()) { 207 uint64_t dimenPathToken = protoOutput->start( 208 FIELD_TYPE_MESSAGE | FIELD_ID_DIMENSION_PATH_IN_CONDITION); 209 writeDimensionPathToProto(mDimensionsInCondition, protoOutput); 210 protoOutput->end(dimenPathToken); 211 } 212 } 213 214 uint64_t protoToken = protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_ID_GAUGE_METRICS); 215 216 for (const auto& pair : mSkippedBuckets) { 217 uint64_t wrapperToken = 218 protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_SKIPPED); 219 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_SKIPPED_START_MILLIS, 220 (long long)(NanoToMillis(pair.first))); 221 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_SKIPPED_END_MILLIS, 222 (long long)(NanoToMillis(pair.second))); 223 protoOutput->end(wrapperToken); 224 } 225 mSkippedBuckets.clear(); 226 227 for (const auto& pair : mPastBuckets) { 228 const MetricDimensionKey& dimensionKey = pair.first; 229 230 VLOG("Gauge dimension key %s", dimensionKey.toString().c_str()); 231 uint64_t wrapperToken = 232 
protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_DATA); 233 234 // First fill dimension. 235 if (mSliceByPositionALL) { 236 uint64_t dimensionToken = protoOutput->start( 237 FIELD_TYPE_MESSAGE | FIELD_ID_DIMENSION_IN_WHAT); 238 writeDimensionToProto(dimensionKey.getDimensionKeyInWhat(), str_set, protoOutput); 239 protoOutput->end(dimensionToken); 240 241 if (dimensionKey.hasDimensionKeyInCondition()) { 242 uint64_t dimensionInConditionToken = protoOutput->start( 243 FIELD_TYPE_MESSAGE | FIELD_ID_DIMENSION_IN_CONDITION); 244 writeDimensionToProto(dimensionKey.getDimensionKeyInCondition(), 245 str_set, protoOutput); 246 protoOutput->end(dimensionInConditionToken); 247 } 248 } else { 249 writeDimensionLeafNodesToProto(dimensionKey.getDimensionKeyInWhat(), 250 FIELD_ID_DIMENSION_LEAF_IN_WHAT, str_set, protoOutput); 251 if (dimensionKey.hasDimensionKeyInCondition()) { 252 writeDimensionLeafNodesToProto(dimensionKey.getDimensionKeyInCondition(), 253 FIELD_ID_DIMENSION_LEAF_IN_CONDITION, 254 str_set, protoOutput); 255 } 256 } 257 258 // Then fill bucket_info (GaugeBucketInfo). 
259 for (const auto& bucket : pair.second) { 260 uint64_t bucketInfoToken = protoOutput->start( 261 FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_BUCKET_INFO); 262 263 if (bucket.mBucketEndNs - bucket.mBucketStartNs != mBucketSizeNs) { 264 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_START_BUCKET_ELAPSED_MILLIS, 265 (long long)NanoToMillis(bucket.mBucketStartNs)); 266 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_END_BUCKET_ELAPSED_MILLIS, 267 (long long)NanoToMillis(bucket.mBucketEndNs)); 268 } else { 269 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_BUCKET_NUM, 270 (long long)(getBucketNumFromEndTimeNs(bucket.mBucketEndNs))); 271 } 272 273 if (!bucket.mGaugeAtoms.empty()) { 274 for (const auto& atom : bucket.mGaugeAtoms) { 275 uint64_t atomsToken = 276 protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | 277 FIELD_ID_ATOM); 278 writeFieldValueTreeToStream(mTagId, *(atom.mFields), protoOutput); 279 protoOutput->end(atomsToken); 280 } 281 const bool truncateTimestamp = 282 android::util::AtomsInfo::kNotTruncatingTimestampAtomWhiteList.find( 283 mTagId) == 284 android::util::AtomsInfo::kNotTruncatingTimestampAtomWhiteList.end(); 285 for (const auto& atom : bucket.mGaugeAtoms) { 286 const int64_t elapsedTimestampNs = truncateTimestamp ? 287 truncateTimestampNsToFiveMinutes(atom.mElapsedTimestamps) : 288 atom.mElapsedTimestamps; 289 const int64_t wallClockNs = truncateTimestamp ? 
290 truncateTimestampNsToFiveMinutes(atom.mWallClockTimestampNs) : 291 atom.mWallClockTimestampNs; 292 protoOutput->write( 293 FIELD_TYPE_INT64 | FIELD_COUNT_REPEATED | FIELD_ID_ELAPSED_ATOM_TIMESTAMP, 294 (long long)elapsedTimestampNs); 295 protoOutput->write( 296 FIELD_TYPE_INT64 | FIELD_COUNT_REPEATED | 297 FIELD_ID_WALL_CLOCK_ATOM_TIMESTAMP, 298 (long long)wallClockNs); 299 } 300 } 301 protoOutput->end(bucketInfoToken); 302 VLOG("Gauge \t bucket [%lld - %lld] includes %d atoms.", 303 (long long)bucket.mBucketStartNs, (long long)bucket.mBucketEndNs, 304 (int)bucket.mGaugeAtoms.size()); 305 } 306 protoOutput->end(wrapperToken); 307 } 308 protoOutput->end(protoToken); 309 310 mPastBuckets.clear(); 311 // TODO: Clear mDimensionKeyMap once the report is dumped. 312} 313 314void GaugeMetricProducer::pullLocked(const int64_t timestampNs) { 315 bool triggerPuller = false; 316 switch(mSamplingType) { 317 // When the metric wants to do random sampling and there is already one gauge atom for the 318 // current bucket, do not do it again. 
319 case GaugeMetric::RANDOM_ONE_SAMPLE: { 320 triggerPuller = mCondition && mCurrentSlicedBucket->empty(); 321 break; 322 } 323 case GaugeMetric::ALL_CONDITION_CHANGES: { 324 triggerPuller = true; 325 break; 326 } 327 case GaugeMetric::CONDITION_CHANGE_TO_TRUE: { 328 triggerPuller = mCondition; 329 break; 330 } 331 default: 332 break; 333 } 334 if (!triggerPuller) { 335 return; 336 } 337 338 vector<std::shared_ptr<LogEvent>> allData; 339 if (!mStatsPullerManager->Pull(mPullTagId, timestampNs, &allData)) { 340 ALOGE("Gauge Stats puller failed for tag: %d", mPullTagId); 341 return; 342 } 343 344 for (const auto& data : allData) { 345 onMatchedLogEventLocked(0, *data); 346 } 347} 348 349void GaugeMetricProducer::onConditionChangedLocked(const bool conditionMet, 350 const int64_t eventTimeNs) { 351 VLOG("GaugeMetric %lld onConditionChanged", (long long)mMetricId); 352 flushIfNeededLocked(eventTimeNs); 353 mCondition = conditionMet; 354 355 if (mPullTagId != -1) { 356 pullLocked(eventTimeNs); 357 } // else: Push mode. No need to proactively pull the gauge data. 358} 359 360void GaugeMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition, 361 const int64_t eventTimeNs) { 362 VLOG("GaugeMetric %lld onSlicedConditionMayChange overall condition %d", (long long)mMetricId, 363 overallCondition); 364 flushIfNeededLocked(eventTimeNs); 365 // If the condition is sliced, mCondition is true if any of the dimensions is true. And we will 366 // pull for every dimension. 367 mCondition = overallCondition; 368 if (mPullTagId != -1) { 369 pullLocked(eventTimeNs); 370 } // else: Push mode. No need to proactively pull the gauge data. 
371} 372 373std::shared_ptr<vector<FieldValue>> GaugeMetricProducer::getGaugeFields(const LogEvent& event) { 374 if (mFieldMatchers.size() > 0) { 375 std::shared_ptr<vector<FieldValue>> gaugeFields = std::make_shared<vector<FieldValue>>(); 376 filterGaugeValues(mFieldMatchers, event.getValues(), gaugeFields.get()); 377 return gaugeFields; 378 } else { 379 return std::make_shared<vector<FieldValue>>(event.getValues()); 380 } 381} 382 383void GaugeMetricProducer::onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& allData) { 384 std::lock_guard<std::mutex> lock(mMutex); 385 if (allData.size() == 0) { 386 return; 387 } 388 for (const auto& data : allData) { 389 onMatchedLogEventLocked(0, *data); 390 } 391} 392 393bool GaugeMetricProducer::hitGuardRailLocked(const MetricDimensionKey& newKey) { 394 if (mCurrentSlicedBucket->find(newKey) != mCurrentSlicedBucket->end()) { 395 return false; 396 } 397 // 1. Report the tuple count if the tuple count > soft limit 398 if (mCurrentSlicedBucket->size() > mDimensionSoftLimit - 1) { 399 size_t newTupleCount = mCurrentSlicedBucket->size() + 1; 400 StatsdStats::getInstance().noteMetricDimensionSize(mConfigKey, mMetricId, newTupleCount); 401 // 2. Don't add more tuples, we are above the allowed threshold. Drop the data. 
402 if (newTupleCount > mDimensionHardLimit) { 403 ALOGE("GaugeMetric %lld dropping data for dimension key %s", 404 (long long)mMetricId, newKey.toString().c_str()); 405 return true; 406 } 407 } 408 409 return false; 410} 411 412void GaugeMetricProducer::onMatchedLogEventInternalLocked( 413 const size_t matcherIndex, const MetricDimensionKey& eventKey, 414 const ConditionKey& conditionKey, bool condition, 415 const LogEvent& event) { 416 if (condition == false) { 417 return; 418 } 419 int64_t eventTimeNs = event.GetElapsedTimestampNs(); 420 mTagId = event.GetTagId(); 421 if (eventTimeNs < mCurrentBucketStartTimeNs) { 422 VLOG("Gauge Skip event due to late arrival: %lld vs %lld", (long long)eventTimeNs, 423 (long long)mCurrentBucketStartTimeNs); 424 return; 425 } 426 flushIfNeededLocked(eventTimeNs); 427 428 // When gauge metric wants to randomly sample the output atom, we just simply use the first 429 // gauge in the given bucket. 430 if (mCurrentSlicedBucket->find(eventKey) != mCurrentSlicedBucket->end() && 431 mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) { 432 return; 433 } 434 if (hitGuardRailLocked(eventKey)) { 435 return; 436 } 437 GaugeAtom gaugeAtom(getGaugeFields(event), eventTimeNs, getWallClockNs()); 438 (*mCurrentSlicedBucket)[eventKey].push_back(gaugeAtom); 439 // Anomaly detection on gauge metric only works when there is one numeric 440 // field specified. 
441 if (mAnomalyTrackers.size() > 0) { 442 if (gaugeAtom.mFields->size() == 1) { 443 const Value& value = gaugeAtom.mFields->begin()->mValue; 444 long gaugeVal = 0; 445 if (value.getType() == INT) { 446 gaugeVal = (long)value.int_value; 447 } else if (value.getType() == LONG) { 448 gaugeVal = value.long_value; 449 } 450 for (auto& tracker : mAnomalyTrackers) { 451 tracker->detectAndDeclareAnomaly(eventTimeNs, mCurrentBucketNum, eventKey, 452 gaugeVal); 453 } 454 } 455 } 456} 457 458void GaugeMetricProducer::updateCurrentSlicedBucketForAnomaly() { 459 for (const auto& slice : *mCurrentSlicedBucket) { 460 if (slice.second.empty()) { 461 continue; 462 } 463 const Value& value = slice.second.front().mFields->front().mValue; 464 long gaugeVal = 0; 465 if (value.getType() == INT) { 466 gaugeVal = (long)value.int_value; 467 } else if (value.getType() == LONG) { 468 gaugeVal = value.long_value; 469 } 470 (*mCurrentSlicedBucketForAnomaly)[slice.first] = gaugeVal; 471 } 472} 473 474void GaugeMetricProducer::dropDataLocked(const int64_t dropTimeNs) { 475 flushIfNeededLocked(dropTimeNs); 476 mPastBuckets.clear(); 477} 478 479// When a new matched event comes in, we check if event falls into the current 480// bucket. If not, flush the old counter to past buckets and initialize the new 481// bucket. 482// if data is pushed, onMatchedLogEvent will only be called through onConditionChanged() inside 483// the GaugeMetricProducer while holding the lock. 484void GaugeMetricProducer::flushIfNeededLocked(const int64_t& eventTimeNs) { 485 int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs(); 486 487 if (eventTimeNs < currentBucketEndTimeNs) { 488 VLOG("Gauge eventTime is %lld, less than next bucket start time %lld", 489 (long long)eventTimeNs, (long long)(mCurrentBucketStartTimeNs + mBucketSizeNs)); 490 return; 491 } 492 493 flushCurrentBucketLocked(eventTimeNs); 494 495 // Adjusts the bucket start and end times. 
496 int64_t numBucketsForward = 1 + (eventTimeNs - currentBucketEndTimeNs) / mBucketSizeNs; 497 mCurrentBucketStartTimeNs = currentBucketEndTimeNs + (numBucketsForward - 1) * mBucketSizeNs; 498 mCurrentBucketNum += numBucketsForward; 499 VLOG("Gauge metric %lld: new bucket start time: %lld", (long long)mMetricId, 500 (long long)mCurrentBucketStartTimeNs); 501} 502 503void GaugeMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) { 504 int64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs(); 505 506 GaugeBucket info; 507 info.mBucketStartNs = mCurrentBucketStartTimeNs; 508 if (eventTimeNs < fullBucketEndTimeNs) { 509 info.mBucketEndNs = eventTimeNs; 510 } else { 511 info.mBucketEndNs = fullBucketEndTimeNs; 512 } 513 514 if (info.mBucketEndNs - mCurrentBucketStartTimeNs >= mMinBucketSizeNs) { 515 for (const auto& slice : *mCurrentSlicedBucket) { 516 info.mGaugeAtoms = slice.second; 517 auto& bucketList = mPastBuckets[slice.first]; 518 bucketList.push_back(info); 519 VLOG("Gauge gauge metric %lld, dump key value: %s", (long long)mMetricId, 520 slice.first.toString().c_str()); 521 } 522 } else { 523 mSkippedBuckets.emplace_back(info.mBucketStartNs, info.mBucketEndNs); 524 } 525 526 // If we have anomaly trackers, we need to update the partial bucket values. 527 if (mAnomalyTrackers.size() > 0) { 528 updateCurrentSlicedBucketForAnomaly(); 529 530 if (eventTimeNs > fullBucketEndTimeNs) { 531 // This is known to be a full bucket, so send this data to the anomaly tracker. 
532 for (auto& tracker : mAnomalyTrackers) { 533 tracker->addPastBucket(mCurrentSlicedBucketForAnomaly, mCurrentBucketNum); 534 } 535 mCurrentSlicedBucketForAnomaly = std::make_shared<DimToValMap>(); 536 } 537 } 538 539 mCurrentSlicedBucket = std::make_shared<DimToGaugeAtomsMap>(); 540} 541 542size_t GaugeMetricProducer::byteSizeLocked() const { 543 size_t totalSize = 0; 544 for (const auto& pair : mPastBuckets) { 545 totalSize += pair.second.size() * kBucketSize; 546 } 547 return totalSize; 548} 549 550} // namespace statsd 551} // namespace os 552} // namespace android 553