GaugeMetricProducer.cpp revision 3b592910c50856a3b28a07a735c31e19f44bc460
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define DEBUG false  // STOPSHIP if true
#include "Log.h"

#include "../guardrail/StatsdStats.h"
#include "GaugeMetricProducer.h"
#include "../stats_log_util.h"

#include <cutils/log.h>

using android::util::FIELD_COUNT_REPEATED;
using android::util::FIELD_TYPE_BOOL;
using android::util::FIELD_TYPE_FLOAT;
using android::util::FIELD_TYPE_INT32;
using android::util::FIELD_TYPE_INT64;
using android::util::FIELD_TYPE_MESSAGE;
using android::util::FIELD_TYPE_STRING;
using android::util::ProtoOutputStream;
using std::map;
using std::string;
using std::unordered_map;
using std::vector;
using std::make_shared;
using std::shared_ptr;

namespace android {
namespace os {
namespace statsd {

// Proto field numbers used when serializing the report with ProtoOutputStream.
// They must stay in sync with the corresponding statsd report proto messages.
// for StatsLogReport
const int FIELD_ID_ID = 1;
const int FIELD_ID_GAUGE_METRICS = 8;
const int FIELD_ID_TIME_BASE = 9;
const int FIELD_ID_BUCKET_SIZE = 10;
const int FIELD_ID_DIMENSION_PATH_IN_WHAT = 11;
const int FIELD_ID_DIMENSION_PATH_IN_CONDITION = 12;
// for GaugeMetricDataWrapper
const int FIELD_ID_DATA = 1;
const int FIELD_ID_SKIPPED = 2;
const int FIELD_ID_SKIPPED_START_MILLIS = 3;
const int FIELD_ID_SKIPPED_END_MILLIS = 4;
// for GaugeMetricData
const int FIELD_ID_DIMENSION_IN_WHAT = 1;
const int FIELD_ID_DIMENSION_IN_CONDITION = 2;
const int FIELD_ID_BUCKET_INFO = 3;
const int FIELD_ID_DIMENSION_LEAF_IN_WHAT = 4;
const int FIELD_ID_DIMENSION_LEAF_IN_CONDITION = 5;
// for GaugeBucketInfo
const int FIELD_ID_ATOM = 3;
const int FIELD_ID_ELAPSED_ATOM_TIMESTAMP = 4;
const int FIELD_ID_WALL_CLOCK_ATOM_TIMESTAMP = 5;
const int FIELD_ID_BUCKET_NUM = 6;
const int FIELD_ID_START_BUCKET_ELAPSED_MILLIS = 7;
const int FIELD_ID_END_BUCKET_ELAPSED_MILLIS = 8;

// Main constructor.
// Translates the GaugeMetric config into runtime state: bucket size (defaults
// to one hour when the config has no bucket), gauge field filter, dimensions,
// metric->condition links, and per-atom dimension guardrail limits (per-tag
// overrides from StatsdStats::kAtomDimensionKeySizeLimitMap, otherwise the
// global soft/hard defaults). For pulled metrics using RANDOM_ONE_SAMPLE it
// also registers this producer with the puller manager so a pull is kicked
// off at each bucket boundary.
GaugeMetricProducer::GaugeMetricProducer(const ConfigKey& key, const GaugeMetric& metric,
                                         const int conditionIndex,
                                         const sp<ConditionWizard>& wizard, const int pullTagId,
                                         const int64_t timeBaseNs, const int64_t startTimeNs,
                                         shared_ptr<StatsPullerManager> statsPullerManager)
    : MetricProducer(metric.id(), key, timeBaseNs, conditionIndex, wizard),
      mStatsPullerManager(statsPullerManager),
      mPullTagId(pullTagId),  // -1 means this is a pushed (not pulled) metric
      mMinBucketSizeNs(metric.min_bucket_size_nanos()),
      // Use the per-atom dimension-size limits when this pull tag has an
      // entry in the override map; otherwise fall back to the global limits.
      mDimensionSoftLimit(StatsdStats::kAtomDimensionKeySizeLimitMap.find(pullTagId) !=
                                          StatsdStats::kAtomDimensionKeySizeLimitMap.end()
                                  ? StatsdStats::kAtomDimensionKeySizeLimitMap.at(pullTagId).first
                                  : StatsdStats::kDimensionKeySizeSoftLimit),
      mDimensionHardLimit(StatsdStats::kAtomDimensionKeySizeLimitMap.find(pullTagId) !=
                                          StatsdStats::kAtomDimensionKeySizeLimitMap.end()
                                  ? StatsdStats::kAtomDimensionKeySizeLimitMap.at(pullTagId).second
                                  : StatsdStats::kDimensionKeySizeHardLimit) {
    mCurrentSlicedBucket = std::make_shared<DimToGaugeAtomsMap>();
    mCurrentSlicedBucketForAnomaly = std::make_shared<DimToValMap>();
    int64_t bucketSizeMills = 0;
    if (metric.has_bucket()) {
        bucketSizeMills = TimeUnitToBucketSizeInMillisGuardrailed(key.GetUid(), metric.bucket());
    } else {
        // No bucket configured: default to one-hour buckets.
        bucketSizeMills = TimeUnitToBucketSizeInMillis(ONE_HOUR);
    }
    mBucketSizeNs = bucketSizeMills * 1000000;

    mSamplingType = metric.sampling_type();
    // An empty mFieldMatchers (include_all case) means the whole atom is kept.
    if (!metric.gauge_fields_filter().include_all()) {
        translateFieldMatcher(metric.gauge_fields_filter().fields(), &mFieldMatchers);
    }

    // TODO: use UidMap if uid->pkg_name is required
    if (metric.has_dimensions_in_what()) {
        translateFieldMatcher(metric.dimensions_in_what(), &mDimensionsInWhat);
        mContainANYPositionInDimensionsInWhat = HasPositionANY(metric.dimensions_in_what());
    }

    if (metric.has_dimensions_in_condition()) {
        translateFieldMatcher(metric.dimensions_in_condition(), &mDimensionsInCondition);
    }

    if (metric.links().size() > 0) {
        for (const auto& link : metric.links()) {
            Metric2Condition mc;
            mc.conditionId = link.condition();
            translateFieldMatcher(link.fields_in_what(), &mc.metricFields);
            translateFieldMatcher(link.fields_in_condition(), &mc.conditionFields);
            mMetric2ConditionLinks.push_back(mc);
        }
    }
    mConditionSliced = (metric.links().size() > 0) || (mDimensionsInCondition.size() > 0);
    mSliceByPositionALL = HasPositionALL(metric.dimensions_in_what()) ||
                          HasPositionALL(metric.dimensions_in_condition());

    // Aligns the first bucket to startTimeNs before any event is processed.
    flushIfNeededLocked(startTimeNs);
    // Kicks off the puller immediately.
    if (mPullTagId != -1 && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
        mStatsPullerManager->RegisterReceiver(
                mPullTagId, this, getCurrentBucketEndTimeNs(), mBucketSizeNs);
    }

    VLOG("Gauge metric %lld created. bucket size %lld start_time: %lld sliced %d",
         (long long)metric.id(), (long long)mBucketSizeNs, (long long)mTimeBaseNs,
         mConditionSliced);
}

// for testing
// Delegates to the main constructor with a freshly created StatsPullerManager.
GaugeMetricProducer::GaugeMetricProducer(const ConfigKey& key, const GaugeMetric& metric,
                                         const int conditionIndex,
                                         const sp<ConditionWizard>& wizard, const int pullTagId,
                                         const int64_t timeBaseNs, const int64_t startTimeNs)
    : GaugeMetricProducer(key, metric, conditionIndex, wizard, pullTagId, timeBaseNs, startTimeNs,
                          make_shared<StatsPullerManager>()) {
}

// Unregisters the pull receiver iff the constructor registered one
// (pulled metric with RANDOM_ONE_SAMPLE sampling).
GaugeMetricProducer::~GaugeMetricProducer() {
    VLOG("~GaugeMetricProducer() called");
    if (mPullTagId != -1 && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
        mStatsPullerManager->UnRegisterReceiver(mPullTagId, this);
    }
}

// Debug dump of the current (in-progress) sliced bucket to `out`.
// With `verbose`, prints each dimension key and its atom count.
void GaugeMetricProducer::dumpStatesLocked(FILE* out, bool verbose) const {
    if (mCurrentSlicedBucket == nullptr ||
        mCurrentSlicedBucket->size() == 0) {
        return;
    }

    fprintf(out, "GaugeMetric %lld dimension size %lu\n", (long long)mMetricId,
            (unsigned long)mCurrentSlicedBucket->size());
    if (verbose) {
        for (const auto& it : *mCurrentSlicedBucket) {
            fprintf(out, "\t(what)%s\t(condition)%s %d atoms\n",
                    it.first.getDimensionKeyInWhat().toString().c_str(),
                    it.first.getDimensionKeyInCondition().toString().c_str(),
                    (int)it.second.size());
        }
    }
}

// Discards all completed buckets without reporting them. The current bucket
// is first closed at dumpTimeNs so its data is dropped too.
void GaugeMetricProducer::clearPastBucketsLocked(const int64_t dumpTimeNs) {
    flushIfNeededLocked(dumpTimeNs);
    mPastBuckets.clear();
    mSkippedBuckets.clear();
}

// Serializes all completed buckets into the StatsLogReport proto and clears
// them. When include_current_partial_bucket is set, the in-progress bucket is
// force-flushed (flushLocked) so it is included; otherwise it is only flushed
// if dumpTimeNs has passed the bucket boundary. `str_set`, when non-null,
// collects dimension strings for deduplication by the caller.
void GaugeMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
                                             const bool include_current_partial_bucket,
                                             std::set<string> *str_set,
                                             ProtoOutputStream* protoOutput) {
    VLOG("Gauge metric %lld report now...", (long long)mMetricId);
    if (include_current_partial_bucket) {
        flushLocked(dumpTimeNs);
    } else {
        flushIfNeededLocked(dumpTimeNs);
    }

    if (mPastBuckets.empty()) {
        return;
    }

    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ID, (long long)mMetricId);
    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_TIME_BASE, (long long)mTimeBaseNs);
    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_BUCKET_SIZE, (long long)mBucketSizeNs);

    // Fills the dimension path if not slicing by ALL.
    if (!mSliceByPositionALL) {
        if (!mDimensionsInWhat.empty()) {
            uint64_t dimenPathToken = protoOutput->start(
                    FIELD_TYPE_MESSAGE | FIELD_ID_DIMENSION_PATH_IN_WHAT);
            writeDimensionPathToProto(mDimensionsInWhat, protoOutput);
            protoOutput->end(dimenPathToken);
        }
        if (!mDimensionsInCondition.empty()) {
            uint64_t dimenPathToken = protoOutput->start(
                    FIELD_TYPE_MESSAGE | FIELD_ID_DIMENSION_PATH_IN_CONDITION);
            writeDimensionPathToProto(mDimensionsInCondition, protoOutput);
            protoOutput->end(dimenPathToken);
        }
    }

    uint64_t protoToken = protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_ID_GAUGE_METRICS);

    // Report buckets that were skipped (too short per min_bucket_size_nanos).
    for (const auto& pair : mSkippedBuckets) {
        uint64_t wrapperToken =
                protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_SKIPPED);
        protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_SKIPPED_START_MILLIS,
                           (long long)(NanoToMillis(pair.first)));
        protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_SKIPPED_END_MILLIS,
                           (long long)(NanoToMillis(pair.second)));
        protoOutput->end(wrapperToken);
    }
    mSkippedBuckets.clear();

    for (const auto& pair : mPastBuckets) {
        const MetricDimensionKey& dimensionKey = pair.first;

        VLOG("Gauge dimension key %s", dimensionKey.toString().c_str());
        uint64_t wrapperToken =
                protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_DATA);

        // First fill dimension.
        if (mSliceByPositionALL) {
            // Slicing by ALL: write the full dimension trees.
            uint64_t dimensionToken = protoOutput->start(
                    FIELD_TYPE_MESSAGE | FIELD_ID_DIMENSION_IN_WHAT);
            writeDimensionToProto(dimensionKey.getDimensionKeyInWhat(), str_set, protoOutput);
            protoOutput->end(dimensionToken);

            if (dimensionKey.hasDimensionKeyInCondition()) {
                uint64_t dimensionInConditionToken = protoOutput->start(
                        FIELD_TYPE_MESSAGE | FIELD_ID_DIMENSION_IN_CONDITION);
                writeDimensionToProto(dimensionKey.getDimensionKeyInCondition(),
                                      str_set, protoOutput);
                protoOutput->end(dimensionInConditionToken);
            }
        } else {
            // Not slicing by ALL: only leaf values are written here; the
            // shared structure was already emitted as the dimension path above.
            writeDimensionLeafNodesToProto(dimensionKey.getDimensionKeyInWhat(),
                                           FIELD_ID_DIMENSION_LEAF_IN_WHAT, str_set, protoOutput);
            if (dimensionKey.hasDimensionKeyInCondition()) {
                writeDimensionLeafNodesToProto(dimensionKey.getDimensionKeyInCondition(),
                                               FIELD_ID_DIMENSION_LEAF_IN_CONDITION,
                                               str_set, protoOutput);
            }
        }

        // Then fill bucket_info (GaugeBucketInfo).
        for (const auto& bucket : pair.second) {
            uint64_t bucketInfoToken = protoOutput->start(
                    FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_BUCKET_INFO);

            // Full-size buckets are encoded compactly as a bucket number;
            // partial buckets carry explicit start/end wall times in millis.
            if (bucket.mBucketEndNs - bucket.mBucketStartNs != mBucketSizeNs) {
                protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_START_BUCKET_ELAPSED_MILLIS,
                                   (long long)NanoToMillis(bucket.mBucketStartNs));
                protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_END_BUCKET_ELAPSED_MILLIS,
                                   (long long)NanoToMillis(bucket.mBucketEndNs));
            } else {
                protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_BUCKET_NUM,
                                   (long long)(getBucketNumFromEndTimeNs(bucket.mBucketEndNs)));
            }

            if (!bucket.mGaugeAtoms.empty()) {
                // Atoms and their timestamps are written as two parallel
                // repeated fields, in the same atom order.
                for (const auto& atom : bucket.mGaugeAtoms) {
                    uint64_t atomsToken =
                            protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED |
                                               FIELD_ID_ATOM);
                    writeFieldValueTreeToStream(mTagId, *(atom.mFields), protoOutput);
                    protoOutput->end(atomsToken);
                }
                // Timestamps are rounded to 5-minute granularity unless the
                // atom is whitelisted to keep exact timestamps.
                const bool truncateTimestamp =
                        android::util::AtomsInfo::kNotTruncatingTimestampAtomWhiteList.find(
                                mTagId) ==
                        android::util::AtomsInfo::kNotTruncatingTimestampAtomWhiteList.end();
                for (const auto& atom : bucket.mGaugeAtoms) {
                    const int64_t elapsedTimestampNs = truncateTimestamp ?
                            truncateTimestampNsToFiveMinutes(atom.mElapsedTimestamps) :
                                    atom.mElapsedTimestamps;
                    const int64_t wallClockNs = truncateTimestamp ?
                            truncateTimestampNsToFiveMinutes(atom.mWallClockTimestampNs) :
                                    atom.mWallClockTimestampNs;
                    protoOutput->write(
                            FIELD_TYPE_INT64 | FIELD_COUNT_REPEATED |
                                    FIELD_ID_ELAPSED_ATOM_TIMESTAMP,
                            (long long)elapsedTimestampNs);
                    protoOutput->write(
                            FIELD_TYPE_INT64 | FIELD_COUNT_REPEATED |
                                    FIELD_ID_WALL_CLOCK_ATOM_TIMESTAMP,
                            (long long)wallClockNs);
                }
            }
            protoOutput->end(bucketInfoToken);
            VLOG("Gauge \t bucket [%lld - %lld] includes %d atoms.",
                 (long long)bucket.mBucketStartNs, (long long)bucket.mBucketEndNs,
                 (int)bucket.mGaugeAtoms.size());
        }
        protoOutput->end(wrapperToken);
    }
    protoOutput->end(protoToken);

    mPastBuckets.clear();
    // TODO: Clear mDimensionKeyMap once the report is dumped.
}

// Pulls the gauge data now if the sampling type and current condition call
// for it, and feeds each pulled event through the normal matched-event path.
void GaugeMetricProducer::pullLocked(const int64_t timestampNs) {
    bool triggerPuller = false;
    switch(mSamplingType) {
        // When the metric wants to do random sampling and there is already one gauge atom for the
        // current bucket, do not do it again.
        case GaugeMetric::RANDOM_ONE_SAMPLE: {
            triggerPuller = mCondition && mCurrentSlicedBucket->empty();
            break;
        }
        case GaugeMetric::ALL_CONDITION_CHANGES: {
            triggerPuller = true;
            break;
        }
        case GaugeMetric::CONDITION_CHANGE_TO_TRUE: {
            triggerPuller = mCondition;
            break;
        }
        default:
            break;
    }
    if (!triggerPuller) {
        return;
    }

    vector<std::shared_ptr<LogEvent>> allData;
    if (!mStatsPullerManager->Pull(mPullTagId, timestampNs, &allData)) {
        // Pull failure is logged and dropped; no retry here.
        ALOGE("Gauge Stats puller failed for tag: %d", mPullTagId);
        return;
    }

    for (const auto& data : allData) {
        onMatchedLogEventLocked(0, *data);
    }
}

// Records the new (unsliced) condition and, for pulled metrics, triggers a
// pull so the gauge reflects the state at the condition change.
void GaugeMetricProducer::onConditionChangedLocked(const bool conditionMet,
                                                   const int64_t eventTimeNs) {
    VLOG("GaugeMetric %lld onConditionChanged", (long long)mMetricId);
    flushIfNeededLocked(eventTimeNs);
    mCondition = conditionMet;

    if (mPullTagId != -1) {
        pullLocked(eventTimeNs);
    }  // else: Push mode. No need to proactively pull the gauge data.
}

void GaugeMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition,
                                                           const int64_t eventTimeNs) {
    VLOG("GaugeMetric %lld onSlicedConditionMayChange overall condition %d", (long long)mMetricId,
         overallCondition);
    flushIfNeededLocked(eventTimeNs);
    // If the condition is sliced, mCondition is true if any of the dimensions is true. And we will
    // pull for every dimension.
    mCondition = overallCondition;
    if (mPullTagId != -1) {
        pullLocked(eventTimeNs);
    }  // else: Push mode. No need to proactively pull the gauge data.
}

// Extracts the gauge fields to keep from an event: either just the fields
// selected by gauge_fields_filter, or a copy of all of the event's values
// when no filter was configured (include_all).
std::shared_ptr<vector<FieldValue>> GaugeMetricProducer::getGaugeFields(const LogEvent& event) {
    if (mFieldMatchers.size() > 0) {
        std::shared_ptr<vector<FieldValue>> gaugeFields = std::make_shared<vector<FieldValue>>();
        filterGaugeValues(mFieldMatchers, event.getValues(), gaugeFields.get());
        return gaugeFields;
    } else {
        return std::make_shared<vector<FieldValue>>(event.getValues());
    }
}

// Callback from the puller manager with a batch of pulled events.
// Takes the producer lock itself (unlike the *Locked methods).
void GaugeMetricProducer::onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& allData) {
    std::lock_guard<std::mutex> lock(mMutex);
    if (allData.size() == 0) {
        return;
    }
    for (const auto& data : allData) {
        onMatchedLogEventLocked(0, *data);
    }
}

// Returns true iff adding `newKey` as a new dimension would exceed the hard
// limit (in which case the data should be dropped). Crossing the soft limit
// only reports the tuple count to StatsdStats. Existing keys never hit the
// guardrail.
bool GaugeMetricProducer::hitGuardRailLocked(const MetricDimensionKey& newKey) {
    if (mCurrentSlicedBucket->find(newKey) != mCurrentSlicedBucket->end()) {
        return false;
    }
    // 1. Report the tuple count if the tuple count > soft limit
    if (mCurrentSlicedBucket->size() > mDimensionSoftLimit - 1) {
        size_t newTupleCount = mCurrentSlicedBucket->size() + 1;
        StatsdStats::getInstance().noteMetricDimensionSize(mConfigKey, mMetricId, newTupleCount);
        // 2. Don't add more tuples, we are above the allowed threshold. Drop the data.
        if (newTupleCount > mDimensionHardLimit) {
            ALOGE("GaugeMetric %lld dropping data for dimension key %s",
                  (long long)mMetricId, newKey.toString().c_str());
            return true;
        }
    }

    return false;
}

// Core event handler: appends the event's gauge fields to the current sliced
// bucket (subject to condition, late-arrival, one-sample and guardrail
// checks) and runs anomaly detection when a single numeric field is tracked.
void GaugeMetricProducer::onMatchedLogEventInternalLocked(
        const size_t matcherIndex, const MetricDimensionKey& eventKey,
        const ConditionKey& conditionKey, bool condition,
        const LogEvent& event) {
    if (condition == false) {
        return;
    }
    int64_t eventTimeNs = event.GetElapsedTimestampNs();
    // Remember the atom tag for report serialization.
    mTagId = event.GetTagId();
    if (eventTimeNs < mCurrentBucketStartTimeNs) {
        // Events older than the current bucket are dropped, not back-filled.
        VLOG("Gauge Skip event due to late arrival: %lld vs %lld", (long long)eventTimeNs,
             (long long)mCurrentBucketStartTimeNs);
        return;
    }
    flushIfNeededLocked(eventTimeNs);

    // When gauge metric wants to randomly sample the output atom, we just simply use the first
    // gauge in the given bucket.
    if (mCurrentSlicedBucket->find(eventKey) != mCurrentSlicedBucket->end() &&
        mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
        return;
    }
    if (hitGuardRailLocked(eventKey)) {
        return;
    }
    GaugeAtom gaugeAtom(getGaugeFields(event), eventTimeNs, getWallClockNs());
    (*mCurrentSlicedBucket)[eventKey].push_back(gaugeAtom);
    // Anomaly detection on gauge metric only works when there is one numeric
    // field specified.
    if (mAnomalyTrackers.size() > 0) {
        if (gaugeAtom.mFields->size() == 1) {
            const Value& value = gaugeAtom.mFields->begin()->mValue;
            long gaugeVal = 0;
            if (value.getType() == INT) {
                gaugeVal = (long)value.int_value;
            } else if (value.getType() == LONG) {
                gaugeVal = value.long_value;
            }
            for (auto& tracker : mAnomalyTrackers) {
                tracker->detectAndDeclareAnomaly(eventTimeNs, mCurrentBucketNum, eventKey,
                                                 gaugeVal);
            }
        }
    }
}

// Snapshots the first gauge value of each slice into
// mCurrentSlicedBucketForAnomaly (numeric INT/LONG values only; other types
// record 0), for later hand-off to the anomaly trackers.
void GaugeMetricProducer::updateCurrentSlicedBucketForAnomaly() {
    for (const auto& slice : *mCurrentSlicedBucket) {
        if (slice.second.empty()) {
            continue;
        }
        const Value& value = slice.second.front().mFields->front().mValue;
        long gaugeVal = 0;
        if (value.getType() == INT) {
            gaugeVal = (long)value.int_value;
        } else if (value.getType() == LONG) {
            gaugeVal = value.long_value;
        }
        (*mCurrentSlicedBucketForAnomaly)[slice.first] = gaugeVal;
    }
}

// Closes the current bucket at dropTimeNs and discards all past buckets.
void GaugeMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
    flushIfNeededLocked(dropTimeNs);
    mPastBuckets.clear();
}

// When a new matched event comes in, we check if event falls into the current
// bucket. If not, flush the old counter to past buckets and initialize the new
// bucket.
// if data is pushed, onMatchedLogEvent will only be called through onConditionChanged() inside
// the GaugeMetricProducer while holding the lock.
void GaugeMetricProducer::flushIfNeededLocked(const int64_t& eventTimeNs) {
    int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();

    if (eventTimeNs < currentBucketEndTimeNs) {
        VLOG("Gauge eventTime is %lld, less than next bucket start time %lld",
             (long long)eventTimeNs, (long long)(mCurrentBucketStartTimeNs + mBucketSizeNs));
        return;
    }

    flushCurrentBucketLocked(eventTimeNs);

    // Adjusts the bucket start and end times.
    // The event may be several whole buckets ahead; skip the empty ones so the
    // new bucket start stays aligned to the original bucket grid.
    int64_t numBucketsForward = 1 + (eventTimeNs - currentBucketEndTimeNs) / mBucketSizeNs;
    mCurrentBucketStartTimeNs = currentBucketEndTimeNs + (numBucketsForward - 1) * mBucketSizeNs;
    mCurrentBucketNum += numBucketsForward;
    VLOG("Gauge metric %lld: new bucket start time: %lld", (long long)mMetricId,
         (long long)mCurrentBucketStartTimeNs);
}

// Closes the in-progress bucket at eventTimeNs (capped at the nominal bucket
// end): moves its slices into mPastBuckets if the bucket met the minimum
// duration, otherwise records it as skipped; feeds completed full buckets to
// the anomaly trackers; then resets the current sliced bucket. Note this does
// NOT advance the bucket start/number — flushIfNeededLocked does that.
void GaugeMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
    int64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();

    GaugeBucket info;
    info.mBucketStartNs = mCurrentBucketStartTimeNs;
    if (eventTimeNs < fullBucketEndTimeNs) {
        // Partial bucket (e.g. forced flush before the boundary).
        info.mBucketEndNs = eventTimeNs;
    } else {
        info.mBucketEndNs = fullBucketEndTimeNs;
    }

    if (info.mBucketEndNs - mCurrentBucketStartTimeNs >= mMinBucketSizeNs) {
        for (const auto& slice : *mCurrentSlicedBucket) {
            info.mGaugeAtoms = slice.second;
            auto& bucketList = mPastBuckets[slice.first];
            bucketList.push_back(info);
            VLOG("Gauge gauge metric %lld, dump key value: %s", (long long)mMetricId,
                 slice.first.toString().c_str());
        }
    } else {
        // Bucket too short to report; remember the interval as skipped.
        mSkippedBuckets.emplace_back(info.mBucketStartNs, info.mBucketEndNs);
    }

    // If we have anomaly trackers, we need to update the partial bucket values.
    if (mAnomalyTrackers.size() > 0) {
        updateCurrentSlicedBucketForAnomaly();

        if (eventTimeNs > fullBucketEndTimeNs) {
            // This is known to be a full bucket, so send this data to the anomaly tracker.
            for (auto& tracker : mAnomalyTrackers) {
                tracker->addPastBucket(mCurrentSlicedBucketForAnomaly, mCurrentBucketNum);
            }
            mCurrentSlicedBucketForAnomaly = std::make_shared<DimToValMap>();
        }
    }

    mCurrentSlicedBucket = std::make_shared<DimToGaugeAtomsMap>();
}

// Approximate in-memory footprint of the stored past buckets, computed from
// struct sizes of the gauge atoms and their field values.
size_t GaugeMetricProducer::byteSizeLocked() const {
    size_t totalSize = 0;
    for (const auto& pair : mPastBuckets) {
        for (const auto& bucket : pair.second) {
            totalSize += bucket.mGaugeAtoms.size() * sizeof(GaugeAtom);
            for (const auto& atom : bucket.mGaugeAtoms) {
                if (atom.mFields != nullptr) {
                    totalSize += atom.mFields->size() * sizeof(FieldValue);
                }
            }
        }
    }
    return totalSize;
}

}  // namespace statsd
}  // namespace os
}  // namespace android