event_selection_set.cpp revision 20b49f8991b55eda3309a0bbe3c18153376065da
1/*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "event_selection_set.h"
18
19#include <atomic>
20#include <thread>
21
22#include <android-base/logging.h>
23
24#include "environment.h"
25#include "event_attr.h"
26#include "event_type.h"
27#include "IOEventLoop.h"
28#include "perf_regs.h"
29#include "utils.h"
30
// Default sampling rates used by BuildAndCheckEventSelection(): non-tracepoint
// events sample by frequency (samples/sec), tracepoint events sample every
// occurrence (period == 1).
constexpr uint64_t DEFAULT_SAMPLE_FREQ_FOR_NONTRACEPOINT_EVENT = 4000;
constexpr uint64_t DEFAULT_SAMPLE_PERIOD_FOR_TRACEPOINT_EVENT = 1;
33
34bool IsBranchSamplingSupported() {
35  const EventType* type = FindEventTypeByName("cpu-cycles");
36  if (type == nullptr) {
37    return false;
38  }
39  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
40  attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
41  attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
42  return IsEventAttrSupported(attr);
43}
44
45bool IsDwarfCallChainSamplingSupported() {
46  const EventType* type = FindEventTypeByName("cpu-cycles");
47  if (type == nullptr) {
48    return false;
49  }
50  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
51  attr.sample_type |=
52      PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
53  attr.exclude_callchain_user = 1;
54  attr.sample_regs_user = GetSupportedRegMask(GetBuildArch());
55  attr.sample_stack_user = 8192;
56  return IsEventAttrSupported(attr);
57}
58
59bool IsDumpingRegsForTracepointEventsSupported() {
60  const EventType* event_type = FindEventTypeByName("sched:sched_switch");
61  if (event_type == nullptr) {
62    return false;
63  }
64  std::atomic<bool> done(false);
65  std::atomic<pid_t> thread_id(0);
66  std::thread thread([&]() {
67    thread_id = gettid();
68    while (!done) {
69      usleep(1);
70    }
71    usleep(1);  // Make a sched out to generate one sample.
72  });
73  while (thread_id == 0) {
74    usleep(1);
75  }
76  perf_event_attr attr = CreateDefaultPerfEventAttr(*event_type);
77  attr.freq = 0;
78  attr.sample_period = 1;
79  std::unique_ptr<EventFd> event_fd =
80      EventFd::OpenEventFile(attr, thread_id, -1, nullptr);
81  if (event_fd == nullptr) {
82    return false;
83  }
84  if (!event_fd->CreateMappedBuffer(4, true)) {
85    return false;
86  }
87  done = true;
88  thread.join();
89
90  std::vector<char> buffer;
91  size_t buffer_pos = 0;
92  size_t size = event_fd->GetAvailableMmapData(buffer, buffer_pos);
93  std::vector<std::unique_ptr<Record>> records =
94      ReadRecordsFromBuffer(attr, buffer.data(), size);
95  for (auto& r : records) {
96    if (r->type() == PERF_RECORD_SAMPLE) {
97      auto& record = *static_cast<SampleRecord*>(r.get());
98      if (record.ip_data.ip != 0) {
99        return true;
100      }
101    }
102  }
103  return false;
104}
105
// Parse `event_name` (e.g. "cpu-cycles:u") and fill `selection` with a
// perf_event_attr configured with default sampling settings. Fails when the
// event can't be parsed, uses modifiers rejected for stat, isn't supported
// on the device, or duplicates an already-added event.
bool EventSelectionSet::BuildAndCheckEventSelection(
    const std::string& event_name, EventSelection* selection) {
  std::unique_ptr<EventTypeAndModifier> event_type = ParseEventType(event_name);
  if (event_type == nullptr) {
    return false;
  }
  if (for_stat_cmd_) {
    // The kernel rejects user/kernel-only restriction for these sw events.
    if (event_type->event_type.name == "cpu-clock" ||
        event_type->event_type.name == "task-clock") {
      if (event_type->exclude_user || event_type->exclude_kernel) {
        LOG(ERROR) << "Modifier u and modifier k used in event type "
                   << event_type->event_type.name
                   << " are not supported by the kernel.";
        return false;
      }
    }
  }
  // Copy the parsed modifiers onto the attr.
  selection->event_type_modifier = *event_type;
  selection->event_attr = CreateDefaultPerfEventAttr(event_type->event_type);
  selection->event_attr.exclude_user = event_type->exclude_user;
  selection->event_attr.exclude_kernel = event_type->exclude_kernel;
  selection->event_attr.exclude_hv = event_type->exclude_hv;
  selection->event_attr.exclude_host = event_type->exclude_host;
  selection->event_attr.exclude_guest = event_type->exclude_guest;
  selection->event_attr.precise_ip = event_type->precise_ip;
  // Tracepoints sample every event; others sample at a default frequency.
  if (event_type->event_type.type == PERF_TYPE_TRACEPOINT) {
    selection->event_attr.freq = 0;
    selection->event_attr.sample_period = DEFAULT_SAMPLE_PERIOD_FOR_TRACEPOINT_EVENT;
  } else {
    selection->event_attr.freq = 1;
    selection->event_attr.sample_freq =
        AdjustSampleFrequency(DEFAULT_SAMPLE_FREQ_FOR_NONTRACEPOINT_EVENT);
  }
  if (!IsEventAttrSupported(selection->event_attr)) {
    LOG(ERROR) << "Event type '" << event_type->name
               << "' is not supported on the device";
    return false;
  }
  selection->event_fds.clear();

  // Reject an event (with identical modifiers) that was already added.
  for (const auto& group : groups_) {
    for (const auto& sel : group) {
      if (sel.event_type_modifier.name == selection->event_type_modifier.name) {
        LOG(ERROR) << "Event type '" << sel.event_type_modifier.name
                   << "' appears more than once";
        return false;
      }
    }
  }
  return true;
}
157
158bool EventSelectionSet::AddEventType(const std::string& event_name, size_t* group_id) {
159  return AddEventGroup(std::vector<std::string>(1, event_name), group_id);
160}
161
162bool EventSelectionSet::AddEventGroup(
163    const std::vector<std::string>& event_names, size_t* group_id) {
164  EventSelectionGroup group;
165  for (const auto& event_name : event_names) {
166    EventSelection selection;
167    if (!BuildAndCheckEventSelection(event_name, &selection)) {
168      return false;
169    }
170    group.push_back(std::move(selection));
171  }
172  groups_.push_back(std::move(group));
173  UnionSampleType();
174  if (group_id != nullptr) {
175    *group_id = groups_.size() - 1;
176  }
177  return true;
178}
179
180std::vector<const EventType*> EventSelectionSet::GetEvents() const {
181  std::vector<const EventType*> result;
182  for (const auto& group : groups_) {
183    for (const auto& selection : group) {
184      result.push_back(&selection.event_type_modifier.event_type);
185    }
186  }
187  return result;
188}
189
190std::vector<const EventType*> EventSelectionSet::GetTracepointEvents() const {
191  std::vector<const EventType*> result;
192  for (const auto& group : groups_) {
193    for (const auto& selection : group) {
194      if (selection.event_type_modifier.event_type.type ==
195          PERF_TYPE_TRACEPOINT) {
196        result.push_back(&selection.event_type_modifier.event_type);
197      }
198    }
199  }
200  return result;
201}
202
203bool EventSelectionSet::ExcludeKernel() const {
204  for (const auto& group : groups_) {
205    for (const auto& selection : group) {
206      if (!selection.event_type_modifier.exclude_kernel) {
207        return false;
208      }
209    }
210  }
211  return true;
212}
213
214bool EventSelectionSet::HasInplaceSampler() const {
215  for (const auto& group : groups_) {
216    for (const auto& sel : group) {
217      if (sel.event_attr.type == SIMPLEPERF_TYPE_USER_SPACE_SAMPLERS &&
218          sel.event_attr.config == SIMPLEPERF_CONFIG_INPLACE_SAMPLER) {
219        return true;
220      }
221    }
222  }
223  return false;
224}
225
226std::vector<EventAttrWithId> EventSelectionSet::GetEventAttrWithId() const {
227  std::vector<EventAttrWithId> result;
228  for (const auto& group : groups_) {
229    for (const auto& selection : group) {
230      EventAttrWithId attr_id;
231      attr_id.attr = &selection.event_attr;
232      for (const auto& fd : selection.event_fds) {
233        attr_id.ids.push_back(fd->Id());
234      }
235      if (!selection.inplace_samplers.empty()) {
236        attr_id.ids.push_back(selection.inplace_samplers[0]->Id());
237      }
238      result.push_back(attr_id);
239    }
240  }
241  return result;
242}
243
244// Union the sample type of different event attrs can make reading sample
245// records in perf.data easier.
246void EventSelectionSet::UnionSampleType() {
247  uint64_t sample_type = 0;
248  for (const auto& group : groups_) {
249    for (const auto& selection : group) {
250      sample_type |= selection.event_attr.sample_type;
251    }
252  }
253  for (auto& group : groups_) {
254    for (auto& selection : group) {
255      selection.event_attr.sample_type = sample_type;
256    }
257  }
258}
259
260void EventSelectionSet::SetEnableOnExec(bool enable) {
261  for (auto& group : groups_) {
262    for (auto& selection : group) {
263      // If sampling is enabled on exec, then it is disabled at startup,
264      // otherwise it should be enabled at startup. Don't use
265      // ioctl(PERF_EVENT_IOC_ENABLE) to enable it after perf_event_open().
266      // Because some android kernels can't handle ioctl() well when cpu-hotplug
267      // happens. See http://b/25193162.
268      if (enable) {
269        selection.event_attr.enable_on_exec = 1;
270        selection.event_attr.disabled = 1;
271      } else {
272        selection.event_attr.enable_on_exec = 0;
273        selection.event_attr.disabled = 0;
274      }
275    }
276  }
277}
278
279bool EventSelectionSet::GetEnableOnExec() {
280  for (const auto& group : groups_) {
281    for (const auto& selection : group) {
282      if (selection.event_attr.enable_on_exec == 0) {
283        return false;
284      }
285    }
286  }
287  return true;
288}
289
290void EventSelectionSet::SampleIdAll() {
291  for (auto& group : groups_) {
292    for (auto& selection : group) {
293      selection.event_attr.sample_id_all = 1;
294    }
295  }
296}
297
298void EventSelectionSet::SetSampleSpeed(size_t group_id, const SampleSpeed& speed) {
299  CHECK_LT(group_id, groups_.size());
300  for (auto& selection : groups_[group_id]) {
301    if (speed.UseFreq()) {
302      selection.event_attr.freq = 1;
303      selection.event_attr.sample_freq = speed.sample_freq;
304    } else {
305      selection.event_attr.freq = 0;
306      selection.event_attr.sample_period = speed.sample_period;
307    }
308  }
309}
310
311bool EventSelectionSet::SetBranchSampling(uint64_t branch_sample_type) {
312  if (branch_sample_type != 0 &&
313      (branch_sample_type &
314       (PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_ANY_CALL |
315        PERF_SAMPLE_BRANCH_ANY_RETURN | PERF_SAMPLE_BRANCH_IND_CALL)) == 0) {
316    LOG(ERROR) << "Invalid branch_sample_type: 0x" << std::hex
317               << branch_sample_type;
318    return false;
319  }
320  if (branch_sample_type != 0 && !IsBranchSamplingSupported()) {
321    LOG(ERROR) << "branch stack sampling is not supported on this device.";
322    return false;
323  }
324  for (auto& group : groups_) {
325    for (auto& selection : group) {
326      perf_event_attr& attr = selection.event_attr;
327      if (branch_sample_type != 0) {
328        attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
329      } else {
330        attr.sample_type &= ~PERF_SAMPLE_BRANCH_STACK;
331      }
332      attr.branch_sample_type = branch_sample_type;
333    }
334  }
335  return true;
336}
337
338void EventSelectionSet::EnableFpCallChainSampling() {
339  for (auto& group : groups_) {
340    for (auto& selection : group) {
341      selection.event_attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
342    }
343  }
344}
345
346bool EventSelectionSet::EnableDwarfCallChainSampling(uint32_t dump_stack_size) {
347  if (!IsDwarfCallChainSamplingSupported()) {
348    LOG(ERROR) << "dwarf callchain sampling is not supported on this device.";
349    return false;
350  }
351  for (auto& group : groups_) {
352    for (auto& selection : group) {
353      selection.event_attr.sample_type |= PERF_SAMPLE_CALLCHAIN |
354                                          PERF_SAMPLE_REGS_USER |
355                                          PERF_SAMPLE_STACK_USER;
356      selection.event_attr.exclude_callchain_user = 1;
357      selection.event_attr.sample_regs_user =
358          GetSupportedRegMask(GetMachineArch());
359      selection.event_attr.sample_stack_user = dump_stack_size;
360    }
361  }
362  return true;
363}
364
365void EventSelectionSet::SetInherit(bool enable) {
366  for (auto& group : groups_) {
367    for (auto& selection : group) {
368      selection.event_attr.inherit = (enable ? 1 : 0);
369    }
370  }
371}
372
373bool EventSelectionSet::NeedKernelSymbol() const {
374  for (const auto& group : groups_) {
375    for (const auto& selection : group) {
376      if (!selection.event_type_modifier.exclude_kernel) {
377        return true;
378      }
379    }
380  }
381  return false;
382}
383
384static bool CheckIfCpusOnline(const std::vector<int>& cpus) {
385  std::vector<int> online_cpus = GetOnlineCpus();
386  for (const auto& cpu : cpus) {
387    if (std::find(online_cpus.begin(), online_cpus.end(), cpu) ==
388        online_cpus.end()) {
389      LOG(ERROR) << "cpu " << cpu << " is not online.";
390      return false;
391    }
392  }
393  return true;
394}
395
396bool EventSelectionSet::OpenEventFilesOnGroup(EventSelectionGroup& group,
397                                              pid_t tid, int cpu,
398                                              std::string* failed_event_type) {
399  std::vector<std::unique_ptr<EventFd>> event_fds;
400  // Given a tid and cpu, events on the same group should be all opened
401  // successfully or all failed to open.
402  EventFd* group_fd = nullptr;
403  for (auto& selection : group) {
404    std::unique_ptr<EventFd> event_fd =
405        EventFd::OpenEventFile(selection.event_attr, tid, cpu, group_fd);
406    if (event_fd != nullptr) {
407      LOG(VERBOSE) << "OpenEventFile for " << event_fd->Name();
408      event_fds.push_back(std::move(event_fd));
409    } else {
410      if (failed_event_type != nullptr) {
411        *failed_event_type = selection.event_type_modifier.name;
412        return false;
413      }
414    }
415    if (group_fd == nullptr) {
416      group_fd = event_fd.get();
417    }
418  }
419  for (size_t i = 0; i < group.size(); ++i) {
420    group[i].event_fds.push_back(std::move(event_fds[i]));
421  }
422  return true;
423}
424
425static std::map<pid_t, std::set<pid_t>> PrepareThreads(const std::set<pid_t>& processes,
426                                                       const std::set<pid_t>& threads) {
427  std::map<pid_t, std::set<pid_t>> result;
428  for (auto& pid : processes) {
429    std::vector<pid_t> tids = GetThreadsInProcess(pid);
430    std::set<pid_t>& threads_in_process = result[pid];
431    threads_in_process.insert(tids.begin(), tids.end());
432  }
433  for (auto& tid : threads) {
434    // tid = -1 means monitoring all threads.
435    if (tid == -1) {
436      result[-1].insert(-1);
437    } else {
438      pid_t pid;
439      if (GetProcessForThread(tid, &pid)) {
440        result[pid].insert(tid);
441      }
442    }
443  }
444  return result;
445}
446
// Open perf event files for every group, for every monitored thread, on the
// requested cpus (empty = all online cpus, {-1} = one fd covering all cpus).
// User-space sampler groups are handled separately. Because cpus can go
// offline at runtime, opening only needs to succeed on at least one cpu per
// thread.
bool EventSelectionSet::OpenEventFiles(const std::vector<int>& on_cpus) {
  std::vector<int> cpus = on_cpus;
  if (!cpus.empty()) {
    // cpus = {-1} means open an event file for all cpus.
    if (!(cpus.size() == 1 && cpus[0] == -1) && !CheckIfCpusOnline(cpus)) {
      return false;
    }
  } else {
    cpus = GetOnlineCpus();
  }
  std::map<pid_t, std::set<pid_t>> process_map = PrepareThreads(processes_, threads_);
  for (auto& group : groups_) {
    if (IsUserSpaceSamplerGroup(group)) {
      if (!OpenUserSpaceSamplersOnGroup(group, process_map)) {
        return false;
      }
    } else {
      for (const auto& pair : process_map) {
        for (const auto& tid : pair.second) {
          size_t success_cpu_count = 0;
          std::string failed_event_type;
          for (const auto& cpu : cpus) {
            if (OpenEventFilesOnGroup(group, tid, cpu, &failed_event_type)) {
              success_cpu_count++;
            }
          }
          // As the online cpus can be enabled or disabled at runtime, we may not
          // open event file for all cpus successfully. But we should open at
          // least one cpu successfully.
          if (success_cpu_count == 0) {
            PLOG(ERROR) << "failed to open perf event file for event_type "
                        << failed_event_type << " for "
                        << (tid == -1 ? "all threads" : "thread " + std::to_string(tid))
                        << " on all cpus";
            return false;
          }
        }
      }
    }
  }
  return true;
}
489
490bool EventSelectionSet::IsUserSpaceSamplerGroup(EventSelectionGroup& group) {
491  return group.size() == 1 && group[0].event_attr.type == SIMPLEPERF_TYPE_USER_SPACE_SAMPLERS;
492}
493
494bool EventSelectionSet::OpenUserSpaceSamplersOnGroup(EventSelectionGroup& group,
495    const std::map<pid_t, std::set<pid_t>>& process_map) {
496  CHECK_EQ(group.size(), 1u);
497  for (auto& selection : group) {
498    if (selection.event_attr.type == SIMPLEPERF_TYPE_USER_SPACE_SAMPLERS &&
499        selection.event_attr.config == SIMPLEPERF_CONFIG_INPLACE_SAMPLER) {
500      for (auto& pair : process_map) {
501        std::unique_ptr<InplaceSamplerClient> sampler = InplaceSamplerClient::Create(
502            selection.event_attr, pair.first, pair.second);
503        if (sampler == nullptr) {
504          return false;
505        }
506        selection.inplace_samplers.push_back(std::move(sampler));
507      }
508    }
509  }
510  return true;
511}
512
513static bool ReadCounter(EventFd* event_fd, CounterInfo* counter) {
514  if (!event_fd->ReadCounter(&counter->counter)) {
515    return false;
516  }
517  counter->tid = event_fd->ThreadId();
518  counter->cpu = event_fd->Cpu();
519  return true;
520}
521
522bool EventSelectionSet::ReadCounters(std::vector<CountersInfo>* counters) {
523  counters->clear();
524  for (size_t i = 0; i < groups_.size(); ++i) {
525    for (auto& selection : groups_[i]) {
526      CountersInfo counters_info;
527      counters_info.group_id = i;
528      counters_info.event_name = selection.event_type_modifier.event_type.name;
529      counters_info.event_modifier = selection.event_type_modifier.modifier;
530      counters_info.counters = selection.hotplugged_counters;
531      for (auto& event_fd : selection.event_fds) {
532        CounterInfo counter;
533        if (!ReadCounter(event_fd.get(), &counter)) {
534          return false;
535        }
536        counters_info.counters.push_back(counter);
537      }
538      counters->push_back(counters_info);
539    }
540  }
541  return true;
542}
543
// Map kernel ring buffers, starting at max_mmap_pages and halving on
// failure until min_mmap_pages; errors are only reported at the minimum
// size. On each failed attempt, partially created buffers are destroyed
// before retrying with a smaller size.
bool EventSelectionSet::MmapEventFiles(size_t min_mmap_pages,
                                       size_t max_mmap_pages) {
  for (size_t i = max_mmap_pages; i >= min_mmap_pages; i >>= 1) {
    if (MmapEventFiles(i, i == min_mmap_pages)) {
      LOG(VERBOSE) << "Mapped buffer size is " << i << " pages.";
      mmap_pages_ = i;
      return true;
    }
    // Clean up any buffers created by the failed attempt.
    for (auto& group : groups_) {
      for (auto& selection : group) {
        for (auto& event_fd : selection.event_fds) {
          event_fd->DestroyMappedBuffer();
        }
      }
    }
  }
  return false;
}
562
563bool EventSelectionSet::MmapEventFiles(size_t mmap_pages, bool report_error) {
564  // Allocate a mapped buffer for each cpu.
565  std::map<int, EventFd*> cpu_map;
566  for (auto& group : groups_) {
567    for (auto& selection : group) {
568      for (auto& event_fd : selection.event_fds) {
569        auto it = cpu_map.find(event_fd->Cpu());
570        if (it != cpu_map.end()) {
571          if (!event_fd->ShareMappedBuffer(*(it->second), report_error)) {
572            return false;
573          }
574        } else {
575          if (!event_fd->CreateMappedBuffer(mmap_pages, report_error)) {
576            return false;
577          }
578          cpu_map[event_fd->Cpu()] = event_fd.get();
579        }
580      }
581    }
582  }
583  return true;
584}
585
// Register the record callback and start polling: each fd owning a mapped
// buffer triggers ReadMmapEventData() when readable, and each inplace
// sampler delivers records straight to `callback`.
bool EventSelectionSet::PrepareToReadMmapEventData(const std::function<bool(Record*)>& callback) {
  // Add read Events for perf event files having mapped buffer.
  for (auto& group : groups_) {
    for (auto& selection : group) {
      for (auto& event_fd : selection.event_fds) {
        if (event_fd->HasMappedBuffer()) {
          if (!event_fd->StartPolling(*loop_, [this]() {
                return ReadMmapEventData();
              })) {
            return false;
          }
        }
      }
      // Inplace samplers bypass the mmap path and call `callback` directly;
      // the second closure decides whether monitoring should continue.
      for (auto& sampler : selection.inplace_samplers) {
        if (!sampler->StartPolling(*loop_, callback,
                                   [&] { return CheckMonitoredTargets(); })) {
          return false;
        }
      }
    }
  }

  // Prepare record callback function.
  record_callback_ = callback;
  return true;
}
612
// When reading from mmap buffers, we prefer reading from all buffers at once
// rather than reading one buffer at a time. Because by reading all buffers
// at once, we can merge records from different buffers easily in memory.
// Otherwise, we have to sort records with greater effort.
bool EventSelectionSet::ReadMmapEventData() {
  // Each RecordBufferHead tracks one buffer's slice of record_buffer_:
  // [current_pos, end_pos) plus the attr needed to parse its records.
  size_t head_size = 0;
  std::vector<RecordBufferHead>& heads = record_buffer_heads_;
  if (heads.empty()) {
    heads.resize(1);
  }
  heads[0].current_pos = 0;
  size_t buffer_pos = 0;

  // Drain every mapped buffer into record_buffer_, appending one head per
  // buffer that actually produced data. heads always keeps one spare entry
  // whose current_pos marks where the next slice would start.
  for (auto& group : groups_) {
    for (auto& selection : group) {
      for (auto& event_fd : selection.event_fds) {
        if (event_fd->HasMappedBuffer()) {
          if (event_fd->GetAvailableMmapData(record_buffer_, buffer_pos) != 0) {
            heads[head_size].end_pos = buffer_pos;
            heads[head_size].attr = &selection.event_attr;
            head_size++;
            if (heads.size() == head_size) {
              heads.resize(head_size + 1);
            }
            heads[head_size].current_pos = buffer_pos;
          }
        }
      }
    }
  }

  if (head_size == 0) {
    return true;
  }
  if (head_size == 1) {
    // Only one buffer has data, process it directly.
    std::vector<std::unique_ptr<Record>> records =
        ReadRecordsFromBuffer(*heads[0].attr,
                              record_buffer_.data(), buffer_pos);
    for (auto& r : records) {
      if (!record_callback_(r.get())) {
        return false;
      }
    }
  } else {
    // Use a priority queue to merge records from different buffers. As
    // records from the same buffer are already ordered by time, we only
    // need to merge the first record from all buffers. And each time a
    // record is popped from the queue, we put the next record from its
    // buffer into the queue.
    auto comparator = [&](RecordBufferHead* h1, RecordBufferHead* h2) {
      return h1->timestamp > h2->timestamp;
    };
    std::priority_queue<RecordBufferHead*, std::vector<RecordBufferHead*>, decltype(comparator)> q(comparator);
    for (size_t i = 0; i < head_size; ++i) {
      // Prime the queue with the first record of each buffer slice.
      RecordBufferHead& h = heads[i];
      h.r = ReadRecordFromBuffer(*h.attr, &record_buffer_[h.current_pos]);
      h.timestamp = h.r->Timestamp();
      h.current_pos += h.r->size();
      q.push(&h);
    }
    while (!q.empty()) {
      RecordBufferHead* h = q.top();
      q.pop();
      if (!record_callback_(h->r.get())) {
        return false;
      }
      if (h->current_pos < h->end_pos) {
        // Refill from the same buffer slice it came from.
        h->r = ReadRecordFromBuffer(*h->attr, &record_buffer_[h->current_pos]);
        h->timestamp = h->r->Timestamp();
        h->current_pos += h->r->size();
        q.push(h);
      }
    }
  }
  return true;
}
690
// Drain remaining mmap data, then — if inplace samplers are in use — run a
// short event loop asking each open sampler to flush and stop, exiting when
// all have closed or after a one second timeout.
bool EventSelectionSet::FinishReadMmapEventData() {
  if (!ReadMmapEventData()) {
    return false;
  }
  if (!HasInplaceSampler()) {
    return true;
  }
  // Inplace sampler server uses a buffer to cache samples before sending them, so we need to
  // explicitly ask it to send the cached samples.
  loop_.reset(new IOEventLoop);
  size_t inplace_sampler_count = 0;
  // Exit the loop once the last sampler reports closed.
  auto close_callback = [&]() {
    if (--inplace_sampler_count == 0) {
      return loop_->ExitLoop();
    }
    return true;
  };
  for (auto& group : groups_) {
    for (auto& sel : group) {
      for (auto& sampler : sel.inplace_samplers) {
        if (!sampler->IsClosed()) {
          if (!sampler->StopProfiling(*loop_, close_callback)) {
            return false;
          }
          inplace_sampler_count++;
        }
      }
    }
  }
  if (inplace_sampler_count == 0) {
    return true;
  }

  // Set a timeout to exit the loop.
  timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  if (!loop_->AddPeriodicEvent(tv, [&]() { return loop_->ExitLoop(); })) {
    return false;
  }
  return loop_->RunLoop();
}
733
734bool EventSelectionSet::HandleCpuHotplugEvents(const std::vector<int>& monitored_cpus,
735                                               double check_interval_in_sec) {
736  monitored_cpus_.insert(monitored_cpus.begin(), monitored_cpus.end());
737  online_cpus_ = GetOnlineCpus();
738  if (!loop_->AddPeriodicEvent(SecondToTimeval(check_interval_in_sec),
739                               [&]() { return DetectCpuHotplugEvents(); })) {
740    return false;
741  }
742  return true;
743}
744
745bool EventSelectionSet::DetectCpuHotplugEvents() {
746  std::vector<int> new_cpus = GetOnlineCpus();
747  for (const auto& cpu : online_cpus_) {
748    if (std::find(new_cpus.begin(), new_cpus.end(), cpu) == new_cpus.end()) {
749      if (monitored_cpus_.empty() ||
750          monitored_cpus_.find(cpu) != monitored_cpus_.end()) {
751        LOG(INFO) << "Cpu " << cpu << " is offlined";
752        if (!HandleCpuOfflineEvent(cpu)) {
753          return false;
754        }
755      }
756    }
757  }
758  for (const auto& cpu : new_cpus) {
759    if (std::find(online_cpus_.begin(), online_cpus_.end(), cpu) ==
760        online_cpus_.end()) {
761      if (monitored_cpus_.empty() ||
762          monitored_cpus_.find(cpu) != monitored_cpus_.end()) {
763        LOG(INFO) << "Cpu " << cpu << " is onlined";
764        if (!HandleCpuOnlineEvent(cpu)) {
765          return false;
766        }
767      }
768    }
769  }
770  online_cpus_ = new_cpus;
771  return true;
772}
773
// Tear down the event fds of an offlined cpu. For record: first drain mmap
// data so no samples are lost, and stop polling fds that own a buffer. For
// stat: save the final counter values into hotplugged_counters so
// ReadCounters() still reports them.
bool EventSelectionSet::HandleCpuOfflineEvent(int cpu) {
  if (!for_stat_cmd_) {
    // Read mmap data here, so we won't lose the existing records of the
    // offlined cpu.
    if (!ReadMmapEventData()) {
      return false;
    }
  }
  for (auto& group : groups_) {
    for (auto& selection : group) {
      // Erase-while-iterating: `it` is advanced by erase() on removal.
      for (auto it = selection.event_fds.begin();
           it != selection.event_fds.end();) {
        if ((*it)->Cpu() == cpu) {
          if (for_stat_cmd_) {
            CounterInfo counter;
            if (!ReadCounter(it->get(), &counter)) {
              return false;
            }
            selection.hotplugged_counters.push_back(counter);
          } else {
            if ((*it)->HasMappedBuffer()) {
              if (!(*it)->StopPolling()) {
                return false;
              }
            }
          }
          it = selection.event_fds.erase(it);
        } else {
          ++it;
        }
      }
    }
  }
  return true;
}
809
// Open event fds on a newly onlined cpu for every monitored thread. Open
// failures are only warnings, since the cpu may already have gone offline
// again. For record: also map a buffer for the cpu and emit an
// EventIdRecord mapping the new fds' ids to their attr indexes.
bool EventSelectionSet::HandleCpuOnlineEvent(int cpu) {
  // We need to start profiling when opening new event files.
  SetEnableOnExec(false);
  std::map<pid_t, std::set<pid_t>> process_map = PrepareThreads(processes_, threads_);
  for (auto& group : groups_) {
    // User-space samplers are per-process, not per-cpu; nothing to do.
    if (IsUserSpaceSamplerGroup(group)) {
      continue;
    }
    for (const auto& pair : process_map) {
      for (const auto& tid : pair.second) {
        std::string failed_event_type;
        if (!OpenEventFilesOnGroup(group, tid, cpu, &failed_event_type)) {
          // If failed to open event files, maybe the cpu has been offlined.
          PLOG(WARNING) << "failed to open perf event file for event_type "
                        << failed_event_type << " for "
                        << (tid == -1 ? "all threads" : "thread " + std::to_string(tid))
                        << " on cpu " << cpu;
        }
      }
    }
  }
  if (!for_stat_cmd_) {
    // Prepare mapped buffer.
    if (!CreateMappedBufferForCpu(cpu)) {
      return false;
    }
    // Send a EventIdRecord.
    std::vector<uint64_t> event_id_data;
    uint64_t attr_id = 0;
    for (const auto& group : groups_) {
      for (const auto& selection : group) {
        for (const auto& event_fd : selection.event_fds) {
          if (event_fd->Cpu() == cpu) {
            // Pairs of (attr index, event id) for the new fds on this cpu.
            event_id_data.push_back(attr_id);
            event_id_data.push_back(event_fd->Id());
          }
        }
        ++attr_id;
      }
    }
    EventIdRecord r(event_id_data);
    if (!record_callback_(&r)) {
      return false;
    }
  }
  return true;
}
857
// Create one mapped buffer for all event fds on `cpu`: the first fd found
// maps a buffer of mmap_pages_, the rest share it. The owning fd is then
// polled to drain the buffer via ReadMmapEventData().
bool EventSelectionSet::CreateMappedBufferForCpu(int cpu) {
  EventFd* fd_with_buffer = nullptr;
  for (auto& group : groups_) {
    for (auto& selection : group) {
      for (auto& event_fd : selection.event_fds) {
        if (event_fd->Cpu() != cpu) {
          continue;
        }
        if (fd_with_buffer == nullptr) {
          if (!event_fd->CreateMappedBuffer(mmap_pages_, true)) {
            return false;
          }
          fd_with_buffer = event_fd.get();
        } else {
          if (!event_fd->ShareMappedBuffer(*fd_with_buffer, true)) {
            // Undo the partial setup so no orphan buffer is left behind.
            fd_with_buffer->DestroyMappedBuffer();
            return false;
          }
        }
      }
    }
  }
  if (fd_with_buffer != nullptr &&
      !fd_with_buffer->StartPolling(*loop_, [this]() {
        return ReadMmapEventData();
      })) {
    return false;
  }
  return true;
}
888
889bool EventSelectionSet::StopWhenNoMoreTargets(double check_interval_in_sec) {
890  return loop_->AddPeriodicEvent(SecondToTimeval(check_interval_in_sec),
891                                 [&]() { return CheckMonitoredTargets(); });
892}
893
894bool EventSelectionSet::CheckMonitoredTargets() {
895  if (!HasSampler()) {
896    return loop_->ExitLoop();
897  }
898  for (const auto& tid : threads_) {
899    if (IsThreadAlive(tid)) {
900      return true;
901    }
902  }
903  for (const auto& pid : processes_) {
904    if (IsThreadAlive(pid)) {
905      return true;
906    }
907  }
908  return loop_->ExitLoop();
909}
910
911bool EventSelectionSet::HasSampler() {
912  for (auto& group : groups_) {
913    for (auto& sel : group) {
914      if (!sel.event_fds.empty()) {
915        return true;
916      }
917      for (auto& sampler : sel.inplace_samplers) {
918        if (!sampler->IsClosed()) {
919          return true;
920        }
921      }
922    }
923  }
924  return false;
925}
926