event_selection_set.cpp revision 5f43fc4ac870b49542b4cf530a3729f9f1e0e9ab
1/*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "event_selection_set.h"
18
19#include <android-base/logging.h>
20
21#include "environment.h"
22#include "event_attr.h"
23#include "event_type.h"
24#include "IOEventLoop.h"
25#include "perf_regs.h"
26#include "utils.h"
27
// Defaults applied by UseDefaultSampleFreq(): non-tracepoint events sample at
// 4000 Hz; tracepoint events use period-based sampling with period 1 (one
// sample per tracepoint hit).
constexpr uint64_t DEFAULT_SAMPLE_FREQ_FOR_NONTRACEPOINT_EVENT = 4000;
constexpr uint64_t DEFAULT_SAMPLE_PERIOD_FOR_TRACEPOINT_EVENT = 1;
30
31bool IsBranchSamplingSupported() {
32  const EventType* type = FindEventTypeByName("cpu-cycles");
33  if (type == nullptr) {
34    return false;
35  }
36  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
37  attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
38  attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
39  return IsEventAttrSupportedByKernel(attr);
40}
41
42bool IsDwarfCallChainSamplingSupported() {
43  const EventType* type = FindEventTypeByName("cpu-cycles");
44  if (type == nullptr) {
45    return false;
46  }
47  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
48  attr.sample_type |=
49      PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
50  attr.exclude_callchain_user = 1;
51  attr.sample_regs_user = GetSupportedRegMask(GetBuildArch());
52  attr.sample_stack_user = 8192;
53  return IsEventAttrSupportedByKernel(attr);
54}
55
// Parses event_name ("name[:modifiers]") into *selection, building its
// perf_event_attr from the parsed modifiers, and validates it: the attr must
// be supported by the kernel and the event must not already be selected.
// Returns false (with an error logged) on any failure.
bool EventSelectionSet::BuildAndCheckEventSelection(
    const std::string& event_name, EventSelection* selection) {
  std::unique_ptr<EventTypeAndModifier> event_type = ParseEventType(event_name);
  if (event_type == nullptr) {
    return false;
  }
  if (for_stat_cmd_) {
    // For the stat command, reject u/k modifiers on cpu-clock/task-clock;
    // per the message below the kernel does not support them there.
    if (event_type->event_type.name == "cpu-clock" ||
        event_type->event_type.name == "task-clock") {
      if (event_type->exclude_user || event_type->exclude_kernel) {
        LOG(ERROR) << "Modifier u and modifier k used in event type "
                   << event_type->event_type.name
                   << " are not supported by the kernel.";
        return false;
      }
    }
  }
  // Copy the parsed modifier flags onto the attr the kernel will see.
  selection->event_type_modifier = *event_type;
  selection->event_attr = CreateDefaultPerfEventAttr(event_type->event_type);
  selection->event_attr.exclude_user = event_type->exclude_user;
  selection->event_attr.exclude_kernel = event_type->exclude_kernel;
  selection->event_attr.exclude_hv = event_type->exclude_hv;
  selection->event_attr.exclude_host = event_type->exclude_host;
  selection->event_attr.exclude_guest = event_type->exclude_guest;
  selection->event_attr.precise_ip = event_type->precise_ip;
  if (!IsEventAttrSupportedByKernel(selection->event_attr)) {
    LOG(ERROR) << "Event type '" << event_type->name
               << "' is not supported by the kernel";
    return false;
  }
  selection->event_fds.clear();

  // Reject an event already selected in a previously added group.
  // NOTE(review): duplicates within the group currently being built are not
  // caught here, because that group has not been pushed into groups_ yet.
  for (const auto& group : groups_) {
    for (const auto& sel : group) {
      if (sel.event_type_modifier.name == selection->event_type_modifier.name) {
        LOG(ERROR) << "Event type '" << sel.event_type_modifier.name
                   << "' appears more than once";
        return false;
      }
    }
  }
  return true;
}
99
100bool EventSelectionSet::AddEventType(const std::string& event_name) {
101  return AddEventGroup(std::vector<std::string>(1, event_name));
102}
103
104bool EventSelectionSet::AddEventGroup(
105    const std::vector<std::string>& event_names) {
106  EventSelectionGroup group;
107  for (const auto& event_name : event_names) {
108    EventSelection selection;
109    if (!BuildAndCheckEventSelection(event_name, &selection)) {
110      return false;
111    }
112    group.push_back(std::move(selection));
113  }
114  groups_.push_back(std::move(group));
115  UnionSampleType();
116  return true;
117}
118
119std::vector<const EventType*> EventSelectionSet::GetTracepointEvents() const {
120  std::vector<const EventType*> result;
121  for (const auto& group : groups_) {
122    for (const auto& selection : group) {
123      if (selection.event_type_modifier.event_type.type ==
124          PERF_TYPE_TRACEPOINT) {
125        result.push_back(&selection.event_type_modifier.event_type);
126      }
127    }
128  }
129  return result;
130}
131
132std::vector<EventAttrWithId> EventSelectionSet::GetEventAttrWithId() const {
133  std::vector<EventAttrWithId> result;
134  for (const auto& group : groups_) {
135    for (const auto& selection : group) {
136      EventAttrWithId attr_id;
137      attr_id.attr = &selection.event_attr;
138      for (const auto& fd : selection.event_fds) {
139        attr_id.ids.push_back(fd->Id());
140      }
141      result.push_back(attr_id);
142    }
143  }
144  return result;
145}
146
147// Union the sample type of different event attrs can make reading sample
148// records in perf.data easier.
149void EventSelectionSet::UnionSampleType() {
150  uint64_t sample_type = 0;
151  for (const auto& group : groups_) {
152    for (const auto& selection : group) {
153      sample_type |= selection.event_attr.sample_type;
154    }
155  }
156  for (auto& group : groups_) {
157    for (auto& selection : group) {
158      selection.event_attr.sample_type = sample_type;
159    }
160  }
161}
162
163void EventSelectionSet::SetEnableOnExec(bool enable) {
164  for (auto& group : groups_) {
165    for (auto& selection : group) {
166      // If sampling is enabled on exec, then it is disabled at startup,
167      // otherwise it should be enabled at startup. Don't use
168      // ioctl(PERF_EVENT_IOC_ENABLE) to enable it after perf_event_open().
169      // Because some android kernels can't handle ioctl() well when cpu-hotplug
170      // happens. See http://b/25193162.
171      if (enable) {
172        selection.event_attr.enable_on_exec = 1;
173        selection.event_attr.disabled = 1;
174      } else {
175        selection.event_attr.enable_on_exec = 0;
176        selection.event_attr.disabled = 0;
177      }
178    }
179  }
180}
181
182bool EventSelectionSet::GetEnableOnExec() {
183  for (const auto& group : groups_) {
184    for (const auto& selection : group) {
185      if (selection.event_attr.enable_on_exec == 0) {
186        return false;
187      }
188    }
189  }
190  return true;
191}
192
193void EventSelectionSet::SampleIdAll() {
194  for (auto& group : groups_) {
195    for (auto& selection : group) {
196      selection.event_attr.sample_id_all = 1;
197    }
198  }
199}
200
201void EventSelectionSet::SetSampleFreq(uint64_t sample_freq) {
202  for (auto& group : groups_) {
203    for (auto& selection : group) {
204      selection.event_attr.freq = 1;
205      selection.event_attr.sample_freq = sample_freq;
206    }
207  }
208}
209
210void EventSelectionSet::SetSamplePeriod(uint64_t sample_period) {
211  for (auto& group : groups_) {
212    for (auto& selection : group) {
213      selection.event_attr.freq = 0;
214      selection.event_attr.sample_period = sample_period;
215    }
216  }
217}
218
219void EventSelectionSet::UseDefaultSampleFreq() {
220  for (auto& group : groups_) {
221    for (auto& selection : group) {
222      if (selection.event_type_modifier.event_type.type ==
223          PERF_TYPE_TRACEPOINT) {
224        selection.event_attr.freq = 0;
225        selection.event_attr.sample_period =
226            DEFAULT_SAMPLE_PERIOD_FOR_TRACEPOINT_EVENT;
227      } else {
228        selection.event_attr.freq = 1;
229        selection.event_attr.sample_freq =
230            DEFAULT_SAMPLE_FREQ_FOR_NONTRACEPOINT_EVENT;
231      }
232    }
233  }
234}
235
236bool EventSelectionSet::SetBranchSampling(uint64_t branch_sample_type) {
237  if (branch_sample_type != 0 &&
238      (branch_sample_type &
239       (PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_ANY_CALL |
240        PERF_SAMPLE_BRANCH_ANY_RETURN | PERF_SAMPLE_BRANCH_IND_CALL)) == 0) {
241    LOG(ERROR) << "Invalid branch_sample_type: 0x" << std::hex
242               << branch_sample_type;
243    return false;
244  }
245  if (branch_sample_type != 0 && !IsBranchSamplingSupported()) {
246    LOG(ERROR) << "branch stack sampling is not supported on this device.";
247    return false;
248  }
249  for (auto& group : groups_) {
250    for (auto& selection : group) {
251      perf_event_attr& attr = selection.event_attr;
252      if (branch_sample_type != 0) {
253        attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
254      } else {
255        attr.sample_type &= ~PERF_SAMPLE_BRANCH_STACK;
256      }
257      attr.branch_sample_type = branch_sample_type;
258    }
259  }
260  return true;
261}
262
263void EventSelectionSet::EnableFpCallChainSampling() {
264  for (auto& group : groups_) {
265    for (auto& selection : group) {
266      selection.event_attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
267    }
268  }
269}
270
271bool EventSelectionSet::EnableDwarfCallChainSampling(uint32_t dump_stack_size) {
272  if (!IsDwarfCallChainSamplingSupported()) {
273    LOG(ERROR) << "dwarf callchain sampling is not supported on this device.";
274    return false;
275  }
276  for (auto& group : groups_) {
277    for (auto& selection : group) {
278      selection.event_attr.sample_type |= PERF_SAMPLE_CALLCHAIN |
279                                          PERF_SAMPLE_REGS_USER |
280                                          PERF_SAMPLE_STACK_USER;
281      selection.event_attr.exclude_callchain_user = 1;
282      selection.event_attr.sample_regs_user =
283          GetSupportedRegMask(GetMachineArch());
284      selection.event_attr.sample_stack_user = dump_stack_size;
285    }
286  }
287  return true;
288}
289
290void EventSelectionSet::SetInherit(bool enable) {
291  for (auto& group : groups_) {
292    for (auto& selection : group) {
293      selection.event_attr.inherit = (enable ? 1 : 0);
294    }
295  }
296}
297
298bool EventSelectionSet::NeedKernelSymbol() const {
299  for (const auto& group : groups_) {
300    for (const auto& selection : group) {
301      if (!selection.event_type_modifier.exclude_kernel) {
302        return true;
303      }
304    }
305  }
306  return false;
307}
308
309static bool CheckIfCpusOnline(const std::vector<int>& cpus) {
310  std::vector<int> online_cpus = GetOnlineCpus();
311  for (const auto& cpu : cpus) {
312    if (std::find(online_cpus.begin(), online_cpus.end(), cpu) ==
313        online_cpus.end()) {
314      LOG(ERROR) << "cpu " << cpu << " is not online.";
315      return false;
316    }
317  }
318  return true;
319}
320
321bool EventSelectionSet::OpenEventFilesOnGroup(EventSelectionGroup& group,
322                                              pid_t tid, int cpu,
323                                              std::string* failed_event_type) {
324  std::vector<std::unique_ptr<EventFd>> event_fds;
325  // Given a tid and cpu, events on the same group should be all opened
326  // successfully or all failed to open.
327  EventFd* group_fd = nullptr;
328  for (auto& selection : group) {
329    std::unique_ptr<EventFd> event_fd =
330        EventFd::OpenEventFile(selection.event_attr, tid, cpu, group_fd);
331    if (event_fd != nullptr) {
332      LOG(VERBOSE) << "OpenEventFile for " << event_fd->Name();
333      event_fds.push_back(std::move(event_fd));
334    } else {
335      if (failed_event_type != nullptr) {
336        *failed_event_type = selection.event_type_modifier.name;
337        return false;
338      }
339    }
340    if (group_fd == nullptr) {
341      group_fd = event_fd.get();
342    }
343  }
344  for (size_t i = 0; i < group.size(); ++i) {
345    group[i].event_fds.push_back(std::move(event_fds[i]));
346  }
347  return true;
348}
349
350static std::set<pid_t> PrepareThreads(const std::set<pid_t>& processes,
351                                      const std::set<pid_t>& threads) {
352  std::set<pid_t> result = threads;
353  for (const auto& pid : processes) {
354    std::vector<pid_t> tids = GetThreadsInProcess(pid);
355    result.insert(tids.begin(), tids.end());
356  }
357  return result;
358}
359
// Opens event fds for every (group, thread, cpu) combination. on_cpus may be
// empty (use all online cpus) or {-1} (one fd covering all cpus). For each
// (group, thread) pair at least one cpu must open successfully, otherwise
// the whole call fails.
bool EventSelectionSet::OpenEventFiles(const std::vector<int>& on_cpus) {
  std::vector<int> cpus = on_cpus;
  if (!cpus.empty()) {
    // cpus = {-1} means open an event file for all cpus.
    if (!(cpus.size() == 1 && cpus[0] == -1) && !CheckIfCpusOnline(cpus)) {
      return false;
    }
  } else {
    cpus = GetOnlineCpus();
  }
  std::set<pid_t> threads = PrepareThreads(processes_, threads_);
  for (auto& group : groups_) {
    for (const auto& tid : threads) {
      size_t success_cpu_count = 0;
      std::string failed_event_type;
      for (const auto& cpu : cpus) {
        if (OpenEventFilesOnGroup(group, tid, cpu, &failed_event_type)) {
          success_cpu_count++;
        }
      }
      // As the online cpus can be enabled or disabled at runtime, we may not
      // open event file for all cpus successfully. But we should open at
      // least one cpu successfully.
      if (success_cpu_count == 0) {
        PLOG(ERROR) << "failed to open perf event file for event_type "
                    << failed_event_type << " for "
                    << (tid == -1 ? "all threads"
                                  : "thread " + std::to_string(tid))
                    << " on all cpus";
        return false;
      }
    }
  }
  return true;
}
395
396static bool ReadCounter(const EventFd* event_fd, CounterInfo* counter) {
397  if (!event_fd->ReadCounter(&counter->counter)) {
398    return false;
399  }
400  counter->tid = event_fd->ThreadId();
401  counter->cpu = event_fd->Cpu();
402  return true;
403}
404
405bool EventSelectionSet::ReadCounters(std::vector<CountersInfo>* counters) {
406  counters->clear();
407  for (size_t i = 0; i < groups_.size(); ++i) {
408    for (auto& selection : groups_[i]) {
409      CountersInfo counters_info;
410      counters_info.group_id = i;
411      counters_info.event_name = selection.event_type_modifier.event_type.name;
412      counters_info.event_modifier = selection.event_type_modifier.modifier;
413      counters_info.counters = selection.hotplugged_counters;
414      for (auto& event_fd : selection.event_fds) {
415        CounterInfo counter;
416        if (!ReadCounter(event_fd.get(), &counter)) {
417          return false;
418        }
419        counters_info.counters.push_back(counter);
420      }
421      counters->push_back(counters_info);
422    }
423  }
424  return true;
425}
426
// Maps kernel buffers, starting at max_mmap_pages and halving on failure
// until min_mmap_pages. Errors are only reported on the final (smallest)
// attempt. On success records the chosen size in mmap_pages_.
bool EventSelectionSet::MmapEventFiles(size_t min_mmap_pages,
                                       size_t max_mmap_pages) {
  for (size_t i = max_mmap_pages; i >= min_mmap_pages; i >>= 1) {
    if (MmapEventFiles(i, i == min_mmap_pages)) {
      LOG(VERBOSE) << "Mapped buffer size is " << i << " pages.";
      mmap_pages_ = i;
      return true;
    }
    // A failed attempt may leave some buffers mapped; tear them all down
    // before retrying with a smaller size.
    for (auto& group : groups_) {
      for (auto& selection : group) {
        for (auto& event_fd : selection.event_fds) {
          event_fd->DestroyMappedBuffer();
        }
      }
    }
    // NOTE(review): if min_mmap_pages is 0 and every attempt fails, i >>= 1
    // sticks at 0 and this loop never terminates — confirm callers always
    // pass min_mmap_pages >= 1.
  }
  return false;
}
445
446bool EventSelectionSet::MmapEventFiles(size_t mmap_pages, bool report_error) {
447  // Allocate a mapped buffer for each cpu.
448  std::map<int, EventFd*> cpu_map;
449  for (auto& group : groups_) {
450    for (auto& selection : group) {
451      for (auto& event_fd : selection.event_fds) {
452        auto it = cpu_map.find(event_fd->Cpu());
453        if (it != cpu_map.end()) {
454          if (!event_fd->ShareMappedBuffer(*(it->second), report_error)) {
455            return false;
456          }
457        } else {
458          if (!event_fd->CreateMappedBuffer(mmap_pages, report_error)) {
459            return false;
460          }
461          cpu_map[event_fd->Cpu()] = event_fd.get();
462        }
463      }
464    }
465  }
466  return true;
467}
468
469bool EventSelectionSet::PrepareToReadMmapEventData(const std::function<bool(Record*)>& callback) {
470  // Add read Events for perf event files having mapped buffer.
471  for (auto& group : groups_) {
472    for (auto& selection : group) {
473      for (auto& event_fd : selection.event_fds) {
474        if (event_fd->HasMappedBuffer()) {
475          if (!event_fd->StartPolling(*loop_, [this]() {
476                return ReadMmapEventData();
477              })) {
478            return false;
479          }
480        }
481      }
482    }
483  }
484
485  // Prepare record callback function.
486  record_callback_ = callback;
487  return true;
488}
489
// When reading from mmap buffers, we prefer reading from all buffers at once
// rather than reading one buffer at a time. Because by reading all buffers
// at once, we can merge records from different buffers easily in memory.
// Otherwise, we have to sort records with greater effort.
bool EventSelectionSet::ReadMmapEventData() {
  // Number of buffers that produced data in this pass.
  size_t head_size = 0;
  std::vector<RecordBufferHead>& heads = record_buffer_heads_;
  if (heads.empty()) {
    heads.resize(1);
  }
  heads[0].current_pos = 0;
  size_t buffer_pos = 0;

  // Drain every mapped buffer into record_buffer_. For each non-empty
  // buffer, record the byte range it occupies ([current_pos, end_pos)) and
  // the attr needed to parse its records.
  for (auto& group : groups_) {
    for (auto& selection : group) {
      for (auto& event_fd : selection.event_fds) {
        if (event_fd->HasMappedBuffer()) {
          if (event_fd->GetAvailableMmapData(record_buffer_, buffer_pos) != 0) {
            heads[head_size].end_pos = buffer_pos;
            heads[head_size].attr = &selection.event_attr;
            head_size++;
            // Keep one spare head so the next buffer's start offset can be
            // stored before we know whether that buffer has data.
            if (heads.size() == head_size) {
              heads.resize(head_size + 1);
            }
            heads[head_size].current_pos = buffer_pos;
          }
        }
      }
    }
  }

  if (head_size == 1) {
    // Only one buffer has data, process it directly.
    std::vector<std::unique_ptr<Record>> records =
        ReadRecordsFromBuffer(*heads[0].attr,
                              record_buffer_.data(), buffer_pos);
    for (auto& r : records) {
      if (!record_callback_(r.get())) {
        return false;
      }
    }
  } else {
    // Use a priority queue to merge records from different buffers. As
    // records from the same buffer are already ordered by time, we only
    // need to merge the first record from all buffers. And each time a
    // record is popped from the queue, we put the next record from its
    // buffer into the queue.
    auto comparator = [&](RecordBufferHead* h1, RecordBufferHead* h2) {
      return h1->timestamp > h2->timestamp;
    };
    std::priority_queue<RecordBufferHead*, std::vector<RecordBufferHead*>, decltype(comparator)> q(comparator);
    // Seed the queue with the first record of each buffer.
    for (size_t i = 0; i < head_size; ++i) {
      RecordBufferHead& h = heads[i];
      h.r = ReadRecordFromBuffer(*h.attr, &record_buffer_[h.current_pos]);
      h.timestamp = h.r->Timestamp();
      h.current_pos += h.r->size();
      q.push(&h);
    }
    while (!q.empty()) {
      // Deliver the earliest pending record, then refill from its buffer.
      RecordBufferHead* h = q.top();
      q.pop();
      if (!record_callback_(h->r.get())) {
        return false;
      }
      if (h->current_pos < h->end_pos) {
        h->r = ReadRecordFromBuffer(*h->attr, &record_buffer_[h->current_pos]);
        h->timestamp = h->r->Timestamp();
        h->current_pos += h->r->size();
        q.push(h);
      }
    }
  }
  return true;
}
564
// Final drain of the mapped buffers, so records still pending when
// profiling stops are delivered through record_callback_.
bool EventSelectionSet::FinishReadMmapEventData() {
  return ReadMmapEventData();
}
568
569bool EventSelectionSet::HandleCpuHotplugEvents(const std::vector<int>& monitored_cpus,
570                                               double check_interval_in_sec) {
571  monitored_cpus_.insert(monitored_cpus.begin(), monitored_cpus.end());
572  online_cpus_ = GetOnlineCpus();
573  if (!loop_->AddPeriodicEvent(SecondToTimeval(check_interval_in_sec),
574                               [&]() { return DetectCpuHotplugEvents(); })) {
575    return false;
576  }
577  return true;
578}
579
580bool EventSelectionSet::DetectCpuHotplugEvents() {
581  std::vector<int> new_cpus = GetOnlineCpus();
582  for (const auto& cpu : online_cpus_) {
583    if (std::find(new_cpus.begin(), new_cpus.end(), cpu) == new_cpus.end()) {
584      if (monitored_cpus_.empty() ||
585          monitored_cpus_.find(cpu) != monitored_cpus_.end()) {
586        LOG(INFO) << "Cpu " << cpu << " is offlined";
587        if (!HandleCpuOfflineEvent(cpu)) {
588          return false;
589        }
590      }
591    }
592  }
593  for (const auto& cpu : new_cpus) {
594    if (std::find(online_cpus_.begin(), online_cpus_.end(), cpu) ==
595        online_cpus_.end()) {
596      if (monitored_cpus_.empty() ||
597          monitored_cpus_.find(cpu) != monitored_cpus_.end()) {
598        LOG(INFO) << "Cpu " << cpu << " is onlined";
599        if (!HandleCpuOnlineEvent(cpu)) {
600          return false;
601        }
602      }
603    }
604  }
605  online_cpus_ = new_cpus;
606  return true;
607}
608
// Reacts to cpu going offline: closes every event fd bound to it. For the
// stat command the fd's final counter value is folded into
// hotplugged_counters first so totals survive; for recording, pending mmap
// data is drained and polling on the fd is stopped.
bool EventSelectionSet::HandleCpuOfflineEvent(int cpu) {
  if (!for_stat_cmd_) {
    // Read mmap data here, so we won't lose the existing records of the
    // offlined cpu.
    if (!ReadMmapEventData()) {
      return false;
    }
  }
  for (auto& group : groups_) {
    for (auto& selection : group) {
      // Erase-while-iterating: erase() returns the next valid iterator, and
      // ++it only runs on the keep path.
      for (auto it = selection.event_fds.begin();
           it != selection.event_fds.end();) {
        if ((*it)->Cpu() == cpu) {
          if (for_stat_cmd_) {
            // Capture the final count before the fd is destroyed.
            CounterInfo counter;
            if (!ReadCounter(it->get(), &counter)) {
              return false;
            }
            selection.hotplugged_counters.push_back(counter);
          } else {
            if ((*it)->HasMappedBuffer()) {
              if (!(*it)->StopPolling()) {
                return false;
              }
            }
          }
          it = selection.event_fds.erase(it);
        } else {
          ++it;
        }
      }
    }
  }
  return true;
}
644
// Reacts to a cpu coming online: opens event fds on it for every
// (group, thread) pair, and for recording also maps a buffer and emits an
// EventIdRecord describing the new fds. Open failures are only warnings
// (the cpu may already be offline again).
bool EventSelectionSet::HandleCpuOnlineEvent(int cpu) {
  // We need to start profiling when opening new event files.
  SetEnableOnExec(false);
  std::set<pid_t> threads = PrepareThreads(processes_, threads_);
  for (auto& group : groups_) {
    for (const auto& tid : threads) {
      std::string failed_event_type;
      if (!OpenEventFilesOnGroup(group, tid, cpu, &failed_event_type)) {
        // If failed to open event files, maybe the cpu has been offlined.
        PLOG(WARNING) << "failed to open perf event file for event_type "
                      << failed_event_type << " for "
                      << (tid == -1 ? "all threads"
                                    : "thread " + std::to_string(tid))
                      << " on cpu " << cpu;
      }
    }
  }
  if (!for_stat_cmd_) {
    // Prepare mapped buffer.
    if (!CreateMappedBufferForCpu(cpu)) {
      return false;
    }
    // Send a EventIdRecord.
    // The record is a flat list of (attr_id, event id) pairs, where attr_id
    // counts selections across all groups in iteration order.
    std::vector<uint64_t> event_id_data;
    uint64_t attr_id = 0;
    for (const auto& group : groups_) {
      for (const auto& selection : group) {
        for (const auto& event_fd : selection.event_fds) {
          if (event_fd->Cpu() == cpu) {
            event_id_data.push_back(attr_id);
            event_id_data.push_back(event_fd->Id());
          }
        }
        ++attr_id;
      }
    }
    EventIdRecord r(event_id_data);
    if (!record_callback_(&r)) {
      return false;
    }
  }
  return true;
}
688
// Creates the mapped buffer for all event fds on the given cpu: the first
// fd found owns the buffer (sized by mmap_pages_), the rest share it, and
// the owner is registered with the event loop for draining.
bool EventSelectionSet::CreateMappedBufferForCpu(int cpu) {
  EventFd* fd_with_buffer = nullptr;
  for (auto& group : groups_) {
    for (auto& selection : group) {
      for (auto& event_fd : selection.event_fds) {
        if (event_fd->Cpu() != cpu) {
          continue;
        }
        if (fd_with_buffer == nullptr) {
          if (!event_fd->CreateMappedBuffer(mmap_pages_, true)) {
            return false;
          }
          fd_with_buffer = event_fd.get();
        } else {
          if (!event_fd->ShareMappedBuffer(*fd_with_buffer, true)) {
            // Sharing failed: release the buffer we just created before
            // bailing out.
            fd_with_buffer->DestroyMappedBuffer();
            return false;
          }
        }
      }
    }
  }
  // Poll the owning fd so newly arriving records on this cpu get drained.
  if (fd_with_buffer != nullptr &&
      !fd_with_buffer->StartPolling(*loop_, [this]() {
        return ReadMmapEventData();
      })) {
    return false;
  }
  return true;
}
719
// Periodically checks whether any monitored thread/process is still alive;
// CheckMonitoredTargets() asks the event loop to exit once none are.
bool EventSelectionSet::StopWhenNoMoreTargets(double check_interval_in_sec) {
  return loop_->AddPeriodicEvent(SecondToTimeval(check_interval_in_sec),
                                 [&]() { return CheckMonitoredTargets(); });
}
724
725bool EventSelectionSet::CheckMonitoredTargets() {
726  for (const auto& tid : threads_) {
727    if (IsThreadAlive(tid)) {
728      return true;
729    }
730  }
731  for (const auto& pid : processes_) {
732    if (IsThreadAlive(pid)) {
733      return true;
734    }
735  }
736  return loop_->ExitLoop();
737}
738