event_selection_set.cpp revision b16a51ac5e3e7e581bcd79cb7ec91cdb62b45a2b
1/*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "event_selection_set.h"
18
19#include <android-base/logging.h>
20
21#include "environment.h"
22#include "event_attr.h"
23#include "event_type.h"
24#include "IOEventLoop.h"
25#include "perf_regs.h"
26#include "utils.h"
27
// Default sampling configuration: non-tracepoint events are sampled at a
// fixed frequency, while tracepoint events are sampled once per occurrence.
constexpr uint64_t DEFAULT_SAMPLE_FREQ_FOR_NONTRACEPOINT_EVENT = 4000;
constexpr uint64_t DEFAULT_SAMPLE_PERIOD_FOR_TRACEPOINT_EVENT = 1;
30
31bool IsBranchSamplingSupported() {
32  const EventType* type = FindEventTypeByName("cpu-cycles");
33  if (type == nullptr) {
34    return false;
35  }
36  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
37  attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
38  attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
39  return IsEventAttrSupported(attr);
40}
41
42bool IsDwarfCallChainSamplingSupported() {
43  const EventType* type = FindEventTypeByName("cpu-cycles");
44  if (type == nullptr) {
45    return false;
46  }
47  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
48  attr.sample_type |=
49      PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
50  attr.exclude_callchain_user = 1;
51  attr.sample_regs_user = GetSupportedRegMask(GetBuildArch());
52  attr.sample_stack_user = 8192;
53  return IsEventAttrSupported(attr);
54}
55
56bool EventSelectionSet::BuildAndCheckEventSelection(
57    const std::string& event_name, EventSelection* selection) {
58  std::unique_ptr<EventTypeAndModifier> event_type = ParseEventType(event_name);
59  if (event_type == nullptr) {
60    return false;
61  }
62  if (for_stat_cmd_) {
63    if (event_type->event_type.name == "cpu-clock" ||
64        event_type->event_type.name == "task-clock") {
65      if (event_type->exclude_user || event_type->exclude_kernel) {
66        LOG(ERROR) << "Modifier u and modifier k used in event type "
67                   << event_type->event_type.name
68                   << " are not supported by the kernel.";
69        return false;
70      }
71    }
72  }
73  selection->event_type_modifier = *event_type;
74  selection->event_attr = CreateDefaultPerfEventAttr(event_type->event_type);
75  selection->event_attr.exclude_user = event_type->exclude_user;
76  selection->event_attr.exclude_kernel = event_type->exclude_kernel;
77  selection->event_attr.exclude_hv = event_type->exclude_hv;
78  selection->event_attr.exclude_host = event_type->exclude_host;
79  selection->event_attr.exclude_guest = event_type->exclude_guest;
80  selection->event_attr.precise_ip = event_type->precise_ip;
81  if (!IsEventAttrSupported(selection->event_attr)) {
82    LOG(ERROR) << "Event type '" << event_type->name
83               << "' is not supported on the device";
84    return false;
85  }
86  selection->event_fds.clear();
87
88  for (const auto& group : groups_) {
89    for (const auto& sel : group) {
90      if (sel.event_type_modifier.name == selection->event_type_modifier.name) {
91        LOG(ERROR) << "Event type '" << sel.event_type_modifier.name
92                   << "' appears more than once";
93        return false;
94      }
95    }
96  }
97  return true;
98}
99
100bool EventSelectionSet::AddEventType(const std::string& event_name) {
101  return AddEventGroup(std::vector<std::string>(1, event_name));
102}
103
104bool EventSelectionSet::AddEventGroup(
105    const std::vector<std::string>& event_names) {
106  EventSelectionGroup group;
107  for (const auto& event_name : event_names) {
108    EventSelection selection;
109    if (!BuildAndCheckEventSelection(event_name, &selection)) {
110      return false;
111    }
112    group.push_back(std::move(selection));
113  }
114  groups_.push_back(std::move(group));
115  UnionSampleType();
116  return true;
117}
118
119std::vector<const EventType*> EventSelectionSet::GetTracepointEvents() const {
120  std::vector<const EventType*> result;
121  for (const auto& group : groups_) {
122    for (const auto& selection : group) {
123      if (selection.event_type_modifier.event_type.type ==
124          PERF_TYPE_TRACEPOINT) {
125        result.push_back(&selection.event_type_modifier.event_type);
126      }
127    }
128  }
129  return result;
130}
131
132bool EventSelectionSet::HasInplaceSampler() const {
133  for (const auto& group : groups_) {
134    for (const auto& sel : group) {
135      if (sel.event_attr.type == SIMPLEPERF_TYPE_USER_SPACE_SAMPLERS &&
136          sel.event_attr.config == SIMPLEPERF_CONFIG_INPLACE_SAMPLER) {
137        return true;
138      }
139    }
140  }
141  return false;
142}
143
144std::vector<EventAttrWithId> EventSelectionSet::GetEventAttrWithId() const {
145  std::vector<EventAttrWithId> result;
146  for (const auto& group : groups_) {
147    for (const auto& selection : group) {
148      EventAttrWithId attr_id;
149      attr_id.attr = &selection.event_attr;
150      for (const auto& fd : selection.event_fds) {
151        attr_id.ids.push_back(fd->Id());
152      }
153      if (!selection.inplace_samplers.empty()) {
154        attr_id.ids.push_back(selection.inplace_samplers[0]->Id());
155      }
156      result.push_back(attr_id);
157    }
158  }
159  return result;
160}
161
162// Union the sample type of different event attrs can make reading sample
163// records in perf.data easier.
164void EventSelectionSet::UnionSampleType() {
165  uint64_t sample_type = 0;
166  for (const auto& group : groups_) {
167    for (const auto& selection : group) {
168      sample_type |= selection.event_attr.sample_type;
169    }
170  }
171  for (auto& group : groups_) {
172    for (auto& selection : group) {
173      selection.event_attr.sample_type = sample_type;
174    }
175  }
176}
177
178void EventSelectionSet::SetEnableOnExec(bool enable) {
179  for (auto& group : groups_) {
180    for (auto& selection : group) {
181      // If sampling is enabled on exec, then it is disabled at startup,
182      // otherwise it should be enabled at startup. Don't use
183      // ioctl(PERF_EVENT_IOC_ENABLE) to enable it after perf_event_open().
184      // Because some android kernels can't handle ioctl() well when cpu-hotplug
185      // happens. See http://b/25193162.
186      if (enable) {
187        selection.event_attr.enable_on_exec = 1;
188        selection.event_attr.disabled = 1;
189      } else {
190        selection.event_attr.enable_on_exec = 0;
191        selection.event_attr.disabled = 0;
192      }
193    }
194  }
195}
196
197bool EventSelectionSet::GetEnableOnExec() {
198  for (const auto& group : groups_) {
199    for (const auto& selection : group) {
200      if (selection.event_attr.enable_on_exec == 0) {
201        return false;
202      }
203    }
204  }
205  return true;
206}
207
208void EventSelectionSet::SampleIdAll() {
209  for (auto& group : groups_) {
210    for (auto& selection : group) {
211      selection.event_attr.sample_id_all = 1;
212    }
213  }
214}
215
216void EventSelectionSet::SetSampleFreq(uint64_t sample_freq) {
217  for (auto& group : groups_) {
218    for (auto& selection : group) {
219      selection.event_attr.freq = 1;
220      selection.event_attr.sample_freq = sample_freq;
221    }
222  }
223}
224
225void EventSelectionSet::SetSamplePeriod(uint64_t sample_period) {
226  for (auto& group : groups_) {
227    for (auto& selection : group) {
228      selection.event_attr.freq = 0;
229      selection.event_attr.sample_period = sample_period;
230    }
231  }
232}
233
234void EventSelectionSet::UseDefaultSampleFreq() {
235  for (auto& group : groups_) {
236    for (auto& selection : group) {
237      if (selection.event_type_modifier.event_type.type ==
238          PERF_TYPE_TRACEPOINT) {
239        selection.event_attr.freq = 0;
240        selection.event_attr.sample_period =
241            DEFAULT_SAMPLE_PERIOD_FOR_TRACEPOINT_EVENT;
242      } else {
243        selection.event_attr.freq = 1;
244        selection.event_attr.sample_freq =
245            DEFAULT_SAMPLE_FREQ_FOR_NONTRACEPOINT_EVENT;
246      }
247    }
248  }
249}
250
251bool EventSelectionSet::SetBranchSampling(uint64_t branch_sample_type) {
252  if (branch_sample_type != 0 &&
253      (branch_sample_type &
254       (PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_ANY_CALL |
255        PERF_SAMPLE_BRANCH_ANY_RETURN | PERF_SAMPLE_BRANCH_IND_CALL)) == 0) {
256    LOG(ERROR) << "Invalid branch_sample_type: 0x" << std::hex
257               << branch_sample_type;
258    return false;
259  }
260  if (branch_sample_type != 0 && !IsBranchSamplingSupported()) {
261    LOG(ERROR) << "branch stack sampling is not supported on this device.";
262    return false;
263  }
264  for (auto& group : groups_) {
265    for (auto& selection : group) {
266      perf_event_attr& attr = selection.event_attr;
267      if (branch_sample_type != 0) {
268        attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
269      } else {
270        attr.sample_type &= ~PERF_SAMPLE_BRANCH_STACK;
271      }
272      attr.branch_sample_type = branch_sample_type;
273    }
274  }
275  return true;
276}
277
278void EventSelectionSet::EnableFpCallChainSampling() {
279  for (auto& group : groups_) {
280    for (auto& selection : group) {
281      selection.event_attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
282    }
283  }
284}
285
286bool EventSelectionSet::EnableDwarfCallChainSampling(uint32_t dump_stack_size) {
287  if (!IsDwarfCallChainSamplingSupported()) {
288    LOG(ERROR) << "dwarf callchain sampling is not supported on this device.";
289    return false;
290  }
291  for (auto& group : groups_) {
292    for (auto& selection : group) {
293      selection.event_attr.sample_type |= PERF_SAMPLE_CALLCHAIN |
294                                          PERF_SAMPLE_REGS_USER |
295                                          PERF_SAMPLE_STACK_USER;
296      selection.event_attr.exclude_callchain_user = 1;
297      selection.event_attr.sample_regs_user =
298          GetSupportedRegMask(GetMachineArch());
299      selection.event_attr.sample_stack_user = dump_stack_size;
300    }
301  }
302  return true;
303}
304
305void EventSelectionSet::SetInherit(bool enable) {
306  for (auto& group : groups_) {
307    for (auto& selection : group) {
308      selection.event_attr.inherit = (enable ? 1 : 0);
309    }
310  }
311}
312
313bool EventSelectionSet::NeedKernelSymbol() const {
314  for (const auto& group : groups_) {
315    for (const auto& selection : group) {
316      if (!selection.event_type_modifier.exclude_kernel) {
317        return true;
318      }
319    }
320  }
321  return false;
322}
323
324static bool CheckIfCpusOnline(const std::vector<int>& cpus) {
325  std::vector<int> online_cpus = GetOnlineCpus();
326  for (const auto& cpu : cpus) {
327    if (std::find(online_cpus.begin(), online_cpus.end(), cpu) ==
328        online_cpus.end()) {
329      LOG(ERROR) << "cpu " << cpu << " is not online.";
330      return false;
331    }
332  }
333  return true;
334}
335
336bool EventSelectionSet::OpenEventFilesOnGroup(EventSelectionGroup& group,
337                                              pid_t tid, int cpu,
338                                              std::string* failed_event_type) {
339  std::vector<std::unique_ptr<EventFd>> event_fds;
340  // Given a tid and cpu, events on the same group should be all opened
341  // successfully or all failed to open.
342  EventFd* group_fd = nullptr;
343  for (auto& selection : group) {
344    std::unique_ptr<EventFd> event_fd =
345        EventFd::OpenEventFile(selection.event_attr, tid, cpu, group_fd);
346    if (event_fd != nullptr) {
347      LOG(VERBOSE) << "OpenEventFile for " << event_fd->Name();
348      event_fds.push_back(std::move(event_fd));
349    } else {
350      if (failed_event_type != nullptr) {
351        *failed_event_type = selection.event_type_modifier.name;
352        return false;
353      }
354    }
355    if (group_fd == nullptr) {
356      group_fd = event_fd.get();
357    }
358  }
359  for (size_t i = 0; i < group.size(); ++i) {
360    group[i].event_fds.push_back(std::move(event_fds[i]));
361  }
362  return true;
363}
364
365static std::map<pid_t, std::set<pid_t>> PrepareThreads(const std::set<pid_t>& processes,
366                                                       const std::set<pid_t>& threads) {
367  std::map<pid_t, std::set<pid_t>> result;
368  for (auto& pid : processes) {
369    std::vector<pid_t> tids = GetThreadsInProcess(pid);
370    std::set<pid_t>& threads_in_process = result[pid];
371    threads_in_process.insert(tids.begin(), tids.end());
372  }
373  for (auto& tid : threads) {
374    // tid = -1 means monitoring all threads.
375    if (tid == -1) {
376      result[-1].insert(-1);
377    } else {
378      pid_t pid;
379      if (GetProcessForThread(tid, &pid)) {
380        result[pid].insert(tid);
381      }
382    }
383  }
384  return result;
385}
386
387bool EventSelectionSet::OpenEventFiles(const std::vector<int>& on_cpus) {
388  std::vector<int> cpus = on_cpus;
389  if (!cpus.empty()) {
390    // cpus = {-1} means open an event file for all cpus.
391    if (!(cpus.size() == 1 && cpus[0] == -1) && !CheckIfCpusOnline(cpus)) {
392      return false;
393    }
394  } else {
395    cpus = GetOnlineCpus();
396  }
397  std::map<pid_t, std::set<pid_t>> process_map = PrepareThreads(processes_, threads_);
398  for (auto& group : groups_) {
399    if (IsUserSpaceSamplerGroup(group)) {
400      if (!OpenUserSpaceSamplersOnGroup(group, process_map)) {
401        return false;
402      }
403    } else {
404      for (const auto& pair : process_map) {
405        for (const auto& tid : pair.second) {
406          size_t success_cpu_count = 0;
407          std::string failed_event_type;
408          for (const auto& cpu : cpus) {
409            if (OpenEventFilesOnGroup(group, tid, cpu, &failed_event_type)) {
410              success_cpu_count++;
411            }
412          }
413          // As the online cpus can be enabled or disabled at runtime, we may not
414          // open event file for all cpus successfully. But we should open at
415          // least one cpu successfully.
416          if (success_cpu_count == 0) {
417            PLOG(ERROR) << "failed to open perf event file for event_type "
418                        << failed_event_type << " for "
419                        << (tid == -1 ? "all threads" : "thread " + std::to_string(tid))
420                        << " on all cpus";
421            return false;
422          }
423        }
424      }
425    }
426  }
427  return true;
428}
429
430bool EventSelectionSet::IsUserSpaceSamplerGroup(EventSelectionGroup& group) {
431  return group.size() == 1 && group[0].event_attr.type == SIMPLEPERF_TYPE_USER_SPACE_SAMPLERS;
432}
433
434bool EventSelectionSet::OpenUserSpaceSamplersOnGroup(EventSelectionGroup& group,
435    const std::map<pid_t, std::set<pid_t>>& process_map) {
436  CHECK_EQ(group.size(), 1u);
437  for (auto& selection : group) {
438    if (selection.event_attr.type == SIMPLEPERF_TYPE_USER_SPACE_SAMPLERS &&
439        selection.event_attr.config == SIMPLEPERF_CONFIG_INPLACE_SAMPLER) {
440      for (auto& pair : process_map) {
441        std::unique_ptr<InplaceSamplerClient> sampler = InplaceSamplerClient::Create(
442            selection.event_attr, pair.first, pair.second);
443        if (sampler == nullptr) {
444          return false;
445        }
446        selection.inplace_samplers.push_back(std::move(sampler));
447      }
448    }
449  }
450  return true;
451}
452
453static bool ReadCounter(const EventFd* event_fd, CounterInfo* counter) {
454  if (!event_fd->ReadCounter(&counter->counter)) {
455    return false;
456  }
457  counter->tid = event_fd->ThreadId();
458  counter->cpu = event_fd->Cpu();
459  return true;
460}
461
462bool EventSelectionSet::ReadCounters(std::vector<CountersInfo>* counters) {
463  counters->clear();
464  for (size_t i = 0; i < groups_.size(); ++i) {
465    for (auto& selection : groups_[i]) {
466      CountersInfo counters_info;
467      counters_info.group_id = i;
468      counters_info.event_name = selection.event_type_modifier.event_type.name;
469      counters_info.event_modifier = selection.event_type_modifier.modifier;
470      counters_info.counters = selection.hotplugged_counters;
471      for (auto& event_fd : selection.event_fds) {
472        CounterInfo counter;
473        if (!ReadCounter(event_fd.get(), &counter)) {
474          return false;
475        }
476        counters_info.counters.push_back(counter);
477      }
478      counters->push_back(counters_info);
479    }
480  }
481  return true;
482}
483
484bool EventSelectionSet::MmapEventFiles(size_t min_mmap_pages,
485                                       size_t max_mmap_pages) {
486  for (size_t i = max_mmap_pages; i >= min_mmap_pages; i >>= 1) {
487    if (MmapEventFiles(i, i == min_mmap_pages)) {
488      LOG(VERBOSE) << "Mapped buffer size is " << i << " pages.";
489      mmap_pages_ = i;
490      return true;
491    }
492    for (auto& group : groups_) {
493      for (auto& selection : group) {
494        for (auto& event_fd : selection.event_fds) {
495          event_fd->DestroyMappedBuffer();
496        }
497      }
498    }
499  }
500  return false;
501}
502
503bool EventSelectionSet::MmapEventFiles(size_t mmap_pages, bool report_error) {
504  // Allocate a mapped buffer for each cpu.
505  std::map<int, EventFd*> cpu_map;
506  for (auto& group : groups_) {
507    for (auto& selection : group) {
508      for (auto& event_fd : selection.event_fds) {
509        auto it = cpu_map.find(event_fd->Cpu());
510        if (it != cpu_map.end()) {
511          if (!event_fd->ShareMappedBuffer(*(it->second), report_error)) {
512            return false;
513          }
514        } else {
515          if (!event_fd->CreateMappedBuffer(mmap_pages, report_error)) {
516            return false;
517          }
518          cpu_map[event_fd->Cpu()] = event_fd.get();
519        }
520      }
521    }
522  }
523  return true;
524}
525
526bool EventSelectionSet::PrepareToReadMmapEventData(const std::function<bool(Record*)>& callback) {
527  // Add read Events for perf event files having mapped buffer.
528  for (auto& group : groups_) {
529    for (auto& selection : group) {
530      for (auto& event_fd : selection.event_fds) {
531        if (event_fd->HasMappedBuffer()) {
532          if (!event_fd->StartPolling(*loop_, [this]() {
533                return ReadMmapEventData();
534              })) {
535            return false;
536          }
537        }
538      }
539      for (auto& sampler : selection.inplace_samplers) {
540        if (!sampler->StartPolling(*loop_, callback,
541                                   [&] { return CheckMonitoredTargets(); })) {
542          return false;
543        }
544      }
545    }
546  }
547
548  // Prepare record callback function.
549  record_callback_ = callback;
550  return true;
551}
552
553// When reading from mmap buffers, we prefer reading from all buffers at once
554// rather than reading one buffer at a time. Because by reading all buffers
555// at once, we can merge records from different buffers easily in memory.
556// Otherwise, we have to sort records with greater effort.
557bool EventSelectionSet::ReadMmapEventData() {
558  size_t head_size = 0;
559  std::vector<RecordBufferHead>& heads = record_buffer_heads_;
560  if (heads.empty()) {
561    heads.resize(1);
562  }
563  heads[0].current_pos = 0;
564  size_t buffer_pos = 0;
565
566  for (auto& group : groups_) {
567    for (auto& selection : group) {
568      for (auto& event_fd : selection.event_fds) {
569        if (event_fd->HasMappedBuffer()) {
570          if (event_fd->GetAvailableMmapData(record_buffer_, buffer_pos) != 0) {
571            heads[head_size].end_pos = buffer_pos;
572            heads[head_size].attr = &selection.event_attr;
573            head_size++;
574            if (heads.size() == head_size) {
575              heads.resize(head_size + 1);
576            }
577            heads[head_size].current_pos = buffer_pos;
578          }
579        }
580      }
581    }
582  }
583
584  if (head_size == 0) {
585    return true;
586  }
587  if (head_size == 1) {
588    // Only one buffer has data, process it directly.
589    std::vector<std::unique_ptr<Record>> records =
590        ReadRecordsFromBuffer(*heads[0].attr,
591                              record_buffer_.data(), buffer_pos);
592    for (auto& r : records) {
593      if (!record_callback_(r.get())) {
594        return false;
595      }
596    }
597  } else {
598    // Use a priority queue to merge records from different buffers. As
599    // records from the same buffer are already ordered by time, we only
600    // need to merge the first record from all buffers. And each time a
601    // record is popped from the queue, we put the next record from its
602    // buffer into the queue.
603    auto comparator = [&](RecordBufferHead* h1, RecordBufferHead* h2) {
604      return h1->timestamp > h2->timestamp;
605    };
606    std::priority_queue<RecordBufferHead*, std::vector<RecordBufferHead*>, decltype(comparator)> q(comparator);
607    for (size_t i = 0; i < head_size; ++i) {
608      RecordBufferHead& h = heads[i];
609      h.r = ReadRecordFromBuffer(*h.attr, &record_buffer_[h.current_pos]);
610      h.timestamp = h.r->Timestamp();
611      h.current_pos += h.r->size();
612      q.push(&h);
613    }
614    while (!q.empty()) {
615      RecordBufferHead* h = q.top();
616      q.pop();
617      if (!record_callback_(h->r.get())) {
618        return false;
619      }
620      if (h->current_pos < h->end_pos) {
621        h->r = ReadRecordFromBuffer(*h->attr, &record_buffer_[h->current_pos]);
622        h->timestamp = h->r->Timestamp();
623        h->current_pos += h->r->size();
624        q.push(h);
625      }
626    }
627  }
628  return true;
629}
630
631bool EventSelectionSet::FinishReadMmapEventData() {
632  return ReadMmapEventData();
633}
634
635bool EventSelectionSet::HandleCpuHotplugEvents(const std::vector<int>& monitored_cpus,
636                                               double check_interval_in_sec) {
637  monitored_cpus_.insert(monitored_cpus.begin(), monitored_cpus.end());
638  online_cpus_ = GetOnlineCpus();
639  if (!loop_->AddPeriodicEvent(SecondToTimeval(check_interval_in_sec),
640                               [&]() { return DetectCpuHotplugEvents(); })) {
641    return false;
642  }
643  return true;
644}
645
646bool EventSelectionSet::DetectCpuHotplugEvents() {
647  std::vector<int> new_cpus = GetOnlineCpus();
648  for (const auto& cpu : online_cpus_) {
649    if (std::find(new_cpus.begin(), new_cpus.end(), cpu) == new_cpus.end()) {
650      if (monitored_cpus_.empty() ||
651          monitored_cpus_.find(cpu) != monitored_cpus_.end()) {
652        LOG(INFO) << "Cpu " << cpu << " is offlined";
653        if (!HandleCpuOfflineEvent(cpu)) {
654          return false;
655        }
656      }
657    }
658  }
659  for (const auto& cpu : new_cpus) {
660    if (std::find(online_cpus_.begin(), online_cpus_.end(), cpu) ==
661        online_cpus_.end()) {
662      if (monitored_cpus_.empty() ||
663          monitored_cpus_.find(cpu) != monitored_cpus_.end()) {
664        LOG(INFO) << "Cpu " << cpu << " is onlined";
665        if (!HandleCpuOnlineEvent(cpu)) {
666          return false;
667        }
668      }
669    }
670  }
671  online_cpus_ = new_cpus;
672  return true;
673}
674
675bool EventSelectionSet::HandleCpuOfflineEvent(int cpu) {
676  if (!for_stat_cmd_) {
677    // Read mmap data here, so we won't lose the existing records of the
678    // offlined cpu.
679    if (!ReadMmapEventData()) {
680      return false;
681    }
682  }
683  for (auto& group : groups_) {
684    for (auto& selection : group) {
685      for (auto it = selection.event_fds.begin();
686           it != selection.event_fds.end();) {
687        if ((*it)->Cpu() == cpu) {
688          if (for_stat_cmd_) {
689            CounterInfo counter;
690            if (!ReadCounter(it->get(), &counter)) {
691              return false;
692            }
693            selection.hotplugged_counters.push_back(counter);
694          } else {
695            if ((*it)->HasMappedBuffer()) {
696              if (!(*it)->StopPolling()) {
697                return false;
698              }
699            }
700          }
701          it = selection.event_fds.erase(it);
702        } else {
703          ++it;
704        }
705      }
706    }
707  }
708  return true;
709}
710
711bool EventSelectionSet::HandleCpuOnlineEvent(int cpu) {
712  // We need to start profiling when opening new event files.
713  SetEnableOnExec(false);
714  std::map<pid_t, std::set<pid_t>> process_map = PrepareThreads(processes_, threads_);
715  for (auto& group : groups_) {
716    if (IsUserSpaceSamplerGroup(group)) {
717      continue;
718    }
719    for (const auto& pair : process_map) {
720      for (const auto& tid : pair.second) {
721        std::string failed_event_type;
722        if (!OpenEventFilesOnGroup(group, tid, cpu, &failed_event_type)) {
723          // If failed to open event files, maybe the cpu has been offlined.
724          PLOG(WARNING) << "failed to open perf event file for event_type "
725                        << failed_event_type << " for "
726                        << (tid == -1 ? "all threads" : "thread " + std::to_string(tid))
727                        << " on cpu " << cpu;
728        }
729      }
730    }
731  }
732  if (!for_stat_cmd_) {
733    // Prepare mapped buffer.
734    if (!CreateMappedBufferForCpu(cpu)) {
735      return false;
736    }
737    // Send a EventIdRecord.
738    std::vector<uint64_t> event_id_data;
739    uint64_t attr_id = 0;
740    for (const auto& group : groups_) {
741      for (const auto& selection : group) {
742        for (const auto& event_fd : selection.event_fds) {
743          if (event_fd->Cpu() == cpu) {
744            event_id_data.push_back(attr_id);
745            event_id_data.push_back(event_fd->Id());
746          }
747        }
748        ++attr_id;
749      }
750    }
751    EventIdRecord r(event_id_data);
752    if (!record_callback_(&r)) {
753      return false;
754    }
755  }
756  return true;
757}
758
759bool EventSelectionSet::CreateMappedBufferForCpu(int cpu) {
760  EventFd* fd_with_buffer = nullptr;
761  for (auto& group : groups_) {
762    for (auto& selection : group) {
763      for (auto& event_fd : selection.event_fds) {
764        if (event_fd->Cpu() != cpu) {
765          continue;
766        }
767        if (fd_with_buffer == nullptr) {
768          if (!event_fd->CreateMappedBuffer(mmap_pages_, true)) {
769            return false;
770          }
771          fd_with_buffer = event_fd.get();
772        } else {
773          if (!event_fd->ShareMappedBuffer(*fd_with_buffer, true)) {
774            fd_with_buffer->DestroyMappedBuffer();
775            return false;
776          }
777        }
778      }
779    }
780  }
781  if (fd_with_buffer != nullptr &&
782      !fd_with_buffer->StartPolling(*loop_, [this]() {
783        return ReadMmapEventData();
784      })) {
785    return false;
786  }
787  return true;
788}
789
790bool EventSelectionSet::StopWhenNoMoreTargets(double check_interval_in_sec) {
791  return loop_->AddPeriodicEvent(SecondToTimeval(check_interval_in_sec),
792                                 [&]() { return CheckMonitoredTargets(); });
793}
794
795bool EventSelectionSet::CheckMonitoredTargets() {
796  if (!HasSampler()) {
797    return loop_->ExitLoop();
798  }
799  for (const auto& tid : threads_) {
800    if (IsThreadAlive(tid)) {
801      return true;
802    }
803  }
804  for (const auto& pid : processes_) {
805    if (IsThreadAlive(pid)) {
806      return true;
807    }
808  }
809  return loop_->ExitLoop();
810}
811
812bool EventSelectionSet::HasSampler() {
813  for (auto& group : groups_) {
814    for (auto& sel : group) {
815      if (!sel.event_fds.empty()) {
816        return true;
817      }
818      for (auto& sampler : sel.inplace_samplers) {
819        if (!sampler->IsClosed()) {
820          return true;
821        }
822      }
823    }
824  }
825  return false;
826}
827