event_selection_set.cpp revision b3ae56e485b4d971a909a8b04da65713a3d56872
/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

17#include "event_selection_set.h"
18
19#include <atomic>
20#include <thread>
21
22#include <android-base/logging.h>
23
24#include "environment.h"
25#include "event_attr.h"
26#include "event_type.h"
27#include "IOEventLoop.h"
28#include "perf_regs.h"
29#include "utils.h"
30
// Defaults applied in BuildAndCheckEventSelection() when recording:
// non-tracepoint events sample at this frequency (passed through
// AdjustSampleFrequency()), tracepoint events sample every occurrence.
constexpr uint64_t DEFAULT_SAMPLE_FREQ_FOR_NONTRACEPOINT_EVENT = 4000;
constexpr uint64_t DEFAULT_SAMPLE_PERIOD_FOR_TRACEPOINT_EVENT = 1;
33
34bool IsBranchSamplingSupported() {
35  const EventType* type = FindEventTypeByName("cpu-cycles");
36  if (type == nullptr) {
37    return false;
38  }
39  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
40  attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
41  attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
42  return IsEventAttrSupported(attr);
43}
44
45bool IsDwarfCallChainSamplingSupported() {
46  const EventType* type = FindEventTypeByName("cpu-cycles");
47  if (type == nullptr) {
48    return false;
49  }
50  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
51  attr.sample_type |=
52      PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
53  attr.exclude_callchain_user = 1;
54  attr.sample_regs_user = GetSupportedRegMask(GetBuildArch());
55  attr.sample_stack_user = 8192;
56  return IsEventAttrSupported(attr);
57}
58
59bool IsDumpingRegsForTracepointEventsSupported() {
60  const EventType* event_type = FindEventTypeByName("sched:sched_switch", false);
61  if (event_type == nullptr) {
62    return false;
63  }
64  std::atomic<bool> done(false);
65  std::atomic<pid_t> thread_id(0);
66  std::thread thread([&]() {
67    thread_id = gettid();
68    while (!done) {
69      usleep(1);
70    }
71    usleep(1);  // Make a sched out to generate one sample.
72  });
73  while (thread_id == 0) {
74    usleep(1);
75  }
76  perf_event_attr attr = CreateDefaultPerfEventAttr(*event_type);
77  attr.freq = 0;
78  attr.sample_period = 1;
79  std::unique_ptr<EventFd> event_fd =
80      EventFd::OpenEventFile(attr, thread_id, -1, nullptr);
81  if (event_fd == nullptr) {
82    return false;
83  }
84  if (!event_fd->CreateMappedBuffer(4, true)) {
85    return false;
86  }
87  done = true;
88  thread.join();
89
90  std::vector<char> buffer;
91  size_t buffer_pos = 0;
92  size_t size = event_fd->GetAvailableMmapData(buffer, buffer_pos);
93  std::vector<std::unique_ptr<Record>> records =
94      ReadRecordsFromBuffer(attr, buffer.data(), size);
95  for (auto& r : records) {
96    if (r->type() == PERF_RECORD_SAMPLE) {
97      auto& record = *static_cast<SampleRecord*>(r.get());
98      if (record.ip_data.ip != 0) {
99        return true;
100      }
101    }
102  }
103  return false;
104}
105
106bool EventSelectionSet::BuildAndCheckEventSelection(
107    const std::string& event_name, EventSelection* selection) {
108  std::unique_ptr<EventTypeAndModifier> event_type = ParseEventType(event_name);
109  if (event_type == nullptr) {
110    return false;
111  }
112  if (for_stat_cmd_) {
113    if (event_type->event_type.name == "cpu-clock" ||
114        event_type->event_type.name == "task-clock") {
115      if (event_type->exclude_user || event_type->exclude_kernel) {
116        LOG(ERROR) << "Modifier u and modifier k used in event type "
117                   << event_type->event_type.name
118                   << " are not supported by the kernel.";
119        return false;
120      }
121    }
122  }
123  selection->event_type_modifier = *event_type;
124  selection->event_attr = CreateDefaultPerfEventAttr(event_type->event_type);
125  selection->event_attr.exclude_user = event_type->exclude_user;
126  selection->event_attr.exclude_kernel = event_type->exclude_kernel;
127  selection->event_attr.exclude_hv = event_type->exclude_hv;
128  selection->event_attr.exclude_host = event_type->exclude_host;
129  selection->event_attr.exclude_guest = event_type->exclude_guest;
130  selection->event_attr.precise_ip = event_type->precise_ip;
131  if (!for_stat_cmd_) {
132    if (event_type->event_type.type == PERF_TYPE_TRACEPOINT) {
133      selection->event_attr.freq = 0;
134      selection->event_attr.sample_period = DEFAULT_SAMPLE_PERIOD_FOR_TRACEPOINT_EVENT;
135    } else {
136      selection->event_attr.freq = 1;
137      selection->event_attr.sample_freq =
138          AdjustSampleFrequency(DEFAULT_SAMPLE_FREQ_FOR_NONTRACEPOINT_EVENT);
139    }
140  }
141  if (!IsEventAttrSupported(selection->event_attr)) {
142    LOG(ERROR) << "Event type '" << event_type->name
143               << "' is not supported on the device";
144    return false;
145  }
146  selection->event_fds.clear();
147
148  for (const auto& group : groups_) {
149    for (const auto& sel : group) {
150      if (sel.event_type_modifier.name == selection->event_type_modifier.name) {
151        LOG(ERROR) << "Event type '" << sel.event_type_modifier.name
152                   << "' appears more than once";
153        return false;
154      }
155    }
156  }
157  return true;
158}
159
160bool EventSelectionSet::AddEventType(const std::string& event_name, size_t* group_id) {
161  return AddEventGroup(std::vector<std::string>(1, event_name), group_id);
162}
163
164bool EventSelectionSet::AddEventGroup(
165    const std::vector<std::string>& event_names, size_t* group_id) {
166  EventSelectionGroup group;
167  for (const auto& event_name : event_names) {
168    EventSelection selection;
169    if (!BuildAndCheckEventSelection(event_name, &selection)) {
170      return false;
171    }
172    group.push_back(std::move(selection));
173  }
174  groups_.push_back(std::move(group));
175  UnionSampleType();
176  if (group_id != nullptr) {
177    *group_id = groups_.size() - 1;
178  }
179  return true;
180}
181
182std::vector<const EventType*> EventSelectionSet::GetEvents() const {
183  std::vector<const EventType*> result;
184  for (const auto& group : groups_) {
185    for (const auto& selection : group) {
186      result.push_back(&selection.event_type_modifier.event_type);
187    }
188  }
189  return result;
190}
191
192std::vector<const EventType*> EventSelectionSet::GetTracepointEvents() const {
193  std::vector<const EventType*> result;
194  for (const auto& group : groups_) {
195    for (const auto& selection : group) {
196      if (selection.event_type_modifier.event_type.type ==
197          PERF_TYPE_TRACEPOINT) {
198        result.push_back(&selection.event_type_modifier.event_type);
199      }
200    }
201  }
202  return result;
203}
204
205bool EventSelectionSet::ExcludeKernel() const {
206  for (const auto& group : groups_) {
207    for (const auto& selection : group) {
208      if (!selection.event_type_modifier.exclude_kernel) {
209        return false;
210      }
211    }
212  }
213  return true;
214}
215
216bool EventSelectionSet::HasInplaceSampler() const {
217  for (const auto& group : groups_) {
218    for (const auto& sel : group) {
219      if (sel.event_attr.type == SIMPLEPERF_TYPE_USER_SPACE_SAMPLERS &&
220          sel.event_attr.config == SIMPLEPERF_CONFIG_INPLACE_SAMPLER) {
221        return true;
222      }
223    }
224  }
225  return false;
226}
227
228std::vector<EventAttrWithId> EventSelectionSet::GetEventAttrWithId() const {
229  std::vector<EventAttrWithId> result;
230  for (const auto& group : groups_) {
231    for (const auto& selection : group) {
232      EventAttrWithId attr_id;
233      attr_id.attr = &selection.event_attr;
234      for (const auto& fd : selection.event_fds) {
235        attr_id.ids.push_back(fd->Id());
236      }
237      if (!selection.inplace_samplers.empty()) {
238        attr_id.ids.push_back(selection.inplace_samplers[0]->Id());
239      }
240      result.push_back(attr_id);
241    }
242  }
243  return result;
244}
245
246// Union the sample type of different event attrs can make reading sample
247// records in perf.data easier.
248void EventSelectionSet::UnionSampleType() {
249  uint64_t sample_type = 0;
250  for (const auto& group : groups_) {
251    for (const auto& selection : group) {
252      sample_type |= selection.event_attr.sample_type;
253    }
254  }
255  for (auto& group : groups_) {
256    for (auto& selection : group) {
257      selection.event_attr.sample_type = sample_type;
258    }
259  }
260}
261
262void EventSelectionSet::SetEnableOnExec(bool enable) {
263  for (auto& group : groups_) {
264    for (auto& selection : group) {
265      // If sampling is enabled on exec, then it is disabled at startup,
266      // otherwise it should be enabled at startup. Don't use
267      // ioctl(PERF_EVENT_IOC_ENABLE) to enable it after perf_event_open().
268      // Because some android kernels can't handle ioctl() well when cpu-hotplug
269      // happens. See http://b/25193162.
270      if (enable) {
271        selection.event_attr.enable_on_exec = 1;
272        selection.event_attr.disabled = 1;
273      } else {
274        selection.event_attr.enable_on_exec = 0;
275        selection.event_attr.disabled = 0;
276      }
277    }
278  }
279}
280
281bool EventSelectionSet::GetEnableOnExec() {
282  for (const auto& group : groups_) {
283    for (const auto& selection : group) {
284      if (selection.event_attr.enable_on_exec == 0) {
285        return false;
286      }
287    }
288  }
289  return true;
290}
291
292void EventSelectionSet::SampleIdAll() {
293  for (auto& group : groups_) {
294    for (auto& selection : group) {
295      selection.event_attr.sample_id_all = 1;
296    }
297  }
298}
299
300void EventSelectionSet::SetSampleSpeed(size_t group_id, const SampleSpeed& speed) {
301  CHECK_LT(group_id, groups_.size());
302  for (auto& selection : groups_[group_id]) {
303    if (speed.UseFreq()) {
304      selection.event_attr.freq = 1;
305      selection.event_attr.sample_freq = speed.sample_freq;
306    } else {
307      selection.event_attr.freq = 0;
308      selection.event_attr.sample_period = speed.sample_period;
309    }
310  }
311}
312
313bool EventSelectionSet::SetBranchSampling(uint64_t branch_sample_type) {
314  if (branch_sample_type != 0 &&
315      (branch_sample_type &
316       (PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_ANY_CALL |
317        PERF_SAMPLE_BRANCH_ANY_RETURN | PERF_SAMPLE_BRANCH_IND_CALL)) == 0) {
318    LOG(ERROR) << "Invalid branch_sample_type: 0x" << std::hex
319               << branch_sample_type;
320    return false;
321  }
322  if (branch_sample_type != 0 && !IsBranchSamplingSupported()) {
323    LOG(ERROR) << "branch stack sampling is not supported on this device.";
324    return false;
325  }
326  for (auto& group : groups_) {
327    for (auto& selection : group) {
328      perf_event_attr& attr = selection.event_attr;
329      if (branch_sample_type != 0) {
330        attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
331      } else {
332        attr.sample_type &= ~PERF_SAMPLE_BRANCH_STACK;
333      }
334      attr.branch_sample_type = branch_sample_type;
335    }
336  }
337  return true;
338}
339
340void EventSelectionSet::EnableFpCallChainSampling() {
341  for (auto& group : groups_) {
342    for (auto& selection : group) {
343      selection.event_attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
344    }
345  }
346}
347
348bool EventSelectionSet::EnableDwarfCallChainSampling(uint32_t dump_stack_size) {
349  if (!IsDwarfCallChainSamplingSupported()) {
350    LOG(ERROR) << "dwarf callchain sampling is not supported on this device.";
351    return false;
352  }
353  for (auto& group : groups_) {
354    for (auto& selection : group) {
355      selection.event_attr.sample_type |= PERF_SAMPLE_CALLCHAIN |
356                                          PERF_SAMPLE_REGS_USER |
357                                          PERF_SAMPLE_STACK_USER;
358      selection.event_attr.exclude_callchain_user = 1;
359      selection.event_attr.sample_regs_user =
360          GetSupportedRegMask(GetMachineArch());
361      selection.event_attr.sample_stack_user = dump_stack_size;
362    }
363  }
364  return true;
365}
366
367void EventSelectionSet::SetInherit(bool enable) {
368  for (auto& group : groups_) {
369    for (auto& selection : group) {
370      selection.event_attr.inherit = (enable ? 1 : 0);
371    }
372  }
373}
374
375bool EventSelectionSet::NeedKernelSymbol() const {
376  for (const auto& group : groups_) {
377    for (const auto& selection : group) {
378      if (!selection.event_type_modifier.exclude_kernel) {
379        return true;
380      }
381    }
382  }
383  return false;
384}
385
386static bool CheckIfCpusOnline(const std::vector<int>& cpus) {
387  std::vector<int> online_cpus = GetOnlineCpus();
388  for (const auto& cpu : cpus) {
389    if (std::find(online_cpus.begin(), online_cpus.end(), cpu) ==
390        online_cpus.end()) {
391      LOG(ERROR) << "cpu " << cpu << " is not online.";
392      return false;
393    }
394  }
395  return true;
396}
397
398bool EventSelectionSet::OpenEventFilesOnGroup(EventSelectionGroup& group,
399                                              pid_t tid, int cpu,
400                                              std::string* failed_event_type) {
401  std::vector<std::unique_ptr<EventFd>> event_fds;
402  // Given a tid and cpu, events on the same group should be all opened
403  // successfully or all failed to open.
404  EventFd* group_fd = nullptr;
405  for (auto& selection : group) {
406    std::unique_ptr<EventFd> event_fd =
407        EventFd::OpenEventFile(selection.event_attr, tid, cpu, group_fd, false);
408    if (event_fd != nullptr) {
409      LOG(VERBOSE) << "OpenEventFile for " << event_fd->Name();
410      event_fds.push_back(std::move(event_fd));
411    } else {
412      if (failed_event_type != nullptr) {
413        *failed_event_type = selection.event_type_modifier.name;
414        return false;
415      }
416    }
417    if (group_fd == nullptr) {
418      group_fd = event_fd.get();
419    }
420  }
421  for (size_t i = 0; i < group.size(); ++i) {
422    group[i].event_fds.push_back(std::move(event_fds[i]));
423  }
424  return true;
425}
426
427static std::map<pid_t, std::set<pid_t>> PrepareThreads(const std::set<pid_t>& processes,
428                                                       const std::set<pid_t>& threads) {
429  std::map<pid_t, std::set<pid_t>> result;
430  for (auto& pid : processes) {
431    std::vector<pid_t> tids = GetThreadsInProcess(pid);
432    std::set<pid_t>& threads_in_process = result[pid];
433    threads_in_process.insert(tids.begin(), tids.end());
434  }
435  for (auto& tid : threads) {
436    // tid = -1 means monitoring all threads.
437    if (tid == -1) {
438      result[-1].insert(-1);
439    } else {
440      pid_t pid;
441      if (GetProcessForThread(tid, &pid)) {
442        result[pid].insert(tid);
443      }
444    }
445  }
446  return result;
447}
448
// Open perf event files (or inplace samplers) for every group, on the given
// cpus (empty = all online cpus, {-1} = one fd covering all cpus) and on
// every monitored thread. For ordinary groups, a process only fails if not
// a single (tid, cpu) combination could be opened for it.
bool EventSelectionSet::OpenEventFiles(const std::vector<int>& on_cpus) {
  std::vector<int> cpus = on_cpus;
  if (!cpus.empty()) {
    // cpus = {-1} means open an event file for all cpus.
    if (!(cpus.size() == 1 && cpus[0] == -1) && !CheckIfCpusOnline(cpus)) {
      return false;
    }
  } else {
    cpus = GetOnlineCpus();
  }
  std::map<pid_t, std::set<pid_t>> process_map = PrepareThreads(processes_, threads_);
  for (auto& group : groups_) {
    if (IsUserSpaceSamplerGroup(group)) {
      // User-space sampler groups don't use perf event files at all.
      if (!OpenUserSpaceSamplersOnGroup(group, process_map)) {
        return false;
      }
    } else {
      for (const auto& pair : process_map) {
        size_t success_count = 0;
        std::string failed_event_type;
        for (const auto& tid : pair.second) {
          for (const auto& cpu : cpus) {
            if (OpenEventFilesOnGroup(group, tid, cpu, &failed_event_type)) {
              success_count++;
            }
          }
        }
        // We can't guarantee to open perf event file successfully for each thread on each cpu.
        // Because threads may exit between PrepareThreads() and OpenEventFilesOnGroup(), and
        // cpus may be offlined between GetOnlineCpus() and OpenEventFilesOnGroup().
        // So we only check that we can at least monitor one thread for each process.
        if (success_count == 0) {
          PLOG(ERROR) << "failed to open perf event file for event_type "
                      << failed_event_type << " for "
                      << (pair.first == -1 ? "all threads"
                                           : "threads in process " + std::to_string(pair.first));
          return false;
        }
      }
    }
  }
  return true;
}
492
493bool EventSelectionSet::IsUserSpaceSamplerGroup(EventSelectionGroup& group) {
494  return group.size() == 1 && group[0].event_attr.type == SIMPLEPERF_TYPE_USER_SPACE_SAMPLERS;
495}
496
497bool EventSelectionSet::OpenUserSpaceSamplersOnGroup(EventSelectionGroup& group,
498    const std::map<pid_t, std::set<pid_t>>& process_map) {
499  CHECK_EQ(group.size(), 1u);
500  for (auto& selection : group) {
501    if (selection.event_attr.type == SIMPLEPERF_TYPE_USER_SPACE_SAMPLERS &&
502        selection.event_attr.config == SIMPLEPERF_CONFIG_INPLACE_SAMPLER) {
503      for (auto& pair : process_map) {
504        std::unique_ptr<InplaceSamplerClient> sampler = InplaceSamplerClient::Create(
505            selection.event_attr, pair.first, pair.second);
506        if (sampler == nullptr) {
507          return false;
508        }
509        selection.inplace_samplers.push_back(std::move(sampler));
510      }
511    }
512  }
513  return true;
514}
515
516static bool ReadCounter(EventFd* event_fd, CounterInfo* counter) {
517  if (!event_fd->ReadCounter(&counter->counter)) {
518    return false;
519  }
520  counter->tid = event_fd->ThreadId();
521  counter->cpu = event_fd->Cpu();
522  return true;
523}
524
525bool EventSelectionSet::ReadCounters(std::vector<CountersInfo>* counters) {
526  counters->clear();
527  for (size_t i = 0; i < groups_.size(); ++i) {
528    for (auto& selection : groups_[i]) {
529      CountersInfo counters_info;
530      counters_info.group_id = i;
531      counters_info.event_name = selection.event_type_modifier.event_type.name;
532      counters_info.event_modifier = selection.event_type_modifier.modifier;
533      counters_info.counters = selection.hotplugged_counters;
534      for (auto& event_fd : selection.event_fds) {
535        CounterInfo counter;
536        if (!ReadCounter(event_fd.get(), &counter)) {
537          return false;
538        }
539        counters_info.counters.push_back(counter);
540      }
541      counters->push_back(counters_info);
542    }
543  }
544  return true;
545}
546
547bool EventSelectionSet::MmapEventFiles(size_t min_mmap_pages,
548                                       size_t max_mmap_pages) {
549  for (size_t i = max_mmap_pages; i >= min_mmap_pages; i >>= 1) {
550    if (MmapEventFiles(i, i == min_mmap_pages)) {
551      LOG(VERBOSE) << "Mapped buffer size is " << i << " pages.";
552      mmap_pages_ = i;
553      return true;
554    }
555    for (auto& group : groups_) {
556      for (auto& selection : group) {
557        for (auto& event_fd : selection.event_fds) {
558          event_fd->DestroyMappedBuffer();
559        }
560      }
561    }
562  }
563  return false;
564}
565
566bool EventSelectionSet::MmapEventFiles(size_t mmap_pages, bool report_error) {
567  // Allocate a mapped buffer for each cpu.
568  std::map<int, EventFd*> cpu_map;
569  for (auto& group : groups_) {
570    for (auto& selection : group) {
571      for (auto& event_fd : selection.event_fds) {
572        auto it = cpu_map.find(event_fd->Cpu());
573        if (it != cpu_map.end()) {
574          if (!event_fd->ShareMappedBuffer(*(it->second), report_error)) {
575            return false;
576          }
577        } else {
578          if (!event_fd->CreateMappedBuffer(mmap_pages, report_error)) {
579            return false;
580          }
581          cpu_map[event_fd->Cpu()] = event_fd.get();
582        }
583      }
584    }
585  }
586  return true;
587}
588
// Register read handlers on the event loop: mapped perf buffers are drained
// through ReadMmapEventData(), inplace samplers deliver records straight to
// callback. The callback is also stored for later use by ReadMmapEventData().
bool EventSelectionSet::PrepareToReadMmapEventData(const std::function<bool(Record*)>& callback) {
  // Add read Events for perf event files having mapped buffer.
  for (auto& group : groups_) {
    for (auto& selection : group) {
      for (auto& event_fd : selection.event_fds) {
        if (event_fd->HasMappedBuffer()) {
          if (!event_fd->StartPolling(*loop_, [this]() {
                return ReadMmapEventData();
              })) {
            return false;
          }
        }
      }
      for (auto& sampler : selection.inplace_samplers) {
        // The second callback lets the loop stop when no targets remain.
        if (!sampler->StartPolling(*loop_, callback,
                                   [&] { return CheckMonitoredTargets(); })) {
          return false;
        }
      }
    }
  }

  // Prepare record callback function.
  record_callback_ = callback;
  return true;
}
615
// When reading from mmap buffers, we prefer reading from all buffers at once
// rather than reading one buffer at a time. Because by reading all buffers
// at once, we can merge records from different buffers easily in memory.
// Otherwise, we have to sort records with greater effort.
bool EventSelectionSet::ReadMmapEventData() {
  // Number of buffers that produced data in this round.
  size_t head_size = 0;
  std::vector<RecordBufferHead>& heads = record_buffer_heads_;
  if (heads.empty()) {
    heads.resize(1);
  }
  heads[0].current_pos = 0;
  size_t buffer_pos = 0;

  // Drain every mapped buffer into record_buffer_. Each non-empty buffer
  // gets a head describing its [current_pos, end_pos) slice and the attr
  // needed to parse its records; head N+1's current_pos is opened as soon
  // as head N is closed.
  for (auto& group : groups_) {
    for (auto& selection : group) {
      for (auto& event_fd : selection.event_fds) {
        if (event_fd->HasMappedBuffer()) {
          if (event_fd->GetAvailableMmapData(record_buffer_, buffer_pos) != 0) {
            heads[head_size].end_pos = buffer_pos;
            heads[head_size].attr = &selection.event_attr;
            head_size++;
            if (heads.size() == head_size) {
              heads.resize(head_size + 1);
            }
            heads[head_size].current_pos = buffer_pos;
          }
        }
      }
    }
  }

  if (head_size == 0) {
    // No buffer had any data this round.
    return true;
  }
  if (head_size == 1) {
    // Only one buffer has data, process it directly.
    std::vector<std::unique_ptr<Record>> records =
        ReadRecordsFromBuffer(*heads[0].attr,
                              record_buffer_.data(), buffer_pos);
    for (auto& r : records) {
      if (!record_callback_(r.get())) {
        return false;
      }
    }
  } else {
    // Use a priority queue to merge records from different buffers. As
    // records from the same buffer are already ordered by time, we only
    // need to merge the first record from all buffers. And each time a
    // record is popped from the queue, we put the next record from its
    // buffer into the queue.
    auto comparator = [&](RecordBufferHead* h1, RecordBufferHead* h2) {
      // Min-heap on timestamp: earliest record is delivered first.
      return h1->timestamp > h2->timestamp;
    };
    std::priority_queue<RecordBufferHead*, std::vector<RecordBufferHead*>, decltype(comparator)> q(comparator);
    for (size_t i = 0; i < head_size; ++i) {
      RecordBufferHead& h = heads[i];
      h.r = ReadRecordFromBuffer(*h.attr, &record_buffer_[h.current_pos]);
      h.timestamp = h.r->Timestamp();
      h.current_pos += h.r->size();
      q.push(&h);
    }
    while (!q.empty()) {
      RecordBufferHead* h = q.top();
      q.pop();
      if (!record_callback_(h->r.get())) {
        return false;
      }
      // Refill the queue with the next record from the same buffer, if any.
      if (h->current_pos < h->end_pos) {
        h->r = ReadRecordFromBuffer(*h->attr, &record_buffer_[h->current_pos]);
        h->timestamp = h->r->Timestamp();
        h->current_pos += h->r->size();
        q.push(h);
      }
    }
  }
  return true;
}
693
// Drain all remaining mapped-buffer data, then (if inplace samplers are in
// use) run a short-lived event loop asking each open sampler to flush its
// cached samples, exiting when all have closed or after a 1s timeout.
bool EventSelectionSet::FinishReadMmapEventData() {
  if (!ReadMmapEventData()) {
    return false;
  }
  if (!HasInplaceSampler()) {
    return true;
  }
  // Inplace sampler server uses a buffer to cache samples before sending them, so we need to
  // explicitly ask it to send the cached samples.
  loop_.reset(new IOEventLoop);
  size_t inplace_sampler_count = 0;
  // Each sampler calls close_callback when done; the last one exits the loop.
  auto close_callback = [&]() {
    if (--inplace_sampler_count == 0) {
      return loop_->ExitLoop();
    }
    return true;
  };
  for (auto& group : groups_) {
    for (auto& sel : group) {
      for (auto& sampler : sel.inplace_samplers) {
        if (!sampler->IsClosed()) {
          if (!sampler->StopProfiling(*loop_, close_callback)) {
            return false;
          }
          inplace_sampler_count++;
        }
      }
    }
  }
  if (inplace_sampler_count == 0) {
    return true;
  }

  // Set a timeout to exit the loop.
  timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  if (!loop_->AddPeriodicEvent(tv, [&]() { return loop_->ExitLoop(); })) {
    return false;
  }
  return loop_->RunLoop();
}
736
737bool EventSelectionSet::HandleCpuHotplugEvents(const std::vector<int>& monitored_cpus,
738                                               double check_interval_in_sec) {
739  monitored_cpus_.insert(monitored_cpus.begin(), monitored_cpus.end());
740  online_cpus_ = GetOnlineCpus();
741  if (!loop_->AddPeriodicEvent(SecondToTimeval(check_interval_in_sec),
742                               [&]() { return DetectCpuHotplugEvents(); })) {
743    return false;
744  }
745  return true;
746}
747
748bool EventSelectionSet::DetectCpuHotplugEvents() {
749  std::vector<int> new_cpus = GetOnlineCpus();
750  for (const auto& cpu : online_cpus_) {
751    if (std::find(new_cpus.begin(), new_cpus.end(), cpu) == new_cpus.end()) {
752      if (monitored_cpus_.empty() ||
753          monitored_cpus_.find(cpu) != monitored_cpus_.end()) {
754        LOG(INFO) << "Cpu " << cpu << " is offlined";
755        if (!HandleCpuOfflineEvent(cpu)) {
756          return false;
757        }
758      }
759    }
760  }
761  for (const auto& cpu : new_cpus) {
762    if (std::find(online_cpus_.begin(), online_cpus_.end(), cpu) ==
763        online_cpus_.end()) {
764      if (monitored_cpus_.empty() ||
765          monitored_cpus_.find(cpu) != monitored_cpus_.end()) {
766        LOG(INFO) << "Cpu " << cpu << " is onlined";
767        if (!HandleCpuOnlineEvent(cpu)) {
768          return false;
769        }
770      }
771    }
772  }
773  online_cpus_ = new_cpus;
774  return true;
775}
776
// React to a cpu going offline: for record, flush pending mmap data first;
// for stat, save each affected fd's counter into hotplugged_counters; then
// stop polling and drop every event fd bound to that cpu.
bool EventSelectionSet::HandleCpuOfflineEvent(int cpu) {
  if (!for_stat_cmd_) {
    // Read mmap data here, so we won't lose the existing records of the
    // offlined cpu.
    if (!ReadMmapEventData()) {
      return false;
    }
  }
  for (auto& group : groups_) {
    for (auto& selection : group) {
      // Erase-while-iterating: erase() returns the next valid iterator.
      for (auto it = selection.event_fds.begin();
           it != selection.event_fds.end();) {
        if ((*it)->Cpu() == cpu) {
          if (for_stat_cmd_) {
            // Preserve the final counter value before the fd is dropped.
            CounterInfo counter;
            if (!ReadCounter(it->get(), &counter)) {
              return false;
            }
            selection.hotplugged_counters.push_back(counter);
          } else {
            if ((*it)->HasMappedBuffer()) {
              if (!(*it)->StopPolling()) {
                return false;
              }
            }
          }
          it = selection.event_fds.erase(it);
        } else {
          ++it;
        }
      }
    }
  }
  return true;
}
812
// React to a cpu coming online: reopen event files on it for every monitored
// thread (failures are tolerated — the cpu may already be gone again), and
// for record, map a buffer for it and emit an EventIdRecord describing the
// new (attr index, event id) pairs.
bool EventSelectionSet::HandleCpuOnlineEvent(int cpu) {
  // We need to start profiling when opening new event files.
  SetEnableOnExec(false);
  std::map<pid_t, std::set<pid_t>> process_map = PrepareThreads(processes_, threads_);
  for (auto& group : groups_) {
    // User-space sampler groups are not tied to cpus.
    if (IsUserSpaceSamplerGroup(group)) {
      continue;
    }
    for (const auto& pair : process_map) {
      for (const auto& tid : pair.second) {
        std::string failed_event_type;
        if (!OpenEventFilesOnGroup(group, tid, cpu, &failed_event_type)) {
          // If failed to open event files, maybe the cpu has been offlined.
          PLOG(WARNING) << "failed to open perf event file for event_type "
                        << failed_event_type << " for "
                        << (tid == -1 ? "all threads" : "thread " + std::to_string(tid))
                        << " on cpu " << cpu;
        }
      }
    }
  }
  if (!for_stat_cmd_) {
    // Prepare mapped buffer.
    if (!CreateMappedBufferForCpu(cpu)) {
      return false;
    }
    // Send a EventIdRecord.
    std::vector<uint64_t> event_id_data;
    // attr_id is the selection's index in flattened group order.
    uint64_t attr_id = 0;
    for (const auto& group : groups_) {
      for (const auto& selection : group) {
        for (const auto& event_fd : selection.event_fds) {
          if (event_fd->Cpu() == cpu) {
            event_id_data.push_back(attr_id);
            event_id_data.push_back(event_fd->Id());
          }
        }
        ++attr_id;
      }
    }
    EventIdRecord r(event_id_data);
    if (!record_callback_(&r)) {
      return false;
    }
  }
  return true;
}
860
861bool EventSelectionSet::CreateMappedBufferForCpu(int cpu) {
862  EventFd* fd_with_buffer = nullptr;
863  for (auto& group : groups_) {
864    for (auto& selection : group) {
865      for (auto& event_fd : selection.event_fds) {
866        if (event_fd->Cpu() != cpu) {
867          continue;
868        }
869        if (fd_with_buffer == nullptr) {
870          if (!event_fd->CreateMappedBuffer(mmap_pages_, true)) {
871            return false;
872          }
873          fd_with_buffer = event_fd.get();
874        } else {
875          if (!event_fd->ShareMappedBuffer(*fd_with_buffer, true)) {
876            fd_with_buffer->DestroyMappedBuffer();
877            return false;
878          }
879        }
880      }
881    }
882  }
883  if (fd_with_buffer != nullptr &&
884      !fd_with_buffer->StartPolling(*loop_, [this]() {
885        return ReadMmapEventData();
886      })) {
887    return false;
888  }
889  return true;
890}
891
892bool EventSelectionSet::StopWhenNoMoreTargets(double check_interval_in_sec) {
893  return loop_->AddPeriodicEvent(SecondToTimeval(check_interval_in_sec),
894                                 [&]() { return CheckMonitoredTargets(); });
895}
896
897bool EventSelectionSet::CheckMonitoredTargets() {
898  if (!HasSampler()) {
899    return loop_->ExitLoop();
900  }
901  for (const auto& tid : threads_) {
902    if (IsThreadAlive(tid)) {
903      return true;
904    }
905  }
906  for (const auto& pid : processes_) {
907    if (IsThreadAlive(pid)) {
908      return true;
909    }
910  }
911  return loop_->ExitLoop();
912}
913
914bool EventSelectionSet::HasSampler() {
915  for (auto& group : groups_) {
916    for (auto& sel : group) {
917      if (!sel.event_fds.empty()) {
918        return true;
919      }
920      for (auto& sampler : sel.inplace_samplers) {
921        if (!sampler->IsClosed()) {
922          return true;
923        }
924      }
925    }
926  }
927  return false;
928}
929