1# SPDX-License-Identifier: Apache-2.0
2#
3# Copyright (C) 2015, ARM Limited and contributors.
4#
5# Licensed under the Apache License, Version 2.0 (the "License"); you may
6# not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16#
17
18""" Tasks Analysis Module """
19
20import matplotlib.gridspec as gridspec
21import matplotlib.pyplot as plt
22import numpy as np
23import pylab as pl
24import re
25
26from analysis_module import AnalysisModule
27from devlib.utils.misc import memoized
28from trappy.utils import listify
29
30
class TasksAnalysis(AnalysisModule):
    """
    Support for Tasks signals analysis.

    Provides DataFrame getters (big tasks, frequently waking tasks, RT
    tasks) and plotting helpers for per-task scheduler signals.

    :param trace: input Trace object
    :type trace: :mod:`libs.utils.Trace`
    """

    def __init__(self, trace):
        # All state (trace handle, logger, platform description) is set up
        # by the common AnalysisModule base class.
        super(TasksAnalysis, self).__init__(trace)
41
42
43###############################################################################
44# DataFrame Getter Methods
45###############################################################################
46
47    def _dfg_top_big_tasks(self, min_samples=100, min_utilization=None):
48        """
49        Tasks which had 'utilization' samples bigger than the specified
50        threshold
51
52        :param min_samples: minumum number of samples over the min_utilization
53        :type min_samples: int
54
55        :param min_utilization: minimum utilization used to filter samples
56            default: capacity of a little cluster
57        :type min_utilization: int
58        """
59        if not self._trace.hasEvents('sched_load_avg_task'):
60            self._log.warning('Events [sched_load_avg_task] not found')
61            return None
62
63        if min_utilization is None:
64            min_utilization = self._little_cap
65
66        # Get utilization samples >= min_utilization
67        df = self._dfg_trace_event('sched_load_avg_task')
68        big_tasks_events = df[df.util_avg > min_utilization]
69        if not len(big_tasks_events):
70            self._log.warning('No tasks with with utilization samples > %d',
71                              min_utilization)
72            return None
73
74        # Report the number of tasks which match the min_utilization condition
75        big_tasks = big_tasks_events.pid.unique()
76        self._log.info('%5d tasks with samples of utilization > %d',
77                       len(big_tasks), min_utilization)
78
79        # Compute number of samples above threshold
80        big_tasks_stats = big_tasks_events.groupby('pid')\
81                            .describe(include=['object'])
82        big_tasks_stats = big_tasks_stats.unstack()['comm']\
83                            .sort_values(by=['count'], ascending=False)
84
85        # Filter for number of occurrences
86        big_tasks_stats = big_tasks_stats[big_tasks_stats['count'] > min_samples]
87        if not len(big_tasks_stats):
88            self._log.warning('      but none with more than %d samples',
89                              min_samples)
90            return None
91
92        self._log.info('      %d with more than %d samples',
93                       len(big_tasks_stats), min_samples)
94
95        # Add task name column
96        big_tasks_stats['comm'] = big_tasks_stats.index.map(
97            lambda pid: ', '.join(self._trace.getTaskByPid(pid)))
98
99        # Filter columns of interest
100        big_tasks_stats = big_tasks_stats[['count', 'comm']]
101        big_tasks_stats.rename(columns={'count': 'samples'}, inplace=True)
102
103        return big_tasks_stats
104
105    def _dfg_top_wakeup_tasks(self, min_wakeups=100):
106        """
107        Tasks which wakeup more frequently than a specified threshold.
108
109        :param min_wakeups: minimum number of wakeups
110        :type min_wakeups: int
111        """
112        if not self._trace.hasEvents('sched_wakeup'):
113            self._log.warning('Events [sched_wakeup] not found')
114            return None
115
116        df = self._dfg_trace_event('sched_wakeup')
117
118        # Compute number of wakeups above threshold
119        wkp_tasks_stats = df.groupby('pid').describe(include=['object'])
120        wkp_tasks_stats = wkp_tasks_stats.unstack()['comm']\
121                          .sort_values(by=['count'], ascending=False)
122
123        # Filter for number of occurrences
124        wkp_tasks_stats = wkp_tasks_stats[
125            wkp_tasks_stats['count'] > min_wakeups]
126        if not len(df):
127            self._log.warning('No tasks with more than %d wakeups',
128                              len(wkp_tasks_stats))
129            return None
130        self._log.info('%5d tasks with more than %d wakeups',
131                       len(df), len(wkp_tasks_stats))
132
133        # Add task name column
134        wkp_tasks_stats['comm'] = wkp_tasks_stats.index.map(
135            lambda pid: ', '.join(self._trace.getTaskByPid(pid)))
136
137        # Filter columns of interest
138        wkp_tasks_stats = wkp_tasks_stats[['count', 'comm']]
139        wkp_tasks_stats.rename(columns={'count': 'samples'}, inplace=True)
140
141        return wkp_tasks_stats
142
143    def _dfg_rt_tasks(self, min_prio=100):
144        """
145        Tasks with RT priority
146
147        NOTE: priorities uses scheduler values, thus: the lower the value the
148              higher is the task priority.
149              RT   Priorities: [  0..100]
150              FAIR Priorities: [101..120]
151
152        :param min_prio: minumum priority
153        :type min_prio: int
154        """
155        if not self._trace.hasEvents('sched_switch'):
156            self._log.warning('Events [sched_switch] not found')
157            return None
158
159        df = self._dfg_trace_event('sched_switch')
160
161        # Filters tasks which have a priority bigger than threshold
162        df = df[df.next_prio <= min_prio]
163
164        # Filter columns of interest
165        rt_tasks = df[['next_pid', 'next_prio']]
166
167        # Remove all duplicateds
168        rt_tasks = rt_tasks.drop_duplicates()
169
170        # Order by priority
171        rt_tasks.sort_values(by=['next_prio', 'next_pid'], ascending=True,
172                             inplace=True)
173        rt_tasks.rename(columns={'next_pid': 'pid', 'next_prio': 'prio'},
174                        inplace=True)
175
176        # Set PID as index
177        rt_tasks.set_index('pid', inplace=True)
178
179        # Add task name column
180        rt_tasks['comm'] = rt_tasks.index.map(
181            lambda pid: ', '.join(self._trace.getTaskByPid(pid)))
182
183        return rt_tasks
184
185
186###############################################################################
187# Plotting Methods
188###############################################################################
189
    def plotTasks(self, tasks=None, signals=None):
        """
        Generate a common set of useful plots for each of the specified tasks

        This method allows to filter which signals should be plot, if data are
        available in the input trace. The list of signals supported are:
        Tasks signals plot:
                load_avg, util_avg, boosted_util, sched_overutilized
        Tasks residencies on CPUs:
                residencies, sched_overutilized
        Tasks PELT signals:
                load_sum, util_sum, period_contrib, sched_overutilized

        At least one of the previous signals must be specified to get a valid
        plot.

        Additional custom signals can be specified and they will be represented
        in the "Task signals plots" if they represent valid keys of the task
        load/utilization trace event (e.g. sched_load_avg_task).

        Note:
            sched_overutilized: enable the plotting of overutilization bands on
                                top of each subplot
            residencies: enable the generation of the CPUs residencies plot

        One figure is generated per task and saved as a PNG into the trace
        plots directory.

        :param tasks: the list of task names and/or PIDs to plot.
                      Numerical PIDs and string task names can be mixed
                      in the same list.
                      default: all tasks defined in Trace
                      creation time are plotted
        :type tasks: list(str) or list(int)

        :param signals: list of signals (and thus plots) to generate
                        default: all the plots and signals available in the
                        current trace
        :type signals: list(str)

        :raises ValueError: if `tasks` is neither a string, a list nor None,
            or if no task to plot can be determined
        """
        if not signals:
            signals = ['load_avg', 'util_avg', 'boosted_util',
                       'sched_overutilized',
                       'load_sum', 'util_sum', 'period_contrib',
                       'residencies']

        # Check for the minimum required signals to be available
        if not self._trace.hasEvents('sched_load_avg_task'):
            self._log.warning('Events [sched_load_avg_task] not found, '
                              'plot DISABLED!')
            return

        # Defined list of tasks to plot
        if tasks and \
            not isinstance(tasks, str) and \
            not isinstance(tasks, list):
            raise ValueError('Wrong format for tasks parameter')

        if tasks:
            tasks_to_plot = listify(tasks)
        elif self._tasks:
            tasks_to_plot = sorted(self._tasks)
        else:
            raise ValueError('No tasks to plot specified')

        # Compute number of plots to produce
        plots_count = 0
        plots_signals = [
                # First plot: task's utilization
                {'load_avg', 'util_avg', 'boosted_util'},
                # Second plot: task residency
                {'residencies'},
                # Third plot: task's load
                {'load_sum', 'util_sum', 'period_contrib'}
        ]
        # Height ratios and total figure height: the first (utilization) plot
        # is drawn bigger than the others
        hr = []
        ysize = 0
        for plot_id, signals_to_plot in enumerate(plots_signals):
            signals_to_plot = signals_to_plot.intersection(signals)
            if len(signals_to_plot):
                plots_count = plots_count + 1
                # Use bigger size only for the first plot
                hr.append(3 if plot_id == 0 else 1)
                ysize = ysize + (8 if plot_id else 4)

        # Grid
        gs = gridspec.GridSpec(plots_count, 1, height_ratios=hr)
        gs.update(wspace=0.1, hspace=0.1)

        # Build list of all PIDs for each task_name to plot
        pids_to_plot = []
        for task in tasks_to_plot:
            # Add specified PIDs to the list
            if isinstance(task, int):
                pids_to_plot.append(task)
                continue
            # Otherwise: add all the PIDs for task with the specified name
            pids_to_plot.extend(self._trace.getTaskByName(task))

        for tid in pids_to_plot:
            savefig = False

            # A PID can map to multiple names over the task's lifetime
            task_name = self._trace.getTaskByPid(tid)
            if len(task_name) == 1:
                task_name = task_name[0]
                self._log.info('Plotting %5d: %s...', tid, task_name)
            else:
                self._log.info('Plotting %5d: %s...', tid, ', '.join(task_name))
            plot_id = 0

            # For each task create a figure with plots_count plots
            plt.figure(figsize=(16, ysize))
            plt.suptitle('Task Signals',
                         y=.94, fontsize=16, horizontalalignment='center')

            # Plot load and utilization
            signals_to_plot = {'load_avg', 'util_avg', 'boosted_util'}
            signals_to_plot = list(signals_to_plot.intersection(signals))
            if len(signals_to_plot) > 0:
                axes = plt.subplot(gs[plot_id, 0])
                axes.set_title('Task [{0:d}:{1:s}] Signals'
                               .format(tid, task_name))
                plot_id = plot_id + 1
                is_last = (plot_id == plots_count)
                # NOTE: the full `signals` list (not just the intersection) is
                # passed on purpose, so that additional custom signals are
                # plotted too (see the docstring)
                self._plotTaskSignals(axes, tid, signals, is_last)
                savefig = True

            # Plot CPUs residency
            signals_to_plot = {'residencies'}
            signals_to_plot = list(signals_to_plot.intersection(signals))
            if len(signals_to_plot) > 0:
                axes = plt.subplot(gs[plot_id, 0])
                axes.set_title(
                    'Task [{0:d}:{1:s}] Residency (green: LITTLE, red: big)'
                    .format(tid, task_name)
                )
                plot_id = plot_id + 1
                is_last = (plot_id == plots_count)
                if 'sched_overutilized' in signals:
                    signals_to_plot.append('sched_overutilized')
                self._plotTaskResidencies(axes, tid, signals_to_plot, is_last)
                savefig = True

            # Plot PELT signals
            signals_to_plot = {'load_sum', 'util_sum', 'period_contrib'}
            signals_to_plot = list(signals_to_plot.intersection(signals))
            if len(signals_to_plot) > 0:
                axes = plt.subplot(gs[plot_id, 0])
                axes.set_title('Task [{0:d}:{1:s}] PELT Signals'
                               .format(tid, task_name))
                plot_id = plot_id + 1
                if 'sched_overutilized' in signals:
                    signals_to_plot.append('sched_overutilized')
                self._plotTaskPelt(axes, tid, signals_to_plot)
                savefig = True

            if not savefig:
                self._log.warning('Nothing to plot for %s', task_name)
                continue

            # Save generated plots into datadir, sanitizing characters that
            # are not safe in file names
            if isinstance(task_name, list):
                task_name = re.sub('[:/]', '_', task_name[0])
            else:
                task_name = re.sub('[:/]', '_', task_name)
            figname = '{}/{}task_util_{}_{}.png'\
                      .format(self._trace.plots_dir, self._trace.plots_prefix,
                              tid, task_name)
            pl.savefig(figname, bbox_inches='tight')
356
357    def plotBigTasks(self, max_tasks=10, min_samples=100,
358                     min_utilization=None):
359        """
360        For each big task plot utilization and show the smallest cluster
361        capacity suitable for accommodating task utilization.
362
363        :param max_tasks: maximum number of tasks to consider
364        :type max_tasks: int
365
366        :param min_samples: minumum number of samples over the min_utilization
367        :type min_samples: int
368
369        :param min_utilization: minimum utilization used to filter samples
370            default: capacity of a little cluster
371        :type min_utilization: int
372        """
373
374        # Get PID of big tasks
375        big_frequent_task_df = self._dfg_top_big_tasks(
376            min_samples, min_utilization)
377        if max_tasks > 0:
378            big_frequent_task_df = big_frequent_task_df.head(max_tasks)
379        big_frequent_task_pids = big_frequent_task_df.index.values
380
381        big_frequent_tasks_count = len(big_frequent_task_pids)
382        if big_frequent_tasks_count == 0:
383            self._log.warning('No big/frequent tasks to plot')
384            return
385
386        # Get the list of events for all big frequent tasks
387        df = self._dfg_trace_event('sched_load_avg_task')
388        big_frequent_tasks_events = df[df.pid.isin(big_frequent_task_pids)]
389
390        # Define axes for side-by-side plottings
391        fig, axes = plt.subplots(big_frequent_tasks_count, 1,
392                                 figsize=(16, big_frequent_tasks_count*4))
393        plt.subplots_adjust(wspace=0.1, hspace=0.2)
394
395        plot_idx = 0
396        for pid, group in big_frequent_tasks_events.groupby('pid'):
397
398            # # Build task names (there could be multiple, during the task lifetime)
399            task_name = 'PID: {} | {}'.format(
400                pid, ' | '.join(self._trace.getTaskByPid(pid)))
401
402            # Plot title
403            if big_frequent_tasks_count == 1:
404                ax = axes
405            else:
406                ax = axes[plot_idx]
407            ax.set_title(task_name)
408
409            # Left axis: utilization
410            ax = group.plot(y=['util_avg', 'min_cluster_cap'],
411                            style=['r.', '-b'],
412                            drawstyle='steps-post',
413                            linewidth=1,
414                            ax=ax)
415            ax.set_xlim(self._trace.x_min, self._trace.x_max)
416            ax.set_ylim(0, 1100)
417            ax.set_ylabel('util_avg')
418            ax.set_xlabel('')
419            ax.grid(True)
420            self._trace.analysis.status.plotOverutilized(ax)
421
422            plot_idx += 1
423
424        ax.set_xlabel('Time [s]')
425
426        self._log.info('Tasks which have been a "utilization" of %d for at least %d samples',
427                       self._little_cap, min_samples)
428
429    def plotWakeupTasks(self, max_tasks=10, min_wakeups=0, per_cluster=False):
430        """
431        Show waking up tasks over time and newly forked tasks in two separate
432        plots.
433
434        :param max_tasks: maximum number of tasks to consider
435        :param max_tasks: int
436
437        :param min_wakeups: minimum number of wakeups of each task
438        :type min_wakeups: int
439
440        :param per_cluster: if True get per-cluster wakeup events
441        :type per_cluster: bool
442        """
443        if per_cluster is True and \
444           not self._trace.hasEvents('sched_wakeup_new'):
445            self._log.warning('Events [sched_wakeup_new] not found, '
446                              'plots DISABLED!')
447            return
448        elif  not self._trace.hasEvents('sched_wakeup') and \
449              not self._trace.hasEvents('sched_wakeup_new'):
450            self._log.warning('Events [sched_wakeup, sched_wakeup_new] not found, '
451                              'plots DISABLED!')
452            return
453
454        # Define axes for side-by-side plottings
455        fig, axes = plt.subplots(2, 1, figsize=(14, 5))
456        plt.subplots_adjust(wspace=0.2, hspace=0.3)
457
458        if per_cluster:
459
460            # Get per cluster wakeup events
461            df = self._dfg_trace_event('sched_wakeup_new')
462            big_frequent = (
463                    (df.target_cpu.isin(self._big_cpus))
464                    )
465            ntbc = df[big_frequent]
466            ntbc_count = len(ntbc)
467            little_frequent = (
468                    (df.target_cpu.isin(self._little_cpus))
469                    )
470            ntlc = df[little_frequent];
471            ntlc_count = len(ntlc)
472
473            self._log.info('%5d tasks forked on big cluster    (%3.1f %%)',
474                           ntbc_count,
475                           100. * ntbc_count / (ntbc_count + ntlc_count))
476            self._log.info('%5d tasks forked on LITTLE cluster (%3.1f %%)',
477                           ntlc_count,
478                           100. * ntlc_count / (ntbc_count + ntlc_count))
479
480            ax = axes[0]
481            ax.set_title('Tasks Forks on big CPUs');
482            ntbc.pid.plot(style=['g.'], ax=ax);
483            ax.set_xlim(self._trace.x_min, self._trace.x_max);
484            ax.set_xticklabels([])
485            ax.set_xlabel('')
486            ax.grid(True)
487            self._trace.analysis.status.plotOverutilized(ax)
488
489            ax = axes[1]
490            ax.set_title('Tasks Forks on LITTLE CPUs');
491            ntlc.pid.plot(style=['g.'], ax=ax);
492            ax.set_xlim(self._trace.x_min, self._trace.x_max);
493            ax.grid(True)
494            self._trace.analysis.status.plotOverutilized(ax)
495
496            return
497
498        # Keep events of defined big tasks
499        wkp_task_pids = self._dfg_top_wakeup_tasks(min_wakeups)
500        if len(wkp_task_pids):
501            wkp_task_pids = wkp_task_pids.index.values[:max_tasks]
502            self._log.info('Plotting %d frequent wakeup tasks',
503                           len(wkp_task_pids))
504
505        ax = axes[0]
506        ax.set_title('Tasks WakeUps Events')
507        df = self._dfg_trace_event('sched_wakeup')
508        if len(df):
509            df = df[df.pid.isin(wkp_task_pids)]
510            df.pid.astype(int).plot(style=['b.'], ax=ax)
511            ax.set_xlim(self._trace.x_min, self._trace.x_max)
512            ax.set_xticklabels([])
513            ax.set_xlabel('')
514            ax.grid(True)
515            self._trace.analysis.status.plotOverutilized(ax)
516
517        ax = axes[1]
518        ax.set_title('Tasks Forks Events')
519        df = self._dfg_trace_event('sched_wakeup_new')
520        if len(df):
521            df = df[df.pid.isin(wkp_task_pids)]
522            df.pid.astype(int).plot(style=['r.'], ax=ax)
523            ax.set_xlim(self._trace.x_min, self._trace.x_max)
524            ax.grid(True)
525            self._trace.analysis.status.plotOverutilized(ax)
526
527    def plotBigTasksVsCapacity(self, min_samples=1,
528                               min_utilization=None, big_cluster=True):
529        """
530        Draw a plot that shows whether tasks are placed on the correct cluster
531        based on their utilization and cluster capacity. Green dots mean the
532        task was placed on the correct cluster, Red means placement was wrong
533
534        :param min_samples: minumum number of samples over the min_utilization
535        :type min_samples: int
536
537        :param min_utilization: minimum utilization used to filter samples
538            default: capacity of a little cluster
539        :type min_utilization: int
540
541        :param big_cluster:
542        :type big_cluster: bool
543        """
544
545        if not self._trace.hasEvents('sched_load_avg_task'):
546            self._log.warning('Events [sched_load_avg_task] not found')
547            return
548        if not self._trace.hasEvents('cpu_frequency'):
549            self._log.warning('Events [cpu_frequency] not found')
550            return
551
552        if big_cluster:
553            cluster_correct = 'big'
554            cpus = self._big_cpus
555        else:
556            cluster_correct = 'LITTLE'
557            cpus = self._little_cpus
558
559        # Get all utilization update events
560        df = self._dfg_trace_event('sched_load_avg_task')
561
562        # Keep events of defined big tasks
563        big_task_pids = self._dfg_top_big_tasks(
564            min_samples, min_utilization)
565        if big_task_pids is not None:
566            big_task_pids = big_task_pids.index.values
567            df = df[df.pid.isin(big_task_pids)]
568        if not df.size:
569            self._log.warning('No events for tasks with more then %d utilization '
570                              'samples bigger than %d, plots DISABLED!')
571            return
572
573        fig, axes = plt.subplots(2, 1, figsize=(14, 5))
574        plt.subplots_adjust(wspace=0.2, hspace=0.3)
575
576        # Add column of expected cluster depending on:
577        # a) task utilization value
578        # b) capacity of the selected cluster
579        bu_bc = ( \
580                (df['util_avg'] > self._little_cap) & \
581                (df['cpu'].isin(self._big_cpus))
582            )
583        su_lc = ( \
584                (df['util_avg'] <= self._little_cap) & \
585                (df['cpu'].isin(self._little_cpus))
586            )
587        # The Cluster CAPacity Matches the UTILization (ccap_mutil) iff:
588        # - tasks with util_avg  > little_cap are running on a BIG cpu
589        # - tasks with util_avg <= little_cap are running on a LITTLe cpu
590        df.loc[:,'ccap_mutil'] = np.select([(bu_bc | su_lc)], [True], False)
591
592        df_freq = self._dfg_trace_event('cpu_frequency')
593        df_freq = df_freq[df_freq.cpu == cpus[0]]
594
595        ax = axes[0]
596        ax.set_title('Tasks Utilization vs Allocation')
597        for ucolor, umatch in zip('gr', [True, False]):
598            cdata = df[df['ccap_mutil'] == umatch]
599            if len(cdata) > 0:
600                cdata['util_avg'].plot(ax=ax,
601                        style=[ucolor+'.'], legend=False)
602        ax.set_xlim(self._trace.x_min, self._trace.x_max)
603        ax.set_xticklabels([])
604        ax.set_xlabel('')
605        ax.grid(True)
606        self._trace.analysis.status.plotOverutilized(ax)
607
608        ax = axes[1]
609        ax.set_title('Frequencies on "{}" cluster'.format(cluster_correct))
610        df_freq['frequency'].plot(style=['-b'], ax=ax, drawstyle='steps-post')
611        ax.set_xlim(self._trace.x_min, self._trace.x_max);
612        ax.grid(True)
613        self._trace.analysis.status.plotOverutilized(ax)
614
615        legend_y = axes[0].get_ylim()[1]
616        axes[0].annotate('Utilization-Capacity Matches',
617                         xy=(0, legend_y),
618                         xytext=(-50, 45), textcoords='offset points',
619                         fontsize=18)
620        axes[0].annotate('Task schduled (green) or not (red) on min cluster',
621                         xy=(0, legend_y),
622                         xytext=(-50, 25), textcoords='offset points',
623                         fontsize=14)
624
625
626###############################################################################
627# Utility Methods
628###############################################################################
629
    def _plotTaskSignals(self, axes, tid, signals, is_last=False):
        """
        For task with ID `tid` plot the specified signals.

        :param axes: axes over which to generate the plot
        :type axes: :mod:`matplotlib.axes.Axes`

        :param tid: task ID
        :type tid: int

        :param signals: signals to be plot
        :type signals: list(str)

        :param is_last: if True this is the last plot
        :type is_last: bool
        """
        # Get dataframe for the required task
        util_df = self._dfg_trace_event('sched_load_avg_task')

        # Plot each requested signal that is a column of the event dataframe
        # (boosted_util comes from a different event, handled below)
        signals_to_plot = set(signals).difference({'boosted_util'})
        for signal in signals_to_plot:
            if signal not in util_df.columns:
                continue
            data = util_df[util_df.pid == tid][signal]
            data.plot(ax=axes, drawstyle='steps-post', legend=True)

        # Plot boost utilization if available
        if 'boosted_util' in signals and \
           self._trace.hasEvents('sched_boost_task'):
            boost_df = self._dfg_trace_event('sched_boost_task')
            data = boost_df[boost_df.pid == tid][['boosted_util']]
            if len(data):
                data.plot(ax=axes, style=['y-'], drawstyle='steps-post')
            else:
                task_name = self._trace.getTaskByPid(tid)
                self._log.warning('No "boosted_util" data for task [%d:%s]',
                                  tid, task_name)

        # Add capacities data if available: horizontal lines at 80% (tip) and
        # 100% (max) of each cluster's capacity
        if 'nrg_model' in self._platform:
            nrg_model = self._platform['nrg_model']
            max_lcap = nrg_model['little']['cpu']['cap_max']
            max_bcap = nrg_model['big']['cpu']['cap_max']
            tip_lcap = 0.8 * max_lcap
            tip_bcap = 0.8 * max_bcap
            self._log.debug(
                'LITTLE capacity tip/max: %d/%d, big capacity tip/max: %d/%d',
                tip_lcap, max_lcap, tip_bcap, max_bcap
            )
            axes.axhline(tip_lcap, color='y', linestyle=':', linewidth=2)
            axes.axhline(max_lcap, color='y', linestyle='--', linewidth=2)
            axes.axhline(tip_bcap, color='r', linestyle=':', linewidth=2)
            axes.axhline(max_bcap, color='r', linestyle='--', linewidth=2)

        axes.set_ylim(0, 1100)
        axes.set_xlim(self._trace.x_min, self._trace.x_max)
        axes.grid(True)
        # Hide X tick labels on all but the bottom-most subplot
        if not is_last:
            axes.set_xticklabels([])
            axes.set_xlabel('')
        if 'sched_overutilized' in signals:
            self._trace.analysis.status.plotOverutilized(axes)
693
694    def _plotTaskResidencies(self, axes, tid, signals, is_last=False):
695        """
696        For task with ID `tid` plot residency information.
697
698        :param axes: axes over which to generate the plot
699        :type axes: :mod:`matplotlib.axes.Axes`
700
701        :param tid: task ID
702        :type tid: int
703
704        :param signals: signals to be plot
705        :param signals: list(str)
706
707        :param is_last: if True this is the last plot
708        :type is_last: bool
709        """
710        util_df = self._dfg_trace_event('sched_load_avg_task')
711        data = util_df[util_df.pid == tid][['cluster', 'cpu']]
712        for ccolor, clabel in zip('gr', ['LITTLE', 'big']):
713            cdata = data[data.cluster == clabel]
714            if len(cdata) > 0:
715                cdata.plot(ax=axes, style=[ccolor+'+'], legend=False)
716        # Y Axis - placeholders for legend, acutal CPUs. topmost empty lane
717        cpus = [str(n) for n in range(self._platform['cpus_count'])]
718        ylabels = [''] + cpus
719        axes.set_yticklabels(ylabels)
720        axes.set_ylim(-1, self._platform['cpus_count'])
721        axes.set_ylabel('CPUs')
722        # X Axis
723        axes.set_xlim(self._trace.x_min, self._trace.x_max)
724
725        axes.grid(True)
726        if not is_last:
727            axes.set_xticklabels([])
728            axes.set_xlabel('')
729        if 'sched_overutilized' in signals:
730            self._trace.analysis.status.plotOverutilized(axes)
731
732    def _plotTaskPelt(self, axes, tid, signals):
733        """
734        For task with ID `tid` plot PELT-related signals.
735
736        :param axes: axes over which to generate the plot
737        :type axes: :mod:`matplotlib.axes.Axes`
738
739        :param tid: task ID
740        :type tid: int
741
742        :param signals: signals to be plot
743        :param signals: list(str)
744        """
745        util_df = self._dfg_trace_event('sched_load_avg_task')
746        data = util_df[util_df.pid == tid][['load_sum',
747                                            'util_sum',
748                                            'period_contrib']]
749        data.plot(ax=axes, drawstyle='steps-post')
750        axes.set_xlim(self._trace.x_min, self._trace.x_max)
751        axes.ticklabel_format(style='scientific', scilimits=(0, 0),
752                              axis='y', useOffset=False)
753        axes.grid(True)
754        if 'sched_overutilized' in signals:
755            self._trace.analysis.status.plotOverutilized(axes)
756
# vim: set tabstop=4 shiftwidth=4 expandtab
758