/* builtin-lock.c — revision e6817ec1d8ab31fc7b01906e305f848542df6413 */
1#include "builtin.h"
2#include "perf.h"
3
4#include "util/util.h"
5#include "util/cache.h"
6#include "util/symbol.h"
7#include "util/thread.h"
8#include "util/header.h"
9
10#include "util/parse-options.h"
11#include "util/trace-event.h"
12
13#include "util/debug.h"
14#include "util/session.h"
15
16#include <sys/types.h>
17#include <sys/prctl.h>
18#include <semaphore.h>
19#include <pthread.h>
20#include <math.h>
21#include <limits.h>
22
23/* ANDROID_CHANGE_BEGIN */
24#if 0
25#include <linux/list.h>
26#include <linux/hash.h>
27#else
28#include "util/include/linux/list.h"
29#include "util/include/linux/hash.h"
30#endif
31/* ANDROID_CHANGE_END */
32
/* Session handle shared by event processing and the info dumpers. */
static struct perf_session *session;

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS		12
#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)

/* Hash table of struct lock_stat buckets, keyed by lockdep_map address. */
static struct list_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))
43
/*
 * Per-lock-class statistics, keyed by the lockdep_map address.
 * Lives in lockhash_table and is later re-linked into the sorted
 * 'result' rb-tree through the 'rb' node.
 */
struct lock_stat {
	struct list_head	hash_entry;
	struct rb_node		rb;		/* used for sorting */

	/*
	 * FIXME: raw_field_value() returns unsigned long long,
	 * so address of lockdep_map should be dealed as 64bit.
	 * Is there more better solution?
	 */
	void			*addr;		/* address of lockdep_map, used as ID */
	char			*name;		/* for strcpy(), we cannot use const */

	/* event counts, one per tracepoint type */
	unsigned int		nr_acquire;
	unsigned int		nr_acquired;
	unsigned int		nr_contended;
	unsigned int		nr_release;

	unsigned int		nr_readlock;	/* acquisitions with READ_LOCK flag */
	unsigned int		nr_trylock;	/* acquisitions with TRY_LOCK flag */
	/* these times are in nano sec. */
	u64			wait_time_total;
	u64			wait_time_min;	/* ULLONG_MAX until first contention */
	u64			wait_time_max;

	int			discard; /* flag of blacklist */
};
70
/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting first event of acquire.
 * As the nature of lock events, there is no guarantee
 * that the first event for the locks are acquire,
 * it can be acquired, contended or release.
 */
#define SEQ_STATE_UNINITIALIZED      0	       /* initial state */
#define SEQ_STATE_RELEASED	1	/* last event was lock_release */
#define SEQ_STATE_ACQUIRING	2	/* lock_acquire seen, waiting for lock */
#define SEQ_STATE_ACQUIRED	3	/* lock is currently held */
#define SEQ_STATE_READ_ACQUIRED	4	/* held as read/try lock (may nest) */
#define SEQ_STATE_CONTENDED	5	/* lock_contended seen, still waiting */

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48
92
/*
 * struct lock_seq_stat:
 * Place to put on state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
	struct list_head        list;	/* linked on thread_stat.seq_list */
	int			state;	/* one of SEQ_STATE_* */
	u64			prev_event_time;	/* timestamp of the previous event */
	void                    *addr;	/* lock instance this sequence tracks */

	int                     read_count;	/* nesting depth for read locks */
};
109
/* Per-thread container of in-flight lock sequences, keyed by tid. */
struct thread_stat {
	struct rb_node		rb;	/* node in the thread_stats tree */

	u32                     tid;
	struct list_head        seq_list;	/* list of struct lock_seq_stat */
};

/* rb-tree of all thread_stat, ordered by tid. */
static struct rb_root		thread_stats;
118
119static struct thread_stat *thread_stat_find(u32 tid)
120{
121	struct rb_node *node;
122	struct thread_stat *st;
123
124	node = thread_stats.rb_node;
125	while (node) {
126		st = container_of(node, struct thread_stat, rb);
127		if (st->tid == tid)
128			return st;
129		else if (tid < st->tid)
130			node = node->rb_left;
131		else
132			node = node->rb_right;
133	}
134
135	return NULL;
136}
137
138static void thread_stat_insert(struct thread_stat *new)
139{
140	struct rb_node **rb = &thread_stats.rb_node;
141	struct rb_node *parent = NULL;
142	struct thread_stat *p;
143
144	while (*rb) {
145		p = container_of(*rb, struct thread_stat, rb);
146		parent = *rb;
147
148		if (new->tid < p->tid)
149			rb = &(*rb)->rb_left;
150		else if (new->tid > p->tid)
151			rb = &(*rb)->rb_right;
152		else
153			BUG_ON("inserting invalid thread_stat\n");
154	}
155
156	rb_link_node(&new->rb, parent, rb);
157	rb_insert_color(&new->rb, &thread_stats);
158}
159
160static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
161{
162	struct thread_stat *st;
163
164	st = thread_stat_find(tid);
165	if (st)
166		return st;
167
168	st = zalloc(sizeof(struct thread_stat));
169	if (!st)
170		die("memory allocation failed\n");
171
172	st->tid = tid;
173	INIT_LIST_HEAD(&st->seq_list);
174
175	thread_stat_insert(st);
176
177	return st;
178}
179
/* Forward declaration: the very first lookup bootstraps the empty tree. */
static struct thread_stat *thread_stat_findnew_first(u32 tid);
/* Dispatch pointer: starts at the bootstrap variant, then switches over. */
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
	thread_stat_findnew_first;
183
184static struct thread_stat *thread_stat_findnew_first(u32 tid)
185{
186	struct thread_stat *st;
187
188	st = zalloc(sizeof(struct thread_stat));
189	if (!st)
190		die("memory allocation failed\n");
191	st->tid = tid;
192	INIT_LIST_HEAD(&st->seq_list);
193
194	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
195	rb_insert_color(&st->rb, &thread_stats);
196
197	thread_stat_findnew = thread_stat_findnew_after_first;
198	return st;
199}
200
/* build simple key function one is bigger than two */
#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					 struct lock_stat *two)		\
	{								\
		return one->member > two->member;			\
	}

/* Generate the comparison helpers referenced by the keys[] table. */
SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_max)
213
214static int lock_stat_key_wait_time_min(struct lock_stat *one,
215					struct lock_stat *two)
216{
217	u64 s1 = one->wait_time_min;
218	u64 s2 = two->wait_time_min;
219	if (s1 == ULLONG_MAX)
220		s1 = 0;
221	if (s2 == ULLONG_MAX)
222		s2 = 0;
223	return s1 > s2;
224}
225
/* Maps a user-facing sort key name (-k option) to its compare function. */
struct lock_key {
	/*
	 * name: the value for specify by user
	 * this should be simpler than raw name of member
	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
	 */
	const char		*name;
	int			(*key)(struct lock_stat*, struct lock_stat*);
};
235
/* Sort key selected with -k; resolved into 'compare' by select_key(). */
static const char		*sort_key = "acquired";

static int			(*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root		result;	/* place to store sorted data */

#define DEF_KEY_LOCK(name, fn_suffix)	\
	{ #name, lock_stat_key_ ## fn_suffix }
/* NULL-terminated table of valid sort keys. */
struct lock_key keys[] = {
	DEF_KEY_LOCK(acquired, nr_acquired),
	DEF_KEY_LOCK(contended, nr_contended),
	DEF_KEY_LOCK(wait_total, wait_time_total),
	DEF_KEY_LOCK(wait_min, wait_time_min),
	DEF_KEY_LOCK(wait_max, wait_time_max),

	/* extra comparisons much complicated should be here */

	{ NULL, NULL }
};
255
256static void select_key(void)
257{
258	int i;
259
260	for (i = 0; keys[i].name; i++) {
261		if (!strcmp(keys[i].name, sort_key)) {
262			compare = keys[i].key;
263			return;
264		}
265	}
266
267	die("Unknown compare key:%s\n", sort_key);
268}
269
270static void insert_to_result(struct lock_stat *st,
271			     int (*bigger)(struct lock_stat *, struct lock_stat *))
272{
273	struct rb_node **rb = &result.rb_node;
274	struct rb_node *parent = NULL;
275	struct lock_stat *p;
276
277	while (*rb) {
278		p = container_of(*rb, struct lock_stat, rb);
279		parent = *rb;
280
281		if (bigger(st, p))
282			rb = &(*rb)->rb_left;
283		else
284			rb = &(*rb)->rb_right;
285	}
286
287	rb_link_node(&st->rb, parent, rb);
288	rb_insert_color(&st->rb, &result);
289}
290
291/* returns left most element of result, and erase it */
292static struct lock_stat *pop_from_result(void)
293{
294	struct rb_node *node = result.rb_node;
295
296	if (!node)
297		return NULL;
298
299	while (node->rb_left)
300		node = node->rb_left;
301
302	rb_erase(node, &result);
303	return container_of(node, struct lock_stat, rb);
304}
305
306static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
307{
308	struct list_head *entry = lockhashentry(addr);
309	struct lock_stat *ret, *new;
310
311	list_for_each_entry(ret, entry, hash_entry) {
312		if (ret->addr == addr)
313			return ret;
314	}
315
316	new = zalloc(sizeof(struct lock_stat));
317	if (!new)
318		goto alloc_failed;
319
320	new->addr = addr;
321	new->name = zalloc(sizeof(char) * strlen(name) + 1);
322	if (!new->name)
323		goto alloc_failed;
324	strcpy(new->name, name);
325
326	new->wait_time_min = ULLONG_MAX;
327
328	list_add(&new->hash_entry, entry);
329	return new;
330
331alloc_failed:
332	die("memory allocation failed\n");
333}
334
/* Default input file; overridable with -i. */
static char			const *input_name = "perf.data";

/* Layout of a PERF_SAMPLE_RAW payload: size followed by the raw bytes. */
struct raw_event_sample {
	u32			size;
	char			data[0];
};
341
/* Decoded payloads of the four lock tracepoints. */
struct trace_acquire_event {
	void			*addr;	/* lockdep_map address */
	const char		*name;
	int			flag;	/* TRY_LOCK / READ_LOCK bits */
};

struct trace_acquired_event {
	void			*addr;
	const char		*name;
};

struct trace_contended_event {
	void			*addr;
	const char		*name;
};

struct trace_release_event {
	void			*addr;
	const char		*name;
};
362
/* Callbacks invoked per decoded lock event; see report_lock_ops below. */
struct trace_lock_handler {
	void (*acquire_event)(struct trace_acquire_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*acquired_event)(struct trace_acquired_event *,
			       struct event *,
			       int cpu,
			       u64 timestamp,
			       struct thread *thread);

	void (*contended_event)(struct trace_contended_event *,
				struct event *,
				int cpu,
				u64 timestamp,
				struct thread *thread);

	void (*release_event)(struct trace_release_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);
};
388
389static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
390{
391	struct lock_seq_stat *seq;
392
393	list_for_each_entry(seq, &ts->seq_list, list) {
394		if (seq->addr == addr)
395			return seq;
396	}
397
398	seq = zalloc(sizeof(struct lock_seq_stat));
399	if (!seq)
400		die("Not enough memory\n");
401	seq->state = SEQ_STATE_UNINITIALIZED;
402	seq->addr = addr;
403
404	list_add(&seq->list, &ts->seq_list);
405	return seq;
406}
407
/* Reasons a lock sequence was discarded as broken. */
enum broken_state {
	BROKEN_ACQUIRE,
	BROKEN_ACQUIRED,
	BROKEN_CONTENDED,
	BROKEN_RELEASE,
	BROKEN_MAX,
};

/* Count of discarded sequences, indexed by enum broken_state. */
static int bad_hist[BROKEN_MAX];

/* Bits of the lock_acquire tracepoint's "flag" field. */
enum acquire_flags {
	TRY_LOCK = 1,
	READ_LOCK = 2,
};
422
/*
 * Handle a lock_acquire event: advance this thread's per-lock sequence
 * state machine and count the acquire, or blacklist the lock when the
 * event does not fit the expected sequence.
 */
static void
report_lock_acquire_event(struct trace_acquire_event *acquire_event,
			struct event *__event __used,
			int cpu __used,
			u64 timestamp __used,
			struct thread *thread __used)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;

	ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
	if (ls->discard)	/* lock already blacklisted, ignore */
		return;

	ts = thread_stat_findnew(thread->pid);
	seq = get_seq(ts, acquire_event->addr);

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_RELEASED:
		if (!acquire_event->flag) {
			/* plain write lock: wait for acquired/contended */
			seq->state = SEQ_STATE_ACQUIRING;
		} else {
			if (acquire_event->flag & TRY_LOCK)
				ls->nr_trylock++;
			if (acquire_event->flag & READ_LOCK)
				ls->nr_readlock++;
			/* read/try acquisitions succeed immediately here */
			seq->state = SEQ_STATE_READ_ACQUIRED;
			seq->read_count = 1;
			ls->nr_acquired++;
		}
		break;
	case SEQ_STATE_READ_ACQUIRED:
		if (acquire_event->flag & READ_LOCK) {
			/* recursive read acquisition */
			seq->read_count++;
			ls->nr_acquired++;
			goto end;
		} else {
			goto broken;
		}
		break;
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
broken:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_ACQUIRE]++;
		list_del(&seq->list);
		free(seq);
		goto end;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_acquire++;
	seq->prev_event_time = timestamp;
end:
	return;
}
486
/*
 * Handle a lock_acquired event: the lock has been obtained.  If the
 * sequence was CONTENDED, the time since the previous event is
 * accounted as wait time (total/min/max).
 */
static void
report_lock_acquired_event(struct trace_acquired_event *acquired_event,
			 struct event *__event __used,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 contended_term;

	ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
	if (ls->discard)	/* lock already blacklisted, ignore */
		return;

	ts = thread_stat_findnew(thread->pid);
	seq = get_seq(ts, acquired_event->addr);

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_CONTENDED:
		/* fold the contended interval into the wait statistics */
		contended_term = timestamp - seq->prev_event_time;
		ls->wait_time_total += contended_term;
		if (contended_term < ls->wait_time_min)
			ls->wait_time_min = contended_term;
		if (ls->wait_time_max < contended_term)
			ls->wait_time_max = contended_term;
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_ACQUIRED]++;
		list_del(&seq->list);
		free(seq);
		goto end;
		break;

	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_ACQUIRED;
	ls->nr_acquired++;
	seq->prev_event_time = timestamp;
end:
	return;
}
542
/*
 * Handle a lock_contended event: the thread is now blocked waiting.
 * Only valid after ACQUIRING; any other state breaks the sequence.
 */
static void
report_lock_contended_event(struct trace_contended_event *contended_event,
			  struct event *__event __used,
			  int cpu __used,
			  u64 timestamp __used,
			  struct thread *thread __used)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;

	ls = lock_stat_findnew(contended_event->addr, contended_event->name);
	if (ls->discard)	/* lock already blacklisted, ignore */
		return;

	ts = thread_stat_findnew(thread->pid);
	seq = get_seq(ts, contended_event->addr);

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_CONTENDED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_CONTENDED]++;
		list_del(&seq->list);
		free(seq);
		goto end;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_CONTENDED;
	ls->nr_contended++;
	seq->prev_event_time = timestamp;
end:
	return;
}
589
/*
 * Handle a lock_release event: close out the sequence and free it.
 * Read locks only complete once their nesting count drops to zero.
 */
static void
report_lock_release_event(struct trace_release_event *release_event,
			struct event *__event __used,
			int cpu __used,
			u64 timestamp __used,
			struct thread *thread __used)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;

	ls = lock_stat_findnew(release_event->addr, release_event->name);
	if (ls->discard)	/* lock already blacklisted, ignore */
		return;

	ts = thread_stat_findnew(thread->pid);
	seq = get_seq(ts, release_event->addr);

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		goto end;	/* orphan event: free the empty sequence */
		break;
	case SEQ_STATE_ACQUIRED:
		break;
	case SEQ_STATE_READ_ACQUIRED:
		seq->read_count--;
		BUG_ON(seq->read_count < 0);
		if (!seq->read_count) {
			/* outermost read lock released */
			ls->nr_release++;
			goto end;
		}
		break;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_RELEASE]++;
		goto free_seq;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_release++;
free_seq:
	list_del(&seq->list);
	free(seq);
end:
	return;
}
642
/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops  = {
	.acquire_event		= report_lock_acquire_event,
	.acquired_event		= report_lock_acquired_event,
	.contended_event	= report_lock_contended_event,
	.release_event		= report_lock_release_event,
};

/* Active handler set; currently always &report_lock_ops. */
static struct trace_lock_handler *trace_handler;
653
654static void
655process_lock_acquire_event(void *data,
656			   struct event *event __used,
657			   int cpu __used,
658			   u64 timestamp __used,
659			   struct thread *thread __used)
660{
661	struct trace_acquire_event acquire_event;
662	u64 tmp;		/* this is required for casting... */
663
664	tmp = raw_field_value(event, "lockdep_addr", data);
665	memcpy(&acquire_event.addr, &tmp, sizeof(void *));
666	acquire_event.name = (char *)raw_field_ptr(event, "name", data);
667	acquire_event.flag = (int)raw_field_value(event, "flag", data);
668
669	if (trace_handler->acquire_event)
670		trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
671}
672
673static void
674process_lock_acquired_event(void *data,
675			    struct event *event __used,
676			    int cpu __used,
677			    u64 timestamp __used,
678			    struct thread *thread __used)
679{
680	struct trace_acquired_event acquired_event;
681	u64 tmp;		/* this is required for casting... */
682
683	tmp = raw_field_value(event, "lockdep_addr", data);
684	memcpy(&acquired_event.addr, &tmp, sizeof(void *));
685	acquired_event.name = (char *)raw_field_ptr(event, "name", data);
686
687	if (trace_handler->acquire_event)
688		trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
689}
690
691static void
692process_lock_contended_event(void *data,
693			     struct event *event __used,
694			     int cpu __used,
695			     u64 timestamp __used,
696			     struct thread *thread __used)
697{
698	struct trace_contended_event contended_event;
699	u64 tmp;		/* this is required for casting... */
700
701	tmp = raw_field_value(event, "lockdep_addr", data);
702	memcpy(&contended_event.addr, &tmp, sizeof(void *));
703	contended_event.name = (char *)raw_field_ptr(event, "name", data);
704
705	if (trace_handler->acquire_event)
706		trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
707}
708
709static void
710process_lock_release_event(void *data,
711			   struct event *event __used,
712			   int cpu __used,
713			   u64 timestamp __used,
714			   struct thread *thread __used)
715{
716	struct trace_release_event release_event;
717	u64 tmp;		/* this is required for casting... */
718
719	tmp = raw_field_value(event, "lockdep_addr", data);
720	memcpy(&release_event.addr, &tmp, sizeof(void *));
721	release_event.name = (char *)raw_field_ptr(event, "name", data);
722
723	if (trace_handler->acquire_event)
724		trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
725}
726
727static void
728process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
729{
730	struct event *event;
731	int type;
732
733	type = trace_parse_common_type(data);
734	event = trace_find_event(type);
735
736	if (!strcmp(event->name, "lock_acquire"))
737		process_lock_acquire_event(data, event, cpu, timestamp, thread);
738	if (!strcmp(event->name, "lock_acquired"))
739		process_lock_acquired_event(data, event, cpu, timestamp, thread);
740	if (!strcmp(event->name, "lock_contended"))
741		process_lock_contended_event(data, event, cpu, timestamp, thread);
742	if (!strcmp(event->name, "lock_release"))
743		process_lock_release_event(data, event, cpu, timestamp, thread);
744}
745
746static void print_bad_events(int bad, int total)
747{
748	/* Output for debug, this have to be removed */
749	int i;
750	const char *name[4] =
751		{ "acquire", "acquired", "contended", "release" };
752
753	pr_info("\n=== output for debug===\n\n");
754	pr_info("bad: %d, total: %d\n", bad, total);
755	pr_info("bad rate: %f %%\n", (double)bad / (double)total * 100);
756	pr_info("histogram of events caused bad sequence\n");
757	for (i = 0; i < BROKEN_MAX; i++)
758		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
759}
760
/* TODO: various way to print, coloring, nano or milli sec */
/*
 * Print one row per lock, draining the sorted 'result' tree.
 * Discarded (blacklisted) locks are counted but not printed.
 */
static void print_result(void)
{
	struct lock_stat *st;
	char cut_name[20];
	int bad, total;

	pr_info("%20s ", "Name");
	pr_info("%10s ", "acquired");
	pr_info("%10s ", "contended");

	pr_info("%15s ", "total wait (ns)");
	pr_info("%15s ", "max wait (ns)");
	pr_info("%15s ", "min wait (ns)");

	pr_info("\n\n");

	bad = total = 0;
	while ((st = pop_from_result())) {
		total++;
		if (st->discard) {
			bad++;
			continue;
		}
		bzero(cut_name, 20);

		if (strlen(st->name) < 16) {
			/* output raw name */
			pr_info("%20s ", st->name);
		} else {
			/* 16 chars + "..." + NUL fills the 20-byte buffer */
			strncpy(cut_name, st->name, 16);
			cut_name[16] = '.';
			cut_name[17] = '.';
			cut_name[18] = '.';
			cut_name[19] = '\0';
			/* cut off name for saving output style */
			pr_info("%20s ", cut_name);
		}

		pr_info("%10u ", st->nr_acquired);
		pr_info("%10u ", st->nr_contended);

		pr_info("%15" PRIu64 " ", st->wait_time_total);
		pr_info("%15" PRIu64 " ", st->wait_time_max);
		/* ULLONG_MAX means the lock was never contended */
		pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
		       0 : st->wait_time_min);
		pr_info("\n");
	}

	print_bad_events(bad, total);
}
812
/* Set by "perf lock info" options: -t dumps threads, -m dumps the lock map. */
static bool info_threads, info_map;
814
815static void dump_threads(void)
816{
817	struct thread_stat *st;
818	struct rb_node *node;
819	struct thread *t;
820
821	pr_info("%10s: comm\n", "Thread ID");
822
823	node = rb_first(&thread_stats);
824	while (node) {
825		st = container_of(node, struct thread_stat, rb);
826		t = perf_session__findnew(session, st->tid);
827		pr_info("%10d: %s\n", st->tid, t->comm);
828		node = rb_next(node);
829	};
830}
831
832static void dump_map(void)
833{
834	unsigned int i;
835	struct lock_stat *st;
836
837	pr_info("Address of instance: name of class\n");
838	for (i = 0; i < LOCKHASH_SIZE; i++) {
839		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
840			pr_info(" %p: %s\n", st->addr, st->name);
841		}
842	}
843}
844
845static void dump_info(void)
846{
847	if (info_threads)
848		dump_threads();
849	else if (info_map)
850		dump_map();
851	else
852		die("Unknown type of information\n");
853}
854
/*
 * Per-sample session callback: resolve the sample's thread, then decode
 * the raw tracepoint payload.  Returns -1 when the thread is unknown.
 */
static int process_sample_event(union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel __used,
				struct perf_session *s)
{
	struct thread *thread = perf_session__findnew(s, sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	process_raw_event(sample->raw_data, sample->cpu, sample->time, thread);

	return 0;
}
872
/* Session callbacks: samples are delivered in timestamp order. */
static struct perf_event_ops eops = {
	.sample			= process_sample_event,
	.comm			= perf_event__process_comm,
	.ordered_samples	= true,
};
878
/*
 * Open the input file and replay its events through 'eops'.
 * Populates lockhash_table and thread_stats as a side effect.
 */
static int read_events(void)
{
	session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
	if (!session)
		die("Initializing perf session failed\n");

	return perf_session__process_events(session, &eops);
}
887
888static void sort_result(void)
889{
890	unsigned int i;
891	struct lock_stat *st;
892
893	for (i = 0; i < LOCKHASH_SIZE; i++) {
894		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
895			insert_to_result(st, compare);
896		}
897	}
898}
899
/* "perf lock report": read events, then print sorted lock statistics. */
static void __cmd_report(void)
{
	setup_pager();
	select_key();	/* resolve sort_key into 'compare' */
	read_events();	/* fill lockhash_table / thread_stats */
	sort_result();	/* hash table -> sorted rb-tree */
	print_result();
}
908
static const char * const report_usage[] = {
	"perf lock report [<options>]",
	NULL
};

static const struct option report_options[] = {
	OPT_STRING('k', "key", &sort_key, "acquired",
		    "key for sorting (acquired / contended / wait_total / wait_max / wait_min)"),
	/* TODO: type */
	OPT_END()
};

static const char * const info_usage[] = {
	"perf lock info [<options>]",
	NULL
};

static const struct option info_options[] = {
	OPT_BOOLEAN('t', "threads", &info_threads,
		    "dump thread list in perf.data"),
	OPT_BOOLEAN('m', "map", &info_map,
		    "map of lock instances (name:address table)"),
	OPT_END()
};

static const char * const lock_usage[] = {
	"perf lock [<options>] {record|trace|report}",
	NULL
};

static const struct option lock_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
	OPT_END()
};

/* Arguments prepended to the user's when aliasing to "perf record". */
static const char *record_args[] = {
	"record",
	"-R",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "lock:lock_acquire:r",
	"-e", "lock:lock_acquired:r",
	"-e", "lock:lock_contended:r",
	"-e", "lock:lock_release:r",
};
957
958static int __cmd_record(int argc, const char **argv)
959{
960	unsigned int rec_argc, i, j;
961	const char **rec_argv;
962
963	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
964	rec_argv = calloc(rec_argc + 1, sizeof(char *));
965
966	if (rec_argv == NULL)
967		return -ENOMEM;
968
969	for (i = 0; i < ARRAY_SIZE(record_args); i++)
970		rec_argv[i] = strdup(record_args[i]);
971
972	for (j = 1; j < (unsigned int)argc; j++, i++)
973		rec_argv[i] = argv[j];
974
975	BUG_ON(i != rec_argc);
976
977	return cmd_record(i, rec_argv, NULL);
978}
979
/*
 * Entry point of "perf lock": initializes the lock hash table, then
 * dispatches to the record / report / script / info subcommands.
 */
int cmd_lock(int argc, const char **argv, const char *prefix __used)
{
	unsigned int i;

	symbol__init();
	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_LIST_HEAD(lockhash_table + i);

	/* Stop at the first non-option so the subcommand keeps its args. */
	argc = parse_options(argc, argv, lock_options, lock_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "report", 6)) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		__cmd_report();
	} else if (!strcmp(argv[0], "script")) {
		/* Aliased to 'perf script' */
		return cmd_script(argc, argv, prefix);
	} else if (!strcmp(argv[0], "info")) {
		if (argc) {
			argc = parse_options(argc, argv,
					     info_options, info_usage, 0);
			if (argc)
				usage_with_options(info_usage, info_options);
		}
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		setup_pager();
		read_events();
		dump_info();
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return 0;
}
1025