Lines Matching defs:opts (uses of struct perf_record_opts in perf record, tools/perf/builtin-record.c; the leading number on each line is the line number in that file)

66 	struct perf_record_opts	opts;
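
For orientation, the fields referenced throughout this listing suggest roughly the following shape for struct perf_record_opts. This is a sketch reconstructed only from the references below, not the real definition, which lives in the perf tool's own headers and changes between kernel versions:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: field set inferred from the references in this listing. */
struct perf_record_opts_sketch {
	/* struct perf_target target;   pid/tid/cpu/uid/system-wide selection */
	bool		group, inherit_stat, no_delay, no_inherit, no_samples;
	bool		pipe_output, raw_samples, sample_address, sample_time;
	bool		sample_weight, period;
	unsigned int	freq, user_freq, mmap_pages;
	uint64_t	branch_stack, default_interval, user_interval;
	int		call_graph;		/* CALLCHAIN_NONE / CALLCHAIN_FP / CALLCHAIN_DWARF */
	unsigned int	stack_dump_size;	/* user stack dump size for dwarf unwinding */
};
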
200 struct perf_record_opts *opts = &rec->opts;
203 perf_evlist__config(evlist, opts);
215 perf_evsel__open_strerror(pos, &opts->target,
229 if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
235 "(current value: %d)\n", opts->mmap_pages);
237 } else if (!is_power_of_2(opts->mmap_pages) &&
238 (opts->mmap_pages != UINT_MAX)) {
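
Lines 229-238 reject mmap buffer sizes that are not a power of two: the ring buffer handed to perf_evlist__mmap() must be a power-of-two number of pages, and UINT_MAX serves as the "not set, use the default" sentinel. A minimal illustration of that check (not perf code):

#include <limits.h>
#include <stdbool.h>

/* Illustration only: mirrors the is_power_of_2()/UINT_MAX test above. */
static bool mmap_pages_valid(unsigned int pages)
{
	if (pages == UINT_MAX)		/* option left unset, default applies */
		return true;
	return pages != 0 && (pages & (pages - 1)) == 0;	/* power of two */
}
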
274 if (!rec->opts.pipe_output) {
356 struct perf_record_opts *opts = &rec->opts;
374 opts->pipe_output = true;
380 opts->pipe_output = true;
392 if (opts->pipe_output)
421 if (!rec->opts.branch_stack)
425 err = perf_evlist__prepare_workload(evsel_list, &opts->target,
426 argv, opts->pipe_output,
447 if (opts->pipe_output) {
470 if (opts->pipe_output) {
519 if (perf_target__has_task(&opts->target))
523 else if (perf_target__has_cpu(&opts->target))
548 if (!perf_target__none(&opts->target))
577 if (done && !disabled && !perf_target__none(&opts->target)) {
719 int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
739 opts->call_graph = CALLCHAIN_FP;
752 opts->call_graph = CALLCHAIN_DWARF;
753 opts->stack_dump_size = default_stack_dump_size;
760 opts->stack_dump_size = size;
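
Lines 719-760 are record_parse_callchain(), which turns the --call-graph argument into a mode: "fp" selects frame-pointer chains (CALLCHAIN_FP), while "dwarf[,size]" selects DWARF unwinding (CALLCHAIN_DWARF) with an optional user-stack dump size that falls back to default_stack_dump_size. A simplified sketch of that split (the real parser tokenizes and validates more carefully):

#include <stdio.h>
#include <string.h>

enum chain_mode { CHAIN_NONE, CHAIN_FP, CHAIN_DWARF };

/* Simplified sketch; the 8192 fallback is an assumption standing in for
 * default_stack_dump_size, which is defined elsewhere in the file. */
static int parse_callchain_sketch(const char *arg, enum chain_mode *mode,
				  unsigned int *dump_size)
{
	if (!strcmp(arg, "fp")) {
		*mode = CHAIN_FP;
		return 0;
	}
	if (!strncmp(arg, "dwarf", 5)) {
		*mode = CHAIN_DWARF;
		if (sscanf(arg, "dwarf,%u", dump_size) != 1)
			*dump_size = 8192;
		return 0;
	}
	return -1;	/* unknown callchain mode */
}
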
775 static void callchain_debug(struct perf_record_opts *opts)
777 pr_debug("callchain: type %d\n", opts->call_graph);
779 if (opts->call_graph == CALLCHAIN_DWARF)
781 opts->stack_dump_size);
788 struct perf_record_opts *opts = opt->value;
793 opts->call_graph = CALLCHAIN_NONE;
798 ret = record_parse_callchain(arg, opts);
800 callchain_debug(opts);
809 struct perf_record_opts *opts = opt->value;
811 if (opts->call_graph == CALLCHAIN_NONE)
812 opts->call_graph = CALLCHAIN_FP;
814 callchain_debug(opts);
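
Lines 788-814 are the two option callbacks that feed the parser above: the --call-graph callback resets call_graph to CALLCHAIN_NONE before parsing its argument (lines 793-798), while the bare -g callback only promotes CALLCHAIN_NONE to CALLCHAIN_FP and otherwise leaves the mode alone (lines 811-812). In effect, perf record -g gives frame-pointer chains, and adding -g after --call-graph dwarf does not undo the DWARF choice.
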
835 .opts = {
867 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
869 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
873 OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
875 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
877 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
879 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
881 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
884 OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
886 OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
887 OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
889 OPT_BOOLEAN(0, "group", &record.opts.group,
891 OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
894 OPT_CALLBACK(0, "call-graph", &record.opts,
900 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
902 OPT_BOOLEAN('d', "data", &record.opts.sample_address,
904 OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
905 OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
906 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
915 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
918 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
922 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
925 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
945 if (!argc && perf_target__none(&rec->opts.target))
948 if (nr_cgroups && !rec->opts.target.system_wide) {
975 err = perf_target__validate(&rec->opts.target);
977 perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
981 err = perf_target__parse_uid(&rec->opts.target);
985 perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
993 if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
996 if (rec->opts.user_interval != ULLONG_MAX)
997 rec->opts.default_interval = rec->opts.user_interval;
998 if (rec->opts.user_freq != UINT_MAX)
999 rec->opts.freq = rec->opts.user_freq;
1004 if (rec->opts.default_interval)
1005 rec->opts.freq = 0;
1006 else if (rec->opts.freq) {
1007 rec->opts.default_interval = rec->opts.freq;
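
Finally, lines 996-1007 resolve the sampling rate: an explicit -c period (user_interval) overrides the default interval, an explicit -F frequency (user_freq) overrides the default frequency, any non-zero period switches frequency sampling off, and when only a frequency is set its value is also stored in default_interval. A sketch of that precedence, where UINT_MAX and ULLONG_MAX mean "not given on the command line":

#include <limits.h>
#include <stdint.h>

/* Sketch of the -c / -F precedence from lines 996-1007; returns 0 on success. */
static int resolve_sampling_sketch(uint64_t user_interval, unsigned int user_freq,
				   uint64_t *interval, unsigned int *freq)
{
	if (user_interval != ULLONG_MAX)
		*interval = user_interval;	/* -c overrides the default period */
	if (user_freq != UINT_MAX)
		*freq = user_freq;		/* -F overrides the default frequency */

	if (*interval)
		*freq = 0;			/* an explicit period disables freq sampling */
	else if (*freq)
		*interval = *freq;		/* frequency value carried in the interval field */
	else
		return -1;			/* both zero: nothing to sample with (assumed error path) */
	return 0;
}
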