1#ifndef _SCHED_SYSCTL_H
2#define _SCHED_SYSCTL_H
3
/*
 * Forward-declare the ctl_table tag so the proc-handler prototypes in
 * this header do not declare "struct ctl_table" with prototype-local
 * scope (which triggers a "declared inside parameter list" warning and
 * makes the prototype's struct type incompatible with the real one).
 */
struct ctl_table;

#ifdef CONFIG_DETECT_HUNG_TASK
/* Hung-task detector knobs; defined with the detector implementation
 * (presumably kernel/hung_task.c — confirm there for units/defaults). */
extern int	     sysctl_hung_task_check_count;
extern unsigned int  sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
extern int sysctl_hung_task_warnings;
/* proc handler for the timeout knob; standard ctl_table handler signature */
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
16
17/*
18 * Default maximum number of active map areas, this limits the number of vmas
19 * per mm struct. Users can overwrite this number by sysctl but there is a
20 * problem.
21 *
22 * When a program's coredump is generated as ELF format, a section is created
23 * per a vma. In ELF, the number of sections is represented in unsigned short.
24 * This means the number of sections should be smaller than 65535 at coredump.
25 * Because the kernel adds some informative sections to a image of program at
26 * generating coredump, we need some margin. The number of extra sections is
27 * 1-3 now and depends on arch. We use "5" as safe margin, here.
28 *
29 * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
30 * not a hard limit any more. Although some userspace tools can be surprised by
31 * that.
32 */
/* Headroom kept below the 16-bit ELF section-count limit (see comment above). */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

/* Runtime ceiling on map areas per mm; user-tunable via sysctl. */
extern int sysctl_max_map_count;
37
/* Core scheduler tunables, exposed via sysctl; defined in the scheduler
 * implementation (units/defaults not visible from this header). */
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;

/*
 * Scaling modes selectable through sysctl_sched_tunable_scaling below.
 * NOTE(review): presumably controls how the tunables above scale with
 * CPU count — confirm against the scheduler implementation.
 */
enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LINEAR,
	SCHED_TUNABLESCALING_END,	/* count sentinel, not a valid setting */
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
50
/* NUMA-balancing scanner knobs (delay, min/max period, scan size);
 * definitions and units live with the NUMA balancing implementation. */
extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
55
#ifdef CONFIG_SCHED_DEBUG
/* Debug-build-only scheduler knobs; defined with the scheduler core. */
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_timer_migration;
extern unsigned int sysctl_sched_shares_window;

/* Shared proc handler for the scheduler debug tunables. */
int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos);

/*
 * With scheduler debugging built in, timer migration follows the live
 * sysctl_timer_migration knob; otherwise it is unconditionally enabled.
 * (Merged from two back-to-back #ifdef CONFIG_SCHED_DEBUG blocks —
 * the second #ifdef reopened the conditional the first had just closed.)
 */
static inline unsigned int get_sysctl_timer_migration(void)
{
	return sysctl_timer_migration;
}
#else
static inline unsigned int get_sysctl_timer_migration(void)
{
	return 1;
}
#endif
78
79/*
80 *  control realtime throttling:
81 *
82 *  /proc/sys/kernel/sched_rt_period_us
83 *  /proc/sys/kernel/sched_rt_runtime_us
84 */
85extern unsigned int sysctl_sched_rt_period;
86extern int sysctl_sched_rt_runtime;
87
88#ifdef CONFIG_CFS_BANDWIDTH
89extern unsigned int sysctl_sched_cfs_bandwidth_slice;
90#endif
91
92#ifdef CONFIG_SCHED_AUTOGROUP
93extern unsigned int sysctl_sched_autogroup_enabled;
94#endif
95
96extern int sched_rr_timeslice;
97
/*
 * proc handlers implemented by the scheduler, all using the standard
 * ctl_table handler signature: (table, write-flag, user buffer,
 * in/out length, file offset).
 */
extern int sched_rr_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

extern int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

extern int sysctl_numa_balancing(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos);
109
110#endif /* _SCHED_SYSCTL_H */
111