#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

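/*
 * Like the helpers above, expects the runqueue lock to be held for
 * atomicity of update.
 */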
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS */
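
/*
 * Illustrative only (not part of this header): callers bump schedstat
 * fields unconditionally and rely on the !CONFIG_SCHEDSTATS stubs above
 * to compile the accesses away, e.g.:
 *
 *	schedstat_inc(rq, yld_count);
 *	schedstat_add(cfs_rq, exec_clock, delta_exec);
 *
 * so no #ifdef is needed at the call sites.
 */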

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
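/* Forget a pending last_queued stamp once its wait has been accounted. */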
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu.  We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus: the delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases to be the currently running process on
 * its cpu, typically because it expired its time slice (this may also
 * happen when switching to the idle task).  Now we can calculate how
 * long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

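/*
 * A rough sketch of how the helpers above cooperate over a task's
 * lifetime on a runqueue (illustrative, not normative):
 *
 *	enqueue_task()  -> sched_info_queued():  last_queued = rq_clock()
 *	picked to run   -> sched_info_arrive():  run_delay += now - last_queued
 *	switched out    -> sched_info_depart():  rq_cpu_time += now - last_arrival
 *
 * run_delay therefore accumulates time spent runnable but not running,
 * which is what /proc/<pid>/schedstat and delay accounting report.
 */
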
/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
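
/*
 * A sketch of the expected call site (assuming the usual context switch
 * path in core.c):
 *
 *	prepare_task_switch(rq, prev, next)
 *		-> sched_info_switch(rq, prev, next);
 *
 * i.e. once per context switch, with the runqueue lock held and
 * prev != next guaranteed by the caller.
 */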
#else
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we no longer account further cputime consumed by
	 * that task to the signal struct, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * To keep thread group cputime and thread group cputimer accounting
	 * consistent, let's also ignore the cputime elapsing after
	 * __exit_signal() in any running thread group timer.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, update the thread group's
 * utime total under the cputimer lock.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.utime += cputime;
	raw_spin_unlock(&cputimer->lock);
}
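
/*
 * Illustrative caller (a sketch of the tick accounting path, e.g.
 * account_user_time() in cputime.c):
 *
 *	p->utime += cputime;
 *	account_group_user_time(p, cputime);
 *
 * keeping the per-task and thread-group utime totals in step.
 */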

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, update the thread group's
 * stime total under the cputimer lock.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.stime += cputime;
	raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, update the thread group's
 * sum_exec_runtime total under the cputimer lock.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	raw_spin_unlock(&cputimer->lock);
}
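
/*
 * Illustrative caller (a sketch of update_curr() in fair.c):
 *
 *	curr->sum_exec_runtime += delta_exec;
 *	...
 *	if (entity_is_task(curr))
 *		account_group_exec_runtime(task_of(curr), delta_exec);
 *
 * keeping the thread group's sum_exec_runtime in step with the
 * per-entity runtime maintained by the fair scheduler.
 */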