/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>

#include <asm/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timespec.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)
static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}
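
/*
 * Worked example (illustrative): for a 2 second timeout on a
 * normal-priority task, divfactor stays 1000, so the estimated slack is
 * 2 * (NSEC_PER_SEC / 1000) = 2,000,000 ns, i.e. 0.1% of the timeout.
 * Timeouts beyond MAX_SLACK / (NSEC_PER_SEC / 1000) = 100 seconds are
 * capped at MAX_SLACK (100 msec) outright.
 */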

long select_estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}


struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
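
/*
 * Rough sizing (illustrative; the exact entry count depends on the
 * architecture's struct sizes): a poll_table_page is one page, so after
 * the next/entry header roughly (PAGE_SIZE - 2 * sizeof(void *)) /
 * sizeof(struct poll_table_entry) entries fit before POLL_TABLE_FULL()
 * makes poll_get_entry() below chain a fresh page onto pwq->table.
 */
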
/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
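
/*
 * A minimal sketch of how the helpers above compose (do_select() and
 * do_poll() below are the real callers; names like "events_found" are
 * placeholders):
 *
 *	struct poll_wqueues table;
 *
 *	poll_initwait(&table);
 *	for (;;) {
 *		... call each watched file's ->poll() with &table.pt,
 *		which funnels into __pollwait() and queues us on the
 *		file's waitqueue ...
 *		if (events_found || timed_out || signal_pending(current))
 *			break;
 *		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
 *					   to, slack))
 *			timed_out = 1;
 *	}
 *	poll_freewait(&table);
 */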

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}
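
/*
 * Example (hypothetical caller): turning a relative 250 msec timeout
 * into the absolute expiry that the rest of this file works with:
 *
 *	struct timespec end_time;
 *
 *	if (poll_select_set_timeout(&end_time, 0, 250 * NSEC_PER_MSEC))
 *		return -EINVAL;
 *
 * A non-normalized value, e.g. nsec == NSEC_PER_SEC, is rejected with
 * -EINVAL rather than silently wrapped.
 */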

static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}
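
/*
 * Worked example (illustrative, BITS_PER_LONG == 64): for n = 70, the
 * partial-word mask covers bits 64..69, so max_select_fd() first checks
 * word 1 against that mask and then walks the remaining words downward,
 * returning one past the highest set bit that refers to an open fd, or
 * -EBADF if any requested fd is not open.
 */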

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				unsigned int ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}
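
/*
 * For instance (illustrative): an fd that do_select() watches only for
 * readability gets _key = POLLEX_SET | POLLIN_SET (plus POLL_BUSY_LOOP
 * when busy polling), so pollwake() ignores wakeups whose key carries
 * none of those bits.
 */
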
398
399int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
400{
401	ktime_t expire, *to = NULL;
402	struct poll_wqueues table;
403	poll_table *wait;
404	int retval, i, timed_out = 0;
405	unsigned long slack = 0;
406	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
407	unsigned long busy_end = 0;
408
409	rcu_read_lock();
410	retval = max_select_fd(n, fds);
411	rcu_read_unlock();
412
413	if (retval < 0)
414		return retval;
415	n = retval;
416
417	poll_initwait(&table);
418	wait = &table.pt;
419	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
420		wait->_qproc = NULL;
421		timed_out = 1;
422	}
423
424	if (end_time && !timed_out)
425		slack = select_estimate_accuracy(end_time);
426
427	retval = 0;
428	for (;;) {
429		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
430		bool can_busy_loop = false;
431
432		inp = fds->in; outp = fds->out; exp = fds->ex;
433		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
434
435		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
436			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
437			unsigned long res_in = 0, res_out = 0, res_ex = 0;
438
439			in = *inp++; out = *outp++; ex = *exp++;
440			all_bits = in | out | ex;
441			if (all_bits == 0) {
442				i += BITS_PER_LONG;
443				continue;
444			}
445
446			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
447				struct fd f;
448				if (i >= n)
449					break;
450				if (!(bit & all_bits))
451					continue;
452				f = fdget(i);
453				if (f.file) {
454					const struct file_operations *f_op;
455					f_op = f.file->f_op;
456					mask = DEFAULT_POLLMASK;
457					if (f_op->poll) {
458						wait_key_set(wait, in, out,
459							     bit, busy_flag);
460						mask = (*f_op->poll)(f.file, wait);
461					}
462					fdput(f);
463					if ((mask & POLLIN_SET) && (in & bit)) {
464						res_in |= bit;
465						retval++;
466						wait->_qproc = NULL;
467					}
468					if ((mask & POLLOUT_SET) && (out & bit)) {
469						res_out |= bit;
470						retval++;
471						wait->_qproc = NULL;
472					}
473					if ((mask & POLLEX_SET) && (ex & bit)) {
474						res_ex |= bit;
475						retval++;
476						wait->_qproc = NULL;
477					}
478					/* got something, stop busy polling */
479					if (retval) {
480						can_busy_loop = false;
481						busy_flag = 0;
482
483					/*
484					 * only remember a returned
485					 * POLL_BUSY_LOOP if we asked for it
486					 */
487					} else if (busy_flag & mask)
488						can_busy_loop = true;
489
490				}
491			}
492			if (res_in)
493				*rinp = res_in;
494			if (res_out)
495				*routp = res_out;
496			if (res_ex)
497				*rexp = res_ex;
498			cond_resched();
499		}
500		wait->_qproc = NULL;
501		if (retval || timed_out || signal_pending(current))
502			break;
503		if (table.error) {
504			retval = table.error;
505			break;
506		}
507
508		/* only if found POLL_BUSY_LOOP sockets && not out of time */
509		if (can_busy_loop && !need_resched()) {
510			if (!busy_end) {
511				busy_end = busy_loop_end_time();
512				continue;
513			}
514			if (!busy_loop_timeout(busy_end))
515				continue;
516		}
517		busy_flag = 0;
518
519		/*
520		 * If this is the first loop and we have a timeout
521		 * given, then we convert to ktime_t and set the to
522		 * pointer to the expiry value.
523		 */
524		if (end_time && !to) {
525			expire = timespec_to_ktime(*end_time);
526			to = &expire;
527		}
528
529		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
530					   to, slack))
531			timed_out = 1;
532	}
533
534	poll_freewait(&table);
535
536	return retval;
537}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}
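
/*
 * Userspace view (a hedged sketch, not kernel code): waiting up to 2.5
 * seconds for stdin to become readable reaches the syscall above as:
 *
 *	fd_set rfds;
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(0, &rfds);
 *	ret = select(1, &rfds, NULL, NULL, &tv);
 *
 * On return tv is updated to the time remaining, unless the task's
 * personality has STICKY_TIMEOUTS set (see poll_select_copy_remaining()).
 */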

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
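
/*
 * The sixth argument therefore points at something shaped like this in
 * userspace (illustrative; glibc assembles it for you when you call
 * pselect()):
 *
 *	struct {
 *		const sigset_t *ss;
 *		size_t ss_len;
 *	} sig = { &mask, sizeof(mask) };
 *
 *	syscall(__NR_pselect6, n, &in, &out, &ex, &ts, &sig);
 */
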
717
718#ifdef __ARCH_WANT_SYS_OLD_SELECT
719struct sel_arg_struct {
720	unsigned long n;
721	fd_set __user *inp, *outp, *exp;
722	struct timeval __user *tvp;
723};
724
725SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
726{
727	struct sel_arg_struct a;
728
729	if (copy_from_user(&a, arg, sizeof(a)))
730		return -EFAULT;
731	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
732}
733#endif
734
735struct poll_list {
736	struct poll_list *next;
737	int len;
738	struct pollfd entries[0];
739};
740
741#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
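
/*
 * Rough numbers (illustrative, assuming 4 KiB pages and an 8-byte
 * struct pollfd): after the next/len header each chained page holds on
 * the order of 500 pollfds; do_sys_poll() below keeps the first
 * N_STACK_PPS entries on the stack and only chains pages beyond that.
 */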

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_busy_poll,
				     unsigned int busy_flag)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				pwait->_key |= busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}

static int do_poll(unsigned int nfds,  struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}
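
/*
 * Userspace view (a hedged sketch; sock_fd and pipe_fd stand for
 * whatever descriptors the caller holds): waiting up to one second for
 * either of two fds funnels into do_sys_poll() above via poll(2):
 *
 *	struct pollfd fds[2] = {
 *		{ .fd = sock_fd, .events = POLLIN },
 *		{ .fd = pipe_fd, .events = POLLOUT },
 *	};
 *
 *	n = poll(fds, 2, 1000);
 *
 * Each fds[i].revents is filled in by the __put_user() loop above; a
 * closed or invalid fd reports POLLNVAL instead of failing the call.
 */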

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}

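/*
 * Userspace view (a hedged sketch): ppoll(2) exists so that swapping the
 * signal mask and starting the wait happen atomically, closing the race
 * a separate sigprocmask() + poll() pair would leave open:
 *
 *	sigset_t mask;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGUSR1);
 *	n = ppoll(fds, nfds, &ts, &mask);
 *
 * Here SIGUSR1 is blocked for the duration of the wait and the caller's
 * original mask is restored before ppoll() returns.
 */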