/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

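/*
 * Backend selection: exactly one of poll(), epoll() or select() is compiled
 * in. Defining both CONFIG_ELOOP_POLL and CONFIG_ELOOP_EPOLL is rejected
 * below; if neither is defined, the portable select() backend is used.
 */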
#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both of poll and epoll
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL)
#define CONFIG_ELOOP_SELECT
#endif

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
#ifdef CONFIG_ELOOP_EPOLL
	eloop_event_type type;
#else /* CONFIG_ELOOP_EPOLL */
	int changed;
#endif /* CONFIG_ELOOP_EPOLL */
};

struct eloop_data {
	int max_sock;

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;
	int epoll_max_fd;
	struct eloop_sock *epoll_table;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};

static struct eloop_data eloop;


#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


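/*
 * eloop_init - Initialize the global event loop state.
 *
 * A minimal usage sketch of the overall lifecycle (handler names my_read_cb
 * and my_timeout_cb and the ctx pointer are hypothetical; the authoritative
 * API documentation is in eloop.h):
 *
 *	if (eloop_init())
 *		return -1;
 *	eloop_register_read_sock(sock, my_read_cb, ctx, NULL);
 *	eloop_register_timeout(1, 0, my_timeout_cb, ctx, NULL);
 *	eloop_run();
 *	eloop_destroy();
 */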
int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s\n",
			   __func__, strerror(errno));
		return -1;
	}
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}


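/*
 * eloop_sock_table_add_sock - Add one descriptor to a socket table.
 *
 * The per-backend arrays (pollfds/pollfds_map for poll(), epoll_table and
 * epoll_events for epoll()) are grown on demand before the new entry is
 * appended to the table; with epoll() the descriptor is also registered via
 * epoll_ctl(EPOLL_CTL_ADD) with an event mask derived from the table type.
 */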
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
                                     int sock, eloop_sock_handler handler,
                                     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct eloop_sock *temp_table;
	struct epoll_event ev, *temp_events;
	int next;
#endif /* CONFIG_ELOOP_EPOLL */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	if (new_max_sock >= eloop.epoll_max_fd) {
		next = eloop.epoll_max_fd == 0 ? 16 : eloop.epoll_max_fd * 2;
		temp_table = os_realloc_array(eloop.epoll_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.epoll_max_fd = next;
		eloop.epoll_table = temp_table;
	}

	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed. "
				   "%s\n", __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL)
		return -1;

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
#ifndef CONFIG_ELOOP_EPOLL
	table->changed = 1;
#endif /* CONFIG_ELOOP_EPOLL */
	eloop_trace_sock_add_ref(table);

#ifdef CONFIG_ELOOP_EPOLL
	os_memset(&ev, 0, sizeof(ev));
	switch (table->type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return -1;
	}
	os_memcpy(&eloop.epoll_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
	return 0;
}


static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
                                         int sock)
{
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
#ifndef CONFIG_ELOOP_EPOLL
	table->changed = 1;
#endif /* CONFIG_ELOOP_EPOLL */
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.epoll_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
}


#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}


static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor, update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor, just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */


#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.epoll_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
	}
}
#endif /* CONFIG_ELOOP_EPOLL */


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}


static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


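/*
 * eloop_register_sock - Register a handler for a socket event type.
 *
 * A minimal sketch (my_write_cb and ctx are hypothetical names; the handler
 * follows the eloop_sock_handler prototype from eloop.h):
 *
 *	static void my_write_cb(int sock, void *eloop_ctx, void *sock_ctx)
 *	{
 *		... socket is writable, send pending data ...
 *	}
 *
 *	eloop_register_sock(sock, EVENT_TYPE_WRITE, my_write_cb, ctx, NULL);
 */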
int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}


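/*
 * eloop_register_timeout - Schedule a one-shot timeout handler.
 *
 * The timeout fires once, secs + usecs from now; re-register from within the
 * handler for periodic behavior. Sketch (my_timeout_cb and ctx are
 * hypothetical names):
 *
 *	static void my_timeout_cb(void *eloop_ctx, void *user_ctx)
 *	{
 *		eloop_register_timeout(1, 0, my_timeout_cb, eloop_ctx,
 *				       user_ctx);
 *	}
 *
 *	eloop_register_timeout(1, 0, my_timeout_cb, ctx, NULL);
 */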
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - a timeout this long is effectively
		 * infinite, i.e., it would never trigger, so drop it.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


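/*
 * eloop_cancel_timeout - Cancel matching pending timeouts.
 *
 * ELOOP_ALL_CTX can be passed as eloop_data and/or user_data to act as a
 * wildcard for that context pointer; the return value is the number of
 * timeouts removed. For example, cancelling every pending my_timeout_cb
 * regardless of context (handler name hypothetical):
 *
 *	eloop_cancel_timeout(my_timeout_cb, ELOOP_ALL_CTX, ELOOP_ALL_CTX);
 */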
int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}


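/*
 * eloop_deplete_timeout - Shorten a matching timeout, never lengthen it.
 *
 * If the requested interval is shorter than the time remaining on the
 * matching timeout, the timeout is re-registered with the shorter value and
 * 1 is returned; otherwise it is left as-is and 0 is returned. -1 means no
 * matching timeout was found. eloop_replenish_timeout() below is the mirror
 * image: it only ever lengthens the matching timeout.
 */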
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}


static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}


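/*
 * eloop_run - Run the event loop until terminated.
 *
 * Each iteration computes the delay to the nearest timeout, blocks in
 * poll()/select()/epoll_wait() for at most that long, delivers any pending
 * signal handlers, fires at most one expired timeout and then dispatches
 * socket handlers for the descriptors reported ready. The loop exits when
 * eloop_terminate() has been called or when no timeouts or sockets remain
 * registered.
 */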
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
				   , strerror(errno));
			goto out;
		}
		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}

		}

		if (res <= 0)
			continue;

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}


void eloop_terminate(void)
{
	eloop.terminate = 1;
}


void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_table);
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
}


int eloop_terminated(void)
{
	return eloop.terminate;
}


void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * epoll() could be used here, but it would require four system calls
	 * (epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and close()
	 * for the epoll fd), so select() is more efficient for a one-shot
	 * wait on a single descriptor.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
}

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */