/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>

#include "event.h"
#include "event-internal.h"
#include "evutil.h"
#include "log.h"

#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* In order of preference */
static const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef HAVE_EPOLL
	&epollops,
#endif
#ifdef HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef HAVE_POLL
	&pollops,
#endif
#ifdef HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};

/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic = 1;

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);

/* Return the current time, preferring the value cached during dispatch and
 * a monotonic clock when one is available. */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	if (use_monotonic) {
		struct timespec	ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
			tp->tv_sec = ts.tv_sec;
			tp->tv_usec = ts.tv_nsec / 1000;
			return (0);
		}
	}
#endif

	/* The monotonic clock is unavailable; fall back to wall-clock time. */
	use_monotonic = 0;

	return (evutil_gettimeofday(tp, NULL));
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new();

	if (base != NULL)
		current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (evutil_getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",
			   base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}
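
/*
 * Example (a sketch, not compiled into the library): typical setup and
 * teardown with the functions above.  Setting EVENT_SHOW_METHOD in the
 * environment additionally logs which backend was selected.
 *
 *	struct event_base *base = event_base_new();
 *	printf("using backend: %s\n", event_base_get_method(base));
 *	... add events and run event_base_loop() here ...
 *	event_base_free(base);
 */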

void
event_base_free(struct event_base *base)
{
	int i, n_deleted = 0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
			__func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}

/* Reinitialize the event base after a fork. */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}
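
/*
 * Example (a sketch, assuming POSIX fork(2)): backend state such as a
 * kqueue descriptor does not survive a fork, so a child process must
 * reinitialize its base before dispatching again.
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		if (event_reinit(base) == -1)
 *			... handle the error in the child ...
 *	}
 */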

int
event_priority_init(int npriorities)
{
	return (event_base_priority_init(current_base, npriorities));
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (base->nactivequeues && npriorities != base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}
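
/*
 * Example (a sketch): give a base three priority levels and assign an
 * event to the most urgent one; see event_priority_set() below.
 *
 *	event_base_priority_init(base, 3);
 *	event_set(&ev, fd, EV_READ, cb, NULL);
 *	event_base_set(base, &ev);
 *	event_priority_set(&ev, 0);	priority 0 is processed first
 *	event_add(&ev, NULL);
 */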

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Lower priority numbers are
 * always processed before higher ones, so a steady stream of low-numbered
 * (more urgent) events can starve the higher-numbered ones.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (base->event_break)
				return;
		}
	}
}

/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
	assert(base);
	return (base->evsel->name);
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	if (event_base == NULL)
		return (-1);

	event_base->event_break = 1;
	return (0);
}
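
/*
 * Example (a sketch): loopexit schedules an orderly exit after the given
 * timeout and lets the current iteration's active callbacks finish, while
 * loopbreak aborts the loop as soon as the callback that called it returns.
 *
 *	struct timeval ten_sec = { 10, 0 };
 *	event_base_loopexit(base, &ten_sec);	stop in ten seconds
 *	event_base_loopbreak(base);		stop right away
 */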

/* not thread safe */
int
event_loop(int flags)
{
	return (event_base_loop(current_base, flags));
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* update last old time */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}
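
/*
 * Example (a sketch): the flags select how long one call runs.
 *
 *	event_base_loop(base, 0);		run until no events remain
 *	event_base_loop(base, EVLOOP_ONCE);	block once, run callbacks, return
 *	event_base_loop(base, EVLOOP_NONBLOCK);	poll without blocking
 */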

/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback that deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* not thread safe; schedules an event once */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	return (event_base_once(current_base, fd, events, callback, arg, tv));
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			evutil_timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}
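
/*
 * Example (a sketch, assuming a user-supplied on_timeout callback): fire
 * a callback exactly once, two seconds from now.  The internal event_once
 * wrapper frees itself after the callback runs.
 *
 *	static void on_timeout(int fd, short events, void *arg) { ... }
 *
 *	struct timeval two_sec = { 2, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, on_timeout, NULL, &two_sec);
 */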

void
event_set(struct event *ev, int fd, short events,
	  void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues / 2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues / 2;

	return (0);
}
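
/*
 * Example (a sketch, assuming a user-supplied on_read callback): the
 * canonical sequence is event_set(), then event_base_set() to bind the
 * event to a base other than the current one, then event_add().
 *
 *	static void on_read(int fd, short events, void *arg) { ... }
 *
 *	struct event ev;
 *	event_set(&ev, fd, EV_READ|EV_PERSIST, on_read, NULL);
 *	event_base_set(base, &ev);
 *	event_add(&ev, NULL);	no timeout; wait for readability only
 */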

/*
 * Sets the priority of an event; changing the priority of an event that
 * is already active will fail.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval	now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		evutil_timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		evutil_gettimeofday(&now, NULL);
		evutil_timeradd(&now, &res, tv);
	}

	return (flags & event);
}
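
/*
 * Example (a sketch): ask whether a timeout is still pending on ev and,
 * if so, when it will expire; the reported time is remapped to the wall
 * clock as shown above.
 *
 *	struct timeval expires;
 *	if (event_pending(&ev, EV_TIMEOUT, &expires))
 *		... the timeout fires at the absolute time in expires ...
 */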

int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below; if we fail on
	 * any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
			1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		evutil_timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %ld seconds, call %p",
			 tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}
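
/*
 * Example (a sketch): because event_add() replaces any pending timeout,
 * re-adding the same event from its own callback yields a periodic timer.
 *
 *	static void tick(int fd, short events, void *arg)
 *	{
 *		struct event *ev = arg;
 *		struct timeval one_sec = { 1, 0 };
 *		event_add(ev, &one_sec);	reschedule ourselves
 *		... periodic work ...
 *	}
 */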

int
event_del(struct event *ev)
{
	struct event_base *base;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (base->evsel->del(base->evbase, ev));
	}

	return (0);
}

void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}
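
/*
 * Example (a sketch): event_active() can fire an event by hand, without
 * waiting for I/O or a timeout; the callback runs with the result flags
 * given here on the next pass over the active queues.
 *
 *	event_active(&ev, EV_READ, 1);	run the callback once as if readable
 */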

static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(base, &now) == -1)
		return (-1);

	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		return (0);
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
	return (0);
}

/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using a
 * monotonic clock.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event **pev;
	unsigned int size;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(base, tv);
	if (evutil_timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
		    __func__));
	evutil_timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of each node without destroying
	 * the heap property, because we apply the same offset to all
	 * elements in order.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);
	}
	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}

void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap))
		return;

	gettime(base, &now);

	while ((ev = min_heap_top(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
			 ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}

void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
			   ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_erase(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
			   ev, ev->ev_fd, queue);
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_push(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return (VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}