/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#ifdef CONFIG_ELOOP_POLL
#include <assert.h>
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */


struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_time time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	int changed;
};

struct eloop_data {
	int max_sock;

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
	int reader_table_changed;
};

static struct eloop_data eloop;


#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
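
/*
 * Illustrative sketch, not part of the original eloop implementation: a
 * minimal read-socket callback and its registration, showing how the two
 * context pointers passed to eloop_register_read_sock() are stored in
 * struct eloop_sock and handed back to the handler whenever the descriptor
 * becomes readable. All example_* names are hypothetical.
 */
static void example_read_handler(int sock, void *eloop_ctx, void *sock_ctx)
{
	/* eloop_ctx/sock_ctx are the eloop_data/user_data pointers that were
	 * supplied at registration time. */
	wpa_printf(MSG_DEBUG, "example: sock %d readable (ctx %p/%p)",
		   sock, eloop_ctx, sock_ctx);
}

int example_register_reader(int sock, void *module_ctx)
{
	/* Assumes sock is an already open, preferably non-blocking,
	 * descriptor owned by the caller. */
	return eloop_register_read_sock(sock, example_read_handler,
					module_ctx, NULL);
}
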

static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL)
		return -1;

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

	return 0;
}


static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
}
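
/*
 * Illustrative sketch, not part of the original file: a common pattern built
 * on the add/remove primitives above - keep a descriptor registered for
 * write events only while output is queued and drop the registration from
 * inside the handler once the queue drains. Removing an entry from within a
 * handler sets table->changed, so the dispatch loop stops walking the (now
 * reallocated) table for the current round. The example_* names and the
 * tx_pending flag are hypothetical.
 */
struct example_writer {
	int sock;
	int tx_pending;
};

static void example_writable_handler(int sock, void *eloop_ctx, void *sock_ctx)
{
	struct example_writer *w = eloop_ctx;

	/* ... flush the queued output for w->sock here ... */
	w->tx_pending = 0;
	eloop_unregister_sock(sock, EVENT_TYPE_WRITE);
}

int example_start_write(struct example_writer *w)
{
	if (w->tx_pending)
		return 0;
	w->tx_pending = 1;
	return eloop_register_sock(w->sock, EVENT_TYPE_WRITE,
				   example_writable_handler, w, NULL);
}
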

#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}


static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor, update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor, just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#else /* CONFIG_ELOOP_POLL */

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++)
		FD_SET(table->table[i].sock, fds);
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_POLL */


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}
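
/*
 * Illustrative sketch, not part of the original file: registering a socket
 * for exception events only. Note that in the CONFIG_ELOOP_POLL build,
 * eloop_sock_table_set_fds() above also sets POLLIN for descriptors that are
 * registered solely for exceptions. The example_* names are hypothetical.
 */
static void example_exception_handler(int sock, void *eloop_ctx,
				      void *sock_ctx)
{
	wpa_printf(MSG_DEBUG, "example: exception event on sock %d", sock);
	eloop_unregister_sock(sock, EVENT_TYPE_EXCEPTION);
}

int example_watch_exceptions(int sock, void *ctx)
{
	return eloop_register_sock(sock, EVENT_TYPE_EXCEPTION,
				   example_exception_handler, ctx, NULL);
}
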

static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}


int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_time(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume a timeout this long to be
		 * effectively infinite, i.e., the timeout would never happen.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_time_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}
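
/*
 * Illustrative sketch, not part of the original file: a periodic timer built
 * on the one-shot timeout API above. The entry is removed before the handler
 * runs, so the handler re-arms itself; example_stop_polling() uses
 * ELOOP_ALL_CTX to cancel every pending instance regardless of its user_data
 * pointer. The example_* names and the 10-second interval are hypothetical.
 */
static void example_poll_timeout(void *eloop_ctx, void *user_ctx)
{
	/* ... perform the periodic work here ... */

	/* Re-register for the next interval. */
	eloop_register_timeout(10, 0, example_poll_timeout, eloop_ctx,
			       user_ctx);
}

void example_start_polling(void *ctx)
{
	if (!eloop_is_timeout_registered(example_poll_timeout, ctx, NULL))
		eloop_register_timeout(10, 0, example_poll_timeout, ctx, NULL);
}

void example_stop_polling(void *ctx)
{
	eloop_cancel_timeout(example_poll_timeout, ctx, ELOOP_ALL_CTX);
}
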

#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}


static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}
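
/*
 * Illustrative sketch, not part of the original file: a typical terminate
 * callback. eloop_handle_signal() above only marks the signal as pending;
 * this callback is invoked later from eloop_process_pending_signals(), i.e.,
 * outside signal context, so it may safely call into the rest of the
 * program. The example_* names are hypothetical.
 */
static void example_terminate_handler(int sig, void *signal_ctx)
{
	wpa_printf(MSG_DEBUG, "example: signal %d received - terminating",
		   sig);
	eloop_terminate();
}

int example_setup_signals(void *ctx)
{
	return eloop_register_signal_terminate(example_terminate_handler,
					       ctx);
}
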

void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#else /* CONFIG_ELOOP_POLL */
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_POLL */
	int res;
	struct os_time tv, now;

#ifndef CONFIG_ELOOP_POLL
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_POLL */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_time(&now);
			if (os_time_before(&now, &timeout->time))
				os_time_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#ifdef CONFIG_ELOOP_POLL
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#else /* CONFIG_ELOOP_POLL */
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_POLL */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);

		if (res < 0 && errno != EINTR && errno != 0) {
			perror("poll");
			goto out;
		}
#else /* CONFIG_ELOOP_POLL */
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
		if (res < 0 && errno != EINTR && errno != 0) {
			perror("select");
			goto out;
		}
#endif /* CONFIG_ELOOP_POLL */
		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_time(&now);
			if (!os_time_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}

		}

		if (res <= 0)
			continue;

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#else /* CONFIG_ELOOP_POLL */
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_POLL */
	}

out:
#ifndef CONFIG_ELOOP_POLL
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_POLL */
	return;
}


void eloop_terminate(void)
{
	eloop.terminate = 1;
}


void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_time now;

	os_get_time(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
}


int eloop_terminated(void)
{
	return eloop.terminate;
}
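
/*
 * Illustrative sketch, not part of the original file: the expected life
 * cycle around eloop_run() - initialize, register event sources, run until
 * eloop_terminate() is called or no events remain, then clean up (which also
 * reports any sockets or timeouts that were never unregistered). The
 * example_* name is hypothetical.
 */
int example_main_loop(void)
{
	if (eloop_init() < 0)
		return -1;

	/*
	 * Register sockets, timeouts and signal handlers here, e.g. with
	 * eloop_register_read_sock(), eloop_register_timeout() and
	 * eloop_register_signal_terminate().
	 */

	eloop_run();		/* blocks until terminated or out of events */

	eloop_destroy();
	return 0;
}
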

void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#else /* CONFIG_ELOOP_POLL */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* CONFIG_ELOOP_POLL */
}
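
/*
 * Illustrative sketch, not part of the original file:
 * eloop_wait_for_read_sock() is a synchronous helper for use outside
 * eloop_run(), e.g., blocking until a reply is pending on a control socket.
 * The example_* name and the reply-processing step are hypothetical.
 */
void example_wait_for_reply(int ctrl_sock)
{
	/* Block without a timeout until ctrl_sock becomes readable. */
	eloop_wait_for_read_sock(ctrl_sock);

	/* ... read and handle the pending reply here ... */
}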