/* eloop.c revision e0e48dc666fb14a7bb60264ca87463ba7bc1fe0b */
1/* 2 * Event loop based on select() loop 3 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi> 4 * 5 * This software may be distributed under the terms of the BSD license. 6 * See README for more details. 7 */ 8 9#include "includes.h" 10 11#include "common.h" 12#include "trace.h" 13#include "list.h" 14#include "eloop.h" 15 16#ifdef CONFIG_ELOOP_POLL 17#include <assert.h> 18#include <poll.h> 19#endif /* CONFIG_ELOOP_POLL */ 20 21 22struct eloop_sock { 23 int sock; 24 void *eloop_data; 25 void *user_data; 26 eloop_sock_handler handler; 27 WPA_TRACE_REF(eloop); 28 WPA_TRACE_REF(user); 29 WPA_TRACE_INFO 30}; 31 32struct eloop_timeout { 33 struct dl_list list; 34 struct os_time time; 35 void *eloop_data; 36 void *user_data; 37 eloop_timeout_handler handler; 38 WPA_TRACE_REF(eloop); 39 WPA_TRACE_REF(user); 40 WPA_TRACE_INFO 41}; 42 43struct eloop_signal { 44 int sig; 45 void *user_data; 46 eloop_signal_handler handler; 47 int signaled; 48}; 49 50struct eloop_sock_table { 51 int count; 52 struct eloop_sock *table; 53 int changed; 54}; 55 56struct eloop_data { 57 int max_sock; 58 59 int count; /* sum of all table counts */ 60#ifdef CONFIG_ELOOP_POLL 61 int max_pollfd_map; /* number of pollfds_map currently allocated */ 62 int max_poll_fds; /* number of pollfds currently allocated */ 63 struct pollfd *pollfds; 64 struct pollfd **pollfds_map; 65#endif /* CONFIG_ELOOP_POLL */ 66 struct eloop_sock_table readers; 67 struct eloop_sock_table writers; 68 struct eloop_sock_table exceptions; 69 70 struct dl_list timeout; 71 72 int signal_count; 73 struct eloop_signal *signals; 74 int signaled; 75 int pending_terminate; 76 77 int terminate; 78 int reader_table_changed; 79}; 80 81static struct eloop_data eloop; 82 83 84#ifdef WPA_TRACE 85 86static void eloop_sigsegv_handler(int sig) 87{ 88 wpa_trace_show("eloop SIGSEGV"); 89 abort(); 90} 91 92static void eloop_trace_sock_add_ref(struct eloop_sock_table *table) 93{ 94 int i; 95 if (table == NULL || table->table == NULL) 96 return; 97 
for (i = 0; i < table->count; i++) { 98 wpa_trace_add_ref(&table->table[i], eloop, 99 table->table[i].eloop_data); 100 wpa_trace_add_ref(&table->table[i], user, 101 table->table[i].user_data); 102 } 103} 104 105 106static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table) 107{ 108 int i; 109 if (table == NULL || table->table == NULL) 110 return; 111 for (i = 0; i < table->count; i++) { 112 wpa_trace_remove_ref(&table->table[i], eloop, 113 table->table[i].eloop_data); 114 wpa_trace_remove_ref(&table->table[i], user, 115 table->table[i].user_data); 116 } 117} 118 119#else /* WPA_TRACE */ 120 121#define eloop_trace_sock_add_ref(table) do { } while (0) 122#define eloop_trace_sock_remove_ref(table) do { } while (0) 123 124#endif /* WPA_TRACE */ 125 126 127int eloop_init(void) 128{ 129 os_memset(&eloop, 0, sizeof(eloop)); 130 dl_list_init(&eloop.timeout); 131#ifdef WPA_TRACE 132 signal(SIGSEGV, eloop_sigsegv_handler); 133#endif /* WPA_TRACE */ 134 return 0; 135} 136 137 138static int eloop_sock_table_add_sock(struct eloop_sock_table *table, 139 int sock, eloop_sock_handler handler, 140 void *eloop_data, void *user_data) 141{ 142 struct eloop_sock *tmp; 143 int new_max_sock; 144 145 if (sock > eloop.max_sock) 146 new_max_sock = sock; 147 else 148 new_max_sock = eloop.max_sock; 149 150 if (table == NULL) 151 return -1; 152 153#ifdef CONFIG_ELOOP_POLL 154 if (new_max_sock >= eloop.max_pollfd_map) { 155 struct pollfd **nmap; 156 nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50, 157 sizeof(struct pollfd *)); 158 if (nmap == NULL) 159 return -1; 160 161 eloop.max_pollfd_map = new_max_sock + 50; 162 eloop.pollfds_map = nmap; 163 } 164 165 if (eloop.count + 1 > eloop.max_poll_fds) { 166 struct pollfd *n; 167 int nmax = eloop.count + 1 + 50; 168 n = os_realloc_array(eloop.pollfds, nmax, 169 sizeof(struct pollfd)); 170 if (n == NULL) 171 return -1; 172 173 eloop.max_poll_fds = nmax; 174 eloop.pollfds = n; 175 } 176#endif /* CONFIG_ELOOP_POLL */ 177 178 
eloop_trace_sock_remove_ref(table); 179 tmp = os_realloc_array(table->table, table->count + 1, 180 sizeof(struct eloop_sock)); 181 if (tmp == NULL) 182 return -1; 183 184 tmp[table->count].sock = sock; 185 tmp[table->count].eloop_data = eloop_data; 186 tmp[table->count].user_data = user_data; 187 tmp[table->count].handler = handler; 188 wpa_trace_record(&tmp[table->count]); 189 table->count++; 190 table->table = tmp; 191 eloop.max_sock = new_max_sock; 192 eloop.count++; 193 table->changed = 1; 194 eloop_trace_sock_add_ref(table); 195 196 return 0; 197} 198 199 200static void eloop_sock_table_remove_sock(struct eloop_sock_table *table, 201 int sock) 202{ 203 int i; 204 205 if (table == NULL || table->table == NULL || table->count == 0) 206 return; 207 208 for (i = 0; i < table->count; i++) { 209 if (table->table[i].sock == sock) 210 break; 211 } 212 if (i == table->count) 213 return; 214 eloop_trace_sock_remove_ref(table); 215 if (i != table->count - 1) { 216 os_memmove(&table->table[i], &table->table[i + 1], 217 (table->count - i - 1) * 218 sizeof(struct eloop_sock)); 219 } 220 table->count--; 221 eloop.count--; 222 table->changed = 1; 223 eloop_trace_sock_add_ref(table); 224} 225 226 227#ifdef CONFIG_ELOOP_POLL 228 229static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx) 230{ 231 if (fd < mx && fd >= 0) 232 return pollfds_map[fd]; 233 return NULL; 234} 235 236 237static int eloop_sock_table_set_fds(struct eloop_sock_table *readers, 238 struct eloop_sock_table *writers, 239 struct eloop_sock_table *exceptions, 240 struct pollfd *pollfds, 241 struct pollfd **pollfds_map, 242 int max_pollfd_map) 243{ 244 int i; 245 int nxt = 0; 246 int fd; 247 struct pollfd *pfd; 248 249 /* Clear pollfd lookup map. It will be re-populated below. 
*/ 250 os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map); 251 252 if (readers && readers->table) { 253 for (i = 0; i < readers->count; i++) { 254 fd = readers->table[i].sock; 255 assert(fd >= 0 && fd < max_pollfd_map); 256 pollfds[nxt].fd = fd; 257 pollfds[nxt].events = POLLIN; 258 pollfds[nxt].revents = 0; 259 pollfds_map[fd] = &(pollfds[nxt]); 260 nxt++; 261 } 262 } 263 264 if (writers && writers->table) { 265 for (i = 0; i < writers->count; i++) { 266 /* 267 * See if we already added this descriptor, update it 268 * if so. 269 */ 270 fd = writers->table[i].sock; 271 assert(fd >= 0 && fd < max_pollfd_map); 272 pfd = pollfds_map[fd]; 273 if (!pfd) { 274 pfd = &(pollfds[nxt]); 275 pfd->events = 0; 276 pfd->fd = fd; 277 pollfds[i].revents = 0; 278 pollfds_map[fd] = pfd; 279 nxt++; 280 } 281 pfd->events |= POLLOUT; 282 } 283 } 284 285 /* 286 * Exceptions are always checked when using poll, but I suppose it's 287 * possible that someone registered a socket *only* for exception 288 * handling. Set the POLLIN bit in this case. 289 */ 290 if (exceptions && exceptions->table) { 291 for (i = 0; i < exceptions->count; i++) { 292 /* 293 * See if we already added this descriptor, just use it 294 * if so. 
295 */ 296 fd = exceptions->table[i].sock; 297 assert(fd >= 0 && fd < max_pollfd_map); 298 pfd = pollfds_map[fd]; 299 if (!pfd) { 300 pfd = &(pollfds[nxt]); 301 pfd->events = POLLIN; 302 pfd->fd = fd; 303 pollfds[i].revents = 0; 304 pollfds_map[fd] = pfd; 305 nxt++; 306 } 307 } 308 } 309 310 return nxt; 311} 312 313 314static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table, 315 struct pollfd **pollfds_map, 316 int max_pollfd_map, 317 short int revents) 318{ 319 int i; 320 struct pollfd *pfd; 321 322 if (!table || !table->table) 323 return 0; 324 325 table->changed = 0; 326 for (i = 0; i < table->count; i++) { 327 pfd = find_pollfd(pollfds_map, table->table[i].sock, 328 max_pollfd_map); 329 if (!pfd) 330 continue; 331 332 if (!(pfd->revents & revents)) 333 continue; 334 335 table->table[i].handler(table->table[i].sock, 336 table->table[i].eloop_data, 337 table->table[i].user_data); 338 if (table->changed) 339 return 1; 340 } 341 342 return 0; 343} 344 345 346static void eloop_sock_table_dispatch(struct eloop_sock_table *readers, 347 struct eloop_sock_table *writers, 348 struct eloop_sock_table *exceptions, 349 struct pollfd **pollfds_map, 350 int max_pollfd_map) 351{ 352 if (eloop_sock_table_dispatch_table(readers, pollfds_map, 353 max_pollfd_map, POLLIN | POLLERR | 354 POLLHUP)) 355 return; /* pollfds may be invalid at this point */ 356 357 if (eloop_sock_table_dispatch_table(writers, pollfds_map, 358 max_pollfd_map, POLLOUT)) 359 return; /* pollfds may be invalid at this point */ 360 361 eloop_sock_table_dispatch_table(exceptions, pollfds_map, 362 max_pollfd_map, POLLERR | POLLHUP); 363} 364 365#else /* CONFIG_ELOOP_POLL */ 366 367static void eloop_sock_table_set_fds(struct eloop_sock_table *table, 368 fd_set *fds) 369{ 370 int i; 371 372 FD_ZERO(fds); 373 374 if (table->table == NULL) 375 return; 376 377 for (i = 0; i < table->count; i++) 378 FD_SET(table->table[i].sock, fds); 379} 380 381 382static void eloop_sock_table_dispatch(struct 
eloop_sock_table *table, 383 fd_set *fds) 384{ 385 int i; 386 387 if (table == NULL || table->table == NULL) 388 return; 389 390 table->changed = 0; 391 for (i = 0; i < table->count; i++) { 392 if (FD_ISSET(table->table[i].sock, fds)) { 393 table->table[i].handler(table->table[i].sock, 394 table->table[i].eloop_data, 395 table->table[i].user_data); 396 if (table->changed) 397 break; 398 } 399 } 400} 401 402#endif /* CONFIG_ELOOP_POLL */ 403 404 405static void eloop_sock_table_destroy(struct eloop_sock_table *table) 406{ 407 if (table) { 408 int i; 409 for (i = 0; i < table->count && table->table; i++) { 410 wpa_printf(MSG_INFO, "ELOOP: remaining socket: " 411 "sock=%d eloop_data=%p user_data=%p " 412 "handler=%p", 413 table->table[i].sock, 414 table->table[i].eloop_data, 415 table->table[i].user_data, 416 table->table[i].handler); 417 wpa_trace_dump_funcname("eloop unregistered socket " 418 "handler", 419 table->table[i].handler); 420 wpa_trace_dump("eloop sock", &table->table[i]); 421 } 422 os_free(table->table); 423 } 424} 425 426 427int eloop_register_read_sock(int sock, eloop_sock_handler handler, 428 void *eloop_data, void *user_data) 429{ 430 return eloop_register_sock(sock, EVENT_TYPE_READ, handler, 431 eloop_data, user_data); 432} 433 434 435void eloop_unregister_read_sock(int sock) 436{ 437 eloop_unregister_sock(sock, EVENT_TYPE_READ); 438} 439 440 441static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type) 442{ 443 switch (type) { 444 case EVENT_TYPE_READ: 445 return &eloop.readers; 446 case EVENT_TYPE_WRITE: 447 return &eloop.writers; 448 case EVENT_TYPE_EXCEPTION: 449 return &eloop.exceptions; 450 } 451 452 return NULL; 453} 454 455 456int eloop_register_sock(int sock, eloop_event_type type, 457 eloop_sock_handler handler, 458 void *eloop_data, void *user_data) 459{ 460 struct eloop_sock_table *table; 461 462 table = eloop_get_sock_table(type); 463 return eloop_sock_table_add_sock(table, sock, handler, 464 eloop_data, user_data); 465} 


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}


/*
 * Register a relative timeout (secs/usecs from now). Returns 0 on
 * success (including the "timeout too large, treated as infinite"
 * case) and -1 on allocation or time-read failure.
 */
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_time(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume long enough timeout to be assumed
		 * to be infinite, i.e., the timeout would never happen.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	/* Normalize usec into [0, 1000000) */
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_time_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}


/* Unlink a timeout from the list, drop trace refs, and free it */
static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


/*
 * Cancel all timeouts matching handler and the given data pointers;
 * ELOOP_ALL_CTX acts as a wildcard for either data pointer.
 * Returns the number of timeouts removed.
 */
int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


/*
 * Cancel at most one exactly-matching timeout (no wildcard here) and
 * report the time that remained until it would have fired in
 * *remaining (0/0 if already due). Returns 1 if one was removed,
 * 0 otherwise.
 */
int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_time *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_time now;

	os_get_time(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_time_before(&now, &timeout->time))
				os_time_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


/* Return 1 if a timeout with this exact handler/data triple is
 * pending, else 0 */
int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}


/*
 * Shorten an existing matching timeout to the requested duration if
 * the requested time is sooner than its current remaining time.
 * Returns 1 if the timeout was replaced, 0 if left unchanged or not
 * found.
 */
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_time now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_time(&now);
			os_time_sub(&tmp->time, &now, &remaining);
			if (os_time_before(&requested, &remaining)) {
				/* Replace with the shorter timeout */
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
		}
	}

	return 0;
}


/*
 * Extend an existing matching timeout to the requested duration if the
 * requested time is later than its current remaining time (mirror of
 * eloop_deplete_timeout). Returns 1 if replaced, 0 otherwise.
 */
int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_time now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_time(&now);
			os_time_sub(&tmp->time, &now, &remaining);
			if (os_time_before(&remaining, &requested)) {
				/* Replace with the longer timeout */
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
		}
	}

	return 0;
}


#ifndef CONFIG_NATIVE_WINDOWS
/* SIGALRM watchdog: fires if the loop failed to exit within two
 * seconds of SIGINT/SIGTERM (see eloop_handle_signal) */
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


/*
 * Raw signal handler: only records which signal fired; the registered
 * user callbacks run later from eloop_process_pending_signals() in
 * main-loop context.
 */
static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}


/* Deliver user callbacks for any signals recorded by
 * eloop_handle_signal() since the last call */
static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		/* Shutdown is progressing; cancel the SIGALRM watchdog */
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


/* Register a user callback for sig (delivered from main-loop context).
 * Returns 0 on success, -1 on allocation failure. */
int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


/* Convenience: register the same callback for SIGINT and SIGTERM */
int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}


/* Convenience: register a SIGHUP callback (no-op on Windows builds) */
int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}


/*
 * Main event loop: runs until eloop_terminate() is called or there is
 * nothing left to wait for (no timeouts and no registered sockets).
 * Each iteration: wait (poll/select) bounded by the nearest timeout,
 * process pending signals, fire at most one expired timeout, then
 * dispatch ready socket handlers.
 */
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#else /* CONFIG_ELOOP_POLL */
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_POLL */
	int res;
	struct os_time tv, now;

#ifndef CONFIG_ELOOP_POLL
	/* fd_sets are heap-allocated to keep them off the stack */
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_POLL */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;
		/* List is sorted, so the first entry is the nearest one */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_time(&now);
			if (os_time_before(&now, &timeout->time))
				os_time_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#ifdef CONFIG_ELOOP_POLL
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#else /* CONFIG_ELOOP_POLL */
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_POLL */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		/* No pending timeout -> block indefinitely (-1) */
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);

		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_INFO, "eloop: poll: %s",
				   strerror(errno));
			goto out;
		}
#else /* CONFIG_ELOOP_POLL */
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_INFO, "eloop: select: %s",
				   strerror(errno));
			goto out;
		}
#endif /* CONFIG_ELOOP_POLL */
		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_time(&now);
			if (!os_time_before(&now, &timeout->time)) {
				/* Copy out and remove before calling: the
				 * handler may register/cancel timeouts */
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}

		}

		if (res <= 0)
			continue;

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#else /* CONFIG_ELOOP_POLL */
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_POLL */
	}

	eloop.terminate = 0;
out:
#ifndef CONFIG_ELOOP_POLL
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_POLL */
	return;
}


/* Ask eloop_run() to exit at the top of its next iteration */
void eloop_terminate(void)
{
	eloop.terminate = 1;
}


/* Free all remaining eloop state, logging timeouts and sockets that
 * were never unregistered (leak diagnostics) */
void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_time now;

	os_get_time(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
}


/* Return nonzero once eloop_terminate() has been requested */
int eloop_terminated(void)
{
	return eloop.terminate;
}


/* Block (without running the event loop) until sock is readable */
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#else /* CONFIG_ELOOP_POLL */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* CONFIG_ELOOP_POLL */
}