| @@ -34,57 +34,54 @@ | | | @@ -34,57 +34,54 @@ |
34 | #ifdef CONFIG_ELOOP_EPOLL | | 34 | #ifdef CONFIG_ELOOP_EPOLL |
35 | #include <sys/epoll.h> | | 35 | #include <sys/epoll.h> |
36 | #endif /* CONFIG_ELOOP_EPOLL */ | | 36 | #endif /* CONFIG_ELOOP_EPOLL */ |
37 | | | 37 | |
38 | #ifdef CONFIG_ELOOP_KQUEUE | | 38 | #ifdef CONFIG_ELOOP_KQUEUE |
39 | #include <sys/event.h> | | 39 | #include <sys/event.h> |
40 | #endif /* CONFIG_ELOOP_KQUEUE */ | | 40 | #endif /* CONFIG_ELOOP_KQUEUE */ |
41 | | | 41 | |
42 | struct eloop_sock { | | 42 | struct eloop_sock { |
43 | int sock; | | 43 | int sock; |
44 | void *eloop_data; | | 44 | void *eloop_data; |
45 | void *user_data; | | 45 | void *user_data; |
46 | eloop_sock_handler handler; | | 46 | eloop_sock_handler handler; |
47 | WPA_TRACE_REF(eloop) | | 47 | WPA_TRACE_REF(eloop); |
48 | WPA_TRACE_REF(user) | | 48 | WPA_TRACE_REF(user); |
49 | WPA_TRACE_INFO | | 49 | WPA_TRACE_INFO |
50 | }; | | 50 | }; |
51 | | | 51 | |
52 | struct eloop_timeout { | | 52 | struct eloop_timeout { |
53 | struct dl_list list; | | 53 | struct dl_list list; |
54 | struct os_reltime time; | | 54 | struct os_reltime time; |
55 | void *eloop_data; | | 55 | void *eloop_data; |
56 | void *user_data; | | 56 | void *user_data; |
57 | eloop_timeout_handler handler; | | 57 | eloop_timeout_handler handler; |
58 | WPA_TRACE_REF(eloop) | | 58 | WPA_TRACE_REF(eloop); |
59 | WPA_TRACE_REF(user) | | 59 | WPA_TRACE_REF(user); |
60 | WPA_TRACE_INFO | | 60 | WPA_TRACE_INFO |
61 | }; | | 61 | }; |
62 | | | 62 | |
63 | struct eloop_signal { | | 63 | struct eloop_signal { |
64 | int sig; | | 64 | int sig; |
65 | void *user_data; | | 65 | void *user_data; |
66 | eloop_signal_handler handler; | | 66 | eloop_signal_handler handler; |
67 | int signaled; | | 67 | int signaled; |
68 | }; | | 68 | }; |
69 | | | 69 | |
70 | struct eloop_sock_table { | | 70 | struct eloop_sock_table { |
71 | int count; | | 71 | int count; |
72 | struct eloop_sock *table; | | 72 | struct eloop_sock *table; |
73 | #if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE) | | | |
74 | eloop_event_type type; | | 73 | eloop_event_type type; |
75 | #else /* CONFIG_ELOOP_EPOLL */ | | | |
76 | int changed; | | 74 | int changed; |
77 | #endif /* CONFIG_ELOOP_EPOLL */ | | | |
78 | }; | | 75 | }; |
79 | | | 76 | |
80 | struct eloop_data { | | 77 | struct eloop_data { |
81 | int max_sock; | | 78 | int max_sock; |
82 | | | 79 | |
83 | int count; /* sum of all table counts */ | | 80 | int count; /* sum of all table counts */ |
84 | #ifdef CONFIG_ELOOP_POLL | | 81 | #ifdef CONFIG_ELOOP_POLL |
85 | int max_pollfd_map; /* number of pollfds_map currently allocated */ | | 82 | int max_pollfd_map; /* number of pollfds_map currently allocated */ |
86 | int max_poll_fds; /* number of pollfds currently allocated */ | | 83 | int max_poll_fds; /* number of pollfds currently allocated */ |
87 | struct pollfd *pollfds; | | 84 | struct pollfd *pollfds; |
88 | struct pollfd **pollfds_map; | | 85 | struct pollfd **pollfds_map; |
89 | #endif /* CONFIG_ELOOP_POLL */ | | 86 | #endif /* CONFIG_ELOOP_POLL */ |
90 | #if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE) | | 87 | #if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE) |
| @@ -292,29 +289,27 @@ static int eloop_sock_table_add_sock(str | | | @@ -292,29 +289,27 @@ static int eloop_sock_table_add_sock(str |
292 | eloop_trace_sock_add_ref(table); | | 289 | eloop_trace_sock_add_ref(table); |
293 | return -1; | | 290 | return -1; |
294 | } | | 291 | } |
295 | | | 292 | |
296 | tmp[table->count].sock = sock; | | 293 | tmp[table->count].sock = sock; |
297 | tmp[table->count].eloop_data = eloop_data; | | 294 | tmp[table->count].eloop_data = eloop_data; |
298 | tmp[table->count].user_data = user_data; | | 295 | tmp[table->count].user_data = user_data; |
299 | tmp[table->count].handler = handler; | | 296 | tmp[table->count].handler = handler; |
300 | wpa_trace_record(&tmp[table->count]); | | 297 | wpa_trace_record(&tmp[table->count]); |
301 | table->count++; | | 298 | table->count++; |
302 | table->table = tmp; | | 299 | table->table = tmp; |
303 | eloop.max_sock = new_max_sock; | | 300 | eloop.max_sock = new_max_sock; |
304 | eloop.count++; | | 301 | eloop.count++; |
305 | #if !defined(CONFIG_ELOOP_EPOLL) && !defined(CONFIG_ELOOP_KQUEUE) | | | |
306 | table->changed = 1; | | 302 | table->changed = 1; |
307 | #endif /* CONFIG_ELOOP_EPOLL */ | | | |
308 | eloop_trace_sock_add_ref(table); | | 303 | eloop_trace_sock_add_ref(table); |
309 | | | 304 | |
310 | #ifdef CONFIG_ELOOP_EPOLL | | 305 | #ifdef CONFIG_ELOOP_EPOLL |
311 | os_memset(&ev, 0, sizeof(ev)); | | 306 | os_memset(&ev, 0, sizeof(ev)); |
312 | switch (table->type) { | | 307 | switch (table->type) { |
313 | case EVENT_TYPE_READ: | | 308 | case EVENT_TYPE_READ: |
314 | ev.events = EPOLLIN; | | 309 | ev.events = EPOLLIN; |
315 | break; | | 310 | break; |
316 | case EVENT_TYPE_WRITE: | | 311 | case EVENT_TYPE_WRITE: |
317 | ev.events = EPOLLOUT; | | 312 | ev.events = EPOLLOUT; |
318 | break; | | 313 | break; |
319 | /* | | 314 | /* |
320 | * Exceptions are always checked when using epoll, but I suppose it's | | 315 | * Exceptions are always checked when using epoll, but I suppose it's |
| @@ -373,29 +368,27 @@ static void eloop_sock_table_remove_sock | | | @@ -373,29 +368,27 @@ static void eloop_sock_table_remove_sock |
373 | if (table->table[i].sock == sock) | | 368 | if (table->table[i].sock == sock) |
374 | break; | | 369 | break; |
375 | } | | 370 | } |
376 | if (i == table->count) | | 371 | if (i == table->count) |
377 | return; | | 372 | return; |
378 | eloop_trace_sock_remove_ref(table); | | 373 | eloop_trace_sock_remove_ref(table); |
379 | if (i != table->count - 1) { | | 374 | if (i != table->count - 1) { |
380 | os_memmove(&table->table[i], &table->table[i + 1], | | 375 | os_memmove(&table->table[i], &table->table[i + 1], |
381 | (table->count - i - 1) * | | 376 | (table->count - i - 1) * |
382 | sizeof(struct eloop_sock)); | | 377 | sizeof(struct eloop_sock)); |
383 | } | | 378 | } |
384 | table->count--; | | 379 | table->count--; |
385 | eloop.count--; | | 380 | eloop.count--; |
386 | #if !defined(CONFIG_ELOOP_EPOLL) && !defined(CONFIG_ELOOP_KQUEUE) | | | |
387 | table->changed = 1; | | 381 | table->changed = 1; |
388 | #endif /* CONFIG_ELOOP_EPOLL */ | | | |
389 | eloop_trace_sock_add_ref(table); | | 382 | eloop_trace_sock_add_ref(table); |
390 | #ifdef CONFIG_ELOOP_EPOLL | | 383 | #ifdef CONFIG_ELOOP_EPOLL |
391 | if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) { | | 384 | if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) { |
392 | wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d " | | 385 | wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d " |
393 | "failed. %s\n", __func__, sock, strerror(errno)); | | 386 | "failed. %s\n", __func__, sock, strerror(errno)); |
394 | return; | | 387 | return; |
395 | } | | 388 | } |
396 | os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock)); | | 389 | os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock)); |
397 | #endif /* CONFIG_ELOOP_EPOLL */ | | 390 | #endif /* CONFIG_ELOOP_EPOLL */ |
398 | #ifdef CONFIG_ELOOP_KQUEUE | | 391 | #ifdef CONFIG_ELOOP_KQUEUE |
399 | EV_SET(&ke, sock, 0, EV_DELETE, 0, 0, NULL); | | 392 | EV_SET(&ke, sock, 0, EV_DELETE, 0, 0, NULL); |
400 | if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) { | | 393 | if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) { |
401 | wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d " | | 394 | wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d " |
| @@ -591,43 +584,51 @@ static void eloop_sock_table_dispatch(st | | | @@ -591,43 +584,51 @@ static void eloop_sock_table_dispatch(st |
591 | | | 584 | |
592 | #ifdef CONFIG_ELOOP_EPOLL | | 585 | #ifdef CONFIG_ELOOP_EPOLL |
593 | static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds) | | 586 | static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds) |
594 | { | | 587 | { |
595 | struct eloop_sock *table; | | 588 | struct eloop_sock *table; |
596 | int i; | | 589 | int i; |
597 | | | 590 | |
598 | for (i = 0; i < nfds; i++) { | | 591 | for (i = 0; i < nfds; i++) { |
599 | table = &eloop.fd_table[events[i].data.fd]; | | 592 | table = &eloop.fd_table[events[i].data.fd]; |
600 | if (table->handler == NULL) | | 593 | if (table->handler == NULL) |
601 | continue; | | 594 | continue; |
602 | table->handler(table->sock, table->eloop_data, | | 595 | table->handler(table->sock, table->eloop_data, |
603 | table->user_data); | | 596 | table->user_data); |
| | | 597 | if (eloop.readers.changed || |
| | | 598 | eloop.writers.changed || |
| | | 599 | eloop.exceptions.changed) |
| | | 600 | break; |
604 | } | | 601 | } |
605 | } | | 602 | } |
606 | #endif /* CONFIG_ELOOP_EPOLL */ | | 603 | #endif /* CONFIG_ELOOP_EPOLL */ |
607 | | | 604 | |
608 | | | 605 | |
609 | #ifdef CONFIG_ELOOP_KQUEUE | | 606 | #ifdef CONFIG_ELOOP_KQUEUE |
610 | static void eloop_sock_table_dispatch(struct kevent *events, int nfds) | | 607 | static void eloop_sock_table_dispatch(struct kevent *events, int nfds) |
611 | { | | 608 | { |
612 | struct eloop_sock *table; | | 609 | struct eloop_sock *table; |
613 | int i; | | 610 | int i; |
614 | | | 611 | |
615 | for (i = 0; i < nfds; i++) { | | 612 | for (i = 0; i < nfds; i++) { |
616 | table = &eloop.fd_table[events[i].ident]; | | 613 | table = &eloop.fd_table[events[i].ident]; |
617 | if (table->handler == NULL) | | 614 | if (table->handler == NULL) |
618 | continue; | | 615 | continue; |
619 | table->handler(table->sock, table->eloop_data, | | 616 | table->handler(table->sock, table->eloop_data, |
620 | table->user_data); | | 617 | table->user_data); |
| | | 618 | if (eloop.readers.changed || |
| | | 619 | eloop.writers.changed || |
| | | 620 | eloop.exceptions.changed) |
| | | 621 | break; |
621 | } | | 622 | } |
622 | } | | 623 | } |
623 | #endif /* CONFIG_ELOOP_KQUEUE */ | | 624 | #endif /* CONFIG_ELOOP_KQUEUE */ |
624 | | | 625 | |
625 | static void eloop_sock_table_destroy(struct eloop_sock_table *table) | | 626 | static void eloop_sock_table_destroy(struct eloop_sock_table *table) |
626 | { | | 627 | { |
627 | if (table) { | | 628 | if (table) { |
628 | int i; | | 629 | int i; |
629 | for (i = 0; i < table->count && table->table; i++) { | | 630 | for (i = 0; i < table->count && table->table; i++) { |
630 | wpa_printf(MSG_INFO, "ELOOP: remaining socket: " | | 631 | wpa_printf(MSG_INFO, "ELOOP: remaining socket: " |
631 | "sock=%d eloop_data=%p user_data=%p " | | 632 | "sock=%d eloop_data=%p user_data=%p " |
632 | "handler=%p", | | 633 | "handler=%p", |
633 | table->table[i].sock, | | 634 | table->table[i].sock, |
| @@ -1010,26 +1011,40 @@ void eloop_run(void) | | | @@ -1010,26 +1011,40 @@ void eloop_run(void) |
1010 | | | 1011 | |
1011 | #ifdef CONFIG_ELOOP_SELECT | | 1012 | #ifdef CONFIG_ELOOP_SELECT |
1012 | rfds = os_malloc(sizeof(*rfds)); | | 1013 | rfds = os_malloc(sizeof(*rfds)); |
1013 | wfds = os_malloc(sizeof(*wfds)); | | 1014 | wfds = os_malloc(sizeof(*wfds)); |
1014 | efds = os_malloc(sizeof(*efds)); | | 1015 | efds = os_malloc(sizeof(*efds)); |
1015 | if (rfds == NULL || wfds == NULL || efds == NULL) | | 1016 | if (rfds == NULL || wfds == NULL || efds == NULL) |
1016 | goto out; | | 1017 | goto out; |
1017 | #endif /* CONFIG_ELOOP_SELECT */ | | 1018 | #endif /* CONFIG_ELOOP_SELECT */ |
1018 | | | 1019 | |
1019 | while (!eloop.terminate && | | 1020 | while (!eloop.terminate && |
1020 | (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 || | | 1021 | (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 || |
1021 | eloop.writers.count > 0 || eloop.exceptions.count > 0)) { | | 1022 | eloop.writers.count > 0 || eloop.exceptions.count > 0)) { |
1022 | struct eloop_timeout *timeout; | | 1023 | struct eloop_timeout *timeout; |
| | | 1024 | |
| | | 1025 | if (eloop.pending_terminate) { |
| | | 1026 | /* |
| | | 1027 | * This may happen in some corner cases where a signal |
| | | 1028 | * is received during a blocking operation. We need to |
| | | 1029 | * process the pending signals and exit if requested to |
| | | 1030 | * avoid hitting the SIGALRM limit if the blocking |
| | | 1031 | * operation took more than two seconds. |
| | | 1032 | */ |
| | | 1033 | eloop_process_pending_signals(); |
| | | 1034 | if (eloop.terminate) |
| | | 1035 | break; |
| | | 1036 | } |
| | | 1037 | |
1023 | timeout = dl_list_first(&eloop.timeout, struct eloop_timeout, | | 1038 | timeout = dl_list_first(&eloop.timeout, struct eloop_timeout, |
1024 | list); | | 1039 | list); |
1025 | if (timeout) { | | 1040 | if (timeout) { |
1026 | os_get_reltime(&now); | | 1041 | os_get_reltime(&now); |
1027 | if (os_reltime_before(&now, &timeout->time)) | | 1042 | if (os_reltime_before(&now, &timeout->time)) |
1028 | os_reltime_sub(&timeout->time, &now, &tv); | | 1043 | os_reltime_sub(&timeout->time, &now, &tv); |
1029 | else | | 1044 | else |
1030 | tv.sec = tv.usec = 0; | | 1045 | tv.sec = tv.usec = 0; |
1031 | #if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) | | 1046 | #if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) |
1032 | timeout_ms = tv.sec * 1000 + tv.usec / 1000; | | 1047 | timeout_ms = tv.sec * 1000 + tv.usec / 1000; |
1033 | #endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */ | | 1048 | #endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */ |
1034 | #ifdef CONFIG_ELOOP_SELECT | | 1049 | #ifdef CONFIG_ELOOP_SELECT |
1035 | _tv.tv_sec = tv.sec; | | 1050 | _tv.tv_sec = tv.sec; |
| @@ -1081,47 +1096,66 @@ void eloop_run(void) | | | @@ -1081,47 +1096,66 @@ void eloop_run(void) |
1081 | #ifdef CONFIG_ELOOP_SELECT | | 1096 | #ifdef CONFIG_ELOOP_SELECT |
1082 | "select" | | 1097 | "select" |
1083 | #endif /* CONFIG_ELOOP_SELECT */ | | 1098 | #endif /* CONFIG_ELOOP_SELECT */ |
1084 | #ifdef CONFIG_ELOOP_EPOLL | | 1099 | #ifdef CONFIG_ELOOP_EPOLL |
1085 | "epoll" | | 1100 | "epoll" |
1086 | #endif /* CONFIG_ELOOP_EPOLL */ | | 1101 | #endif /* CONFIG_ELOOP_EPOLL */ |
1087 | #ifdef CONFIG_ELOOP_KQUEUE | | 1102 | #ifdef CONFIG_ELOOP_KQUEUE |
1088 | "kqueue" | | 1103 | "kqueue" |
1089 | #endif /* CONFIG_ELOOP_KQUEUE */ | | 1104 | #endif /* CONFIG_ELOOP_KQUEUE */ |
1090 | | | 1105 | |
1091 | , strerror(errno)); | | 1106 | , strerror(errno)); |
1092 | goto out; | | 1107 | goto out; |
1093 | } | | 1108 | } |
| | | 1109 | |
| | | 1110 | eloop.readers.changed = 0; |
| | | 1111 | eloop.writers.changed = 0; |
| | | 1112 | eloop.exceptions.changed = 0; |
| | | 1113 | |
1094 | eloop_process_pending_signals(); | | 1114 | eloop_process_pending_signals(); |
1095 | | | 1115 | |
| | | 1116 | |
1096 | /* check if some registered timeouts have occurred */ | | 1117 | /* check if some registered timeouts have occurred */ |
1097 | timeout = dl_list_first(&eloop.timeout, struct eloop_timeout, | | 1118 | timeout = dl_list_first(&eloop.timeout, struct eloop_timeout, |
1098 | list); | | 1119 | list); |
1099 | if (timeout) { | | 1120 | if (timeout) { |
1100 | os_get_reltime(&now); | | 1121 | os_get_reltime(&now); |
1101 | if (!os_reltime_before(&now, &timeout->time)) { | | 1122 | if (!os_reltime_before(&now, &timeout->time)) { |
1102 | void *eloop_data = timeout->eloop_data; | | 1123 | void *eloop_data = timeout->eloop_data; |
1103 | void *user_data = timeout->user_data; | | 1124 | void *user_data = timeout->user_data; |
1104 | eloop_timeout_handler handler = | | 1125 | eloop_timeout_handler handler = |
1105 | timeout->handler; | | 1126 | timeout->handler; |
1106 | eloop_remove_timeout(timeout); | | 1127 | eloop_remove_timeout(timeout); |
1107 | handler(eloop_data, user_data); | | 1128 | handler(eloop_data, user_data); |
1108 | } | | 1129 | } |
1109 | | | 1130 | |
1110 | } | | 1131 | } |
1111 | | | 1132 | |
1112 | if (res <= 0) | | 1133 | if (res <= 0) |
1113 | continue; | | 1134 | continue; |
1114 | | | 1135 | |
| | | 1136 | if (eloop.readers.changed || |
| | | 1137 | eloop.writers.changed || |
| | | 1138 | eloop.exceptions.changed) { |
| | | 1139 | /* |
| | | 1140 | * Sockets may have been closed and reopened with the |
| | | 1141 | * same FD in the signal or timeout handlers, so we |
| | | 1142 | * must skip the previous results and check again |
| | | 1143 | * whether any of the currently registered sockets have |
| | | 1144 | * events. |
| | | 1145 | */ |
| | | 1146 | continue; |
| | | 1147 | } |
| | | 1148 | |
1115 | #ifdef CONFIG_ELOOP_POLL | | 1149 | #ifdef CONFIG_ELOOP_POLL |
1116 | eloop_sock_table_dispatch(&eloop.readers, &eloop.writers, | | 1150 | eloop_sock_table_dispatch(&eloop.readers, &eloop.writers, |
1117 | &eloop.exceptions, eloop.pollfds_map, | | 1151 | &eloop.exceptions, eloop.pollfds_map, |
1118 | eloop.max_pollfd_map); | | 1152 | eloop.max_pollfd_map); |
1119 | #endif /* CONFIG_ELOOP_POLL */ | | 1153 | #endif /* CONFIG_ELOOP_POLL */ |
1120 | #ifdef CONFIG_ELOOP_SELECT | | 1154 | #ifdef CONFIG_ELOOP_SELECT |
1121 | eloop_sock_table_dispatch(&eloop.readers, rfds); | | 1155 | eloop_sock_table_dispatch(&eloop.readers, rfds); |
1122 | eloop_sock_table_dispatch(&eloop.writers, wfds); | | 1156 | eloop_sock_table_dispatch(&eloop.writers, wfds); |
1123 | eloop_sock_table_dispatch(&eloop.exceptions, efds); | | 1157 | eloop_sock_table_dispatch(&eloop.exceptions, efds); |
1124 | #endif /* CONFIG_ELOOP_SELECT */ | | 1158 | #endif /* CONFIG_ELOOP_SELECT */ |
1125 | #ifdef CONFIG_ELOOP_EPOLL | | 1159 | #ifdef CONFIG_ELOOP_EPOLL |
1126 | eloop_sock_table_dispatch(eloop.epoll_events, res); | | 1160 | eloop_sock_table_dispatch(eloop.epoll_events, res); |
1127 | #endif /* CONFIG_ELOOP_EPOLL */ | | 1161 | #endif /* CONFIG_ELOOP_EPOLL */ |
| @@ -1186,27 +1220,27 @@ void eloop_destroy(void) | | | @@ -1186,27 +1220,27 @@ void eloop_destroy(void) |
1186 | #ifdef CONFIG_ELOOP_EPOLL | | 1220 | #ifdef CONFIG_ELOOP_EPOLL |
1187 | os_free(eloop.epoll_events); | | 1221 | os_free(eloop.epoll_events); |
1188 | close(eloop.epollfd); | | 1222 | close(eloop.epollfd); |
1189 | #endif /* CONFIG_ELOOP_EPOLL */ | | 1223 | #endif /* CONFIG_ELOOP_EPOLL */ |
1190 | #ifdef CONFIG_ELOOP_KQUEUE | | 1224 | #ifdef CONFIG_ELOOP_KQUEUE |
1191 | os_free(eloop.kqueue_events); | | 1225 | os_free(eloop.kqueue_events); |
1192 | close(eloop.kqueuefd); | | 1226 | close(eloop.kqueuefd); |
1193 | #endif /* CONFIG_ELOOP_KQUEUE */ | | 1227 | #endif /* CONFIG_ELOOP_KQUEUE */ |
1194 | } | | 1228 | } |
1195 | | | 1229 | |
1196 | | | 1230 | |
1197 | int eloop_terminated(void) | | 1231 | int eloop_terminated(void) |
1198 | { | | 1232 | { |
1199 | return eloop.terminate; | | 1233 | return eloop.terminate || eloop.pending_terminate; |
1200 | } | | 1234 | } |
1201 | | | 1235 | |
1202 | | | 1236 | |
1203 | void eloop_wait_for_read_sock(int sock) | | 1237 | void eloop_wait_for_read_sock(int sock) |
1204 | { | | 1238 | { |
1205 | #ifdef CONFIG_ELOOP_POLL | | 1239 | #ifdef CONFIG_ELOOP_POLL |
1206 | struct pollfd pfd; | | 1240 | struct pollfd pfd; |
1207 | | | 1241 | |
1208 | if (sock < 0) | | 1242 | if (sock < 0) |
1209 | return; | | 1243 | return; |
1210 | | | 1244 | |
1211 | os_memset(&pfd, 0, sizeof(pfd)); | | 1245 | os_memset(&pfd, 0, sizeof(pfd)); |
1212 | pfd.fd = sock; | | 1246 | pfd.fd = sock; |