Wed Jan 20 15:07:52 2016 UTC ()
Add kqueue(2) support.


(roy)
diff -r1.6 -r1.7 src/external/bsd/wpa/dist/src/utils/eloop.c

cvs diff -r1.6 -r1.7 src/external/bsd/wpa/dist/src/utils/eloop.c (switch to unified diff)

--- src/external/bsd/wpa/dist/src/utils/eloop.c 2015/04/01 19:45:15 1.6
+++ src/external/bsd/wpa/dist/src/utils/eloop.c 2016/01/20 15:07:52 1.7
@@ -1,1113 +1,1247 @@
1/* 1/*
2 * Event loop based on select() loop 2 * Event loop based on select() loop
3 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi> 3 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
4 * 4 *
5 * This software may be distributed under the terms of the BSD license. 5 * This software may be distributed under the terms of the BSD license.
6 * See README for more details. 6 * See README for more details.
7 */ 7 */
8 8
9#include "includes.h" 9#include "includes.h"
10#include <assert.h> 10#include <assert.h>
11 11
12#include "common.h" 12#include "common.h"
13#include "trace.h" 13#include "trace.h"
14#include "list.h" 14#include "list.h"
15#include "eloop.h" 15#include "eloop.h"
16 16
17#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL) 17#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
18#error Do not define both of poll and epoll 18#error Do not define both of poll and epoll
19#endif 19#endif
20 20
21#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) 21#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
 22#error Do not define both of poll and kqueue
 23#endif
 24
 25#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
 26 !defined(CONFIG_ELOOP_KQUEUE)
22#define CONFIG_ELOOP_SELECT 27#define CONFIG_ELOOP_SELECT
23#endif 28#endif
24 29
25#ifdef CONFIG_ELOOP_POLL 30#ifdef CONFIG_ELOOP_POLL
26#include <poll.h> 31#include <poll.h>
27#endif /* CONFIG_ELOOP_POLL */ 32#endif /* CONFIG_ELOOP_POLL */
28 33
29#ifdef CONFIG_ELOOP_EPOLL 34#ifdef CONFIG_ELOOP_EPOLL
30#include <sys/epoll.h> 35#include <sys/epoll.h>
31#endif /* CONFIG_ELOOP_EPOLL */ 36#endif /* CONFIG_ELOOP_EPOLL */
32 37
 38#ifdef CONFIG_ELOOP_KQUEUE
 39#include <sys/event.h>
 40#endif /* CONFIG_ELOOP_KQUEUE */
 41
/*
 * One registered socket: the fd, the handler to run when it becomes
 * ready, and the two opaque context pointers handed back to the handler.
 */
struct eloop_sock {
	int sock;		/* monitored file descriptor */
	void *eloop_data;	/* caller context passed to handler */
	void *user_data;	/* caller context passed to handler */
	eloop_sock_handler handler; /* callback invoked on readiness */
	WPA_TRACE_REF(eloop)
	WPA_TRACE_REF(user)
	WPA_TRACE_INFO
};
42 51
/*
 * One registered timeout, linked into eloop.timeout.
 * 'time' is the absolute expiry time (os_reltime).
 */
struct eloop_timeout {
	struct dl_list list;	/* linkage in eloop.timeout */
	struct os_reltime time;	/* absolute expiry time */
	void *eloop_data;	/* caller context passed to handler */
	void *user_data;	/* caller context passed to handler */
	eloop_timeout_handler handler; /* callback invoked at expiry */
	WPA_TRACE_REF(eloop)
	WPA_TRACE_REF(user)
	WPA_TRACE_INFO
};
53 62
/*
 * One registered UNIX signal handler.
 */
struct eloop_signal {
	int sig;		/* signal number */
	void *user_data;	/* caller context passed to handler */
	eloop_signal_handler handler; /* callback run from the main loop */
	int signaled;		/* set when the signal has been received;
				 * presumably consumed by the dispatch loop —
				 * usage is outside this chunk, confirm there */
};
60 69
/*
 * One table of registered sockets; the loop keeps one each for readers,
 * writers and exceptions.
 */
struct eloop_sock_table {
	int count;		/* number of entries in 'table' */
	struct eloop_sock *table;
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* Event type shared by every socket in this table; used to pick
	 * the epoll event bits / kqueue filter when registering fds. */
	eloop_event_type type;
#else /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	/* Set when the table is modified while dispatching, so the
	 * dispatcher can stop iterating over a stale snapshot. */
	int changed;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
};
70 79
/*
 * Global state of the event loop (one instance: the static 'eloop').
 */
struct eloop_data {
	int max_sock;		/* highest registered fd number */

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map; /* fd -> pollfd slot lookup */
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* fd-indexed copy of the registered eloop_sock entries, shared by
	 * the epoll and kqueue backends for O(1) dispatch lookup */
	int max_fd;
	struct eloop_sock *fd_table;
#endif
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;		/* fd from epoll_create1() */
	int epoll_max_event_num; /* capacity of epoll_events */
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;		/* fd from kqueue() */
	int kqueue_nevents;	/* capacity of kqueue_events */
	struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;	/* list of struct eloop_timeout */

	int signal_count;	/* number of entries in 'signals' */
	struct eloop_signal *signals;
	int signaled;		/* a signal arrived and awaits dispatch */
	int pending_terminate;

	int terminate;		/* main loop exit request */
};
101 117
102static struct eloop_data eloop; 118static struct eloop_data eloop;
103 119
104 120
105#ifdef WPA_TRACE 121#ifdef WPA_TRACE
106 122
/* SIGSEGV handler installed under WPA_TRACE: dump a backtrace, then abort. */
static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}
112 128
/*
 * (Re-)register trace references for every entry in 'table'.  Called
 * after the entry array may have been reallocated, since the references
 * are keyed on the entries' addresses.
 */
static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}
125 141
126 142
/*
 * Drop trace references for every entry in 'table'.  Called before the
 * entry array is reallocated or entries are moved.
 */
static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}
139 155
140#else /* WPA_TRACE */ 156#else /* WPA_TRACE */
141 157
142#define eloop_trace_sock_add_ref(table) do { } while (0) 158#define eloop_trace_sock_add_ref(table) do { } while (0)
143#define eloop_trace_sock_remove_ref(table) do { } while (0) 159#define eloop_trace_sock_remove_ref(table) do { } while (0)
144 160
145#endif /* WPA_TRACE */ 161#endif /* WPA_TRACE */
146 162
147 163
148int eloop_init(void) 164int eloop_init(void)
149{ 165{
150 os_memset(&eloop, 0, sizeof(eloop)); 166 os_memset(&eloop, 0, sizeof(eloop));
151 dl_list_init(&eloop.timeout); 167 dl_list_init(&eloop.timeout);
152#ifdef CONFIG_ELOOP_EPOLL 168#ifdef CONFIG_ELOOP_EPOLL
153 eloop.epollfd = epoll_create1(0); 169 eloop.epollfd = epoll_create1(0);
154 if (eloop.epollfd < 0) { 170 if (eloop.epollfd < 0) {
155 wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s\n", 171 wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s\n",
156 __func__, strerror(errno)); 172 __func__, strerror(errno));
157 return -1; 173 return -1;
158 } 174 }
159 eloop.readers.type = EVENT_TYPE_READ; 175 eloop.readers.type = EVENT_TYPE_READ;
160 eloop.writers.type = EVENT_TYPE_WRITE; 176 eloop.writers.type = EVENT_TYPE_WRITE;
161 eloop.exceptions.type = EVENT_TYPE_EXCEPTION; 177 eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
162#endif /* CONFIG_ELOOP_EPOLL */ 178#endif /* CONFIG_ELOOP_EPOLL */
 179#ifdef CONFIG_ELOOP_KQUEUE
 180 eloop.kqueuefd = kqueue();
 181 if (eloop.kqueuefd < 0) {
 182 wpa_printf(MSG_ERROR, "%s: kqueue failed. %s\n",
 183 __func__, strerror(errno));
 184 return -1;
 185 }
 186#endif /* CONFIG_ELOOP_KQUEUE */
163#ifdef WPA_TRACE 187#ifdef WPA_TRACE
164 signal(SIGSEGV, eloop_sigsegv_handler); 188 signal(SIGSEGV, eloop_sigsegv_handler);
165#endif /* WPA_TRACE */ 189#endif /* WPA_TRACE */
166 return 0; 190 return 0;
167} 191}
168 192
169 193
170static int eloop_sock_table_add_sock(struct eloop_sock_table *table, 194static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
171 int sock, eloop_sock_handler handler, 195 int sock, eloop_sock_handler handler,
172 void *eloop_data, void *user_data) 196 void *eloop_data, void *user_data)
173{ 197{
174#ifdef CONFIG_ELOOP_EPOLL 198#ifdef CONFIG_ELOOP_EPOLL
175 struct eloop_sock *temp_table; 199 struct eloop_sock *temp_table;
176 struct epoll_event ev, *temp_events; 200 struct epoll_event ev, *temp_events;
177 int next; 201 int next;
178#endif /* CONFIG_ELOOP_EPOLL */ 202#endif /* CONFIG_ELOOP_EPOLL */
 203#ifdef CONFIG_ELOOP_KQUEUE
 204 struct eloop_sock *temp_table;
 205 int next, filter;
 206 struct kevent ke;
 207#endif
179 struct eloop_sock *tmp; 208 struct eloop_sock *tmp;
180 int new_max_sock; 209 int new_max_sock;
181 210
182 if (sock > eloop.max_sock) 211 if (sock > eloop.max_sock)
183 new_max_sock = sock; 212 new_max_sock = sock;
184 else 213 else
185 new_max_sock = eloop.max_sock; 214 new_max_sock = eloop.max_sock;
186 215
187 if (table == NULL) 216 if (table == NULL)
188 return -1; 217 return -1;
189 218
190#ifdef CONFIG_ELOOP_POLL 219#ifdef CONFIG_ELOOP_POLL
191 if (new_max_sock >= eloop.max_pollfd_map) { 220 if (new_max_sock >= eloop.max_pollfd_map) {
192 struct pollfd **nmap; 221 struct pollfd **nmap;
193 nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50, 222 nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
194 sizeof(struct pollfd *)); 223 sizeof(struct pollfd *));
195 if (nmap == NULL) 224 if (nmap == NULL)
196 return -1; 225 return -1;
197 226
198 eloop.max_pollfd_map = new_max_sock + 50; 227 eloop.max_pollfd_map = new_max_sock + 50;
199 eloop.pollfds_map = nmap; 228 eloop.pollfds_map = nmap;
200 } 229 }
201 230
202 if (eloop.count + 1 > eloop.max_poll_fds) { 231 if (eloop.count + 1 > eloop.max_poll_fds) {
203 struct pollfd *n; 232 struct pollfd *n;
204 int nmax = eloop.count + 1 + 50; 233 int nmax = eloop.count + 1 + 50;
205 n = os_realloc_array(eloop.pollfds, nmax, 234 n = os_realloc_array(eloop.pollfds, nmax,
206 sizeof(struct pollfd)); 235 sizeof(struct pollfd));
207 if (n == NULL) 236 if (n == NULL)
208 return -1; 237 return -1;
209 238
210 eloop.max_poll_fds = nmax; 239 eloop.max_poll_fds = nmax;
211 eloop.pollfds = n; 240 eloop.pollfds = n;
212 } 241 }
213#endif /* CONFIG_ELOOP_POLL */ 242#endif /* CONFIG_ELOOP_POLL */
214#ifdef CONFIG_ELOOP_EPOLL 243#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
215 if (new_max_sock >= eloop.epoll_max_fd) { 244 if (new_max_sock >= eloop.max_fd) {
216 next = eloop.epoll_max_fd == 0 ? 16 : eloop.epoll_max_fd * 2; 245 next = eloop.max_fd == 0 ? 16 : eloop.max_fd * 2;
217 temp_table = os_realloc_array(eloop.epoll_table, next, 246 temp_table = os_realloc_array(eloop.fd_table, next,
218 sizeof(struct eloop_sock)); 247 sizeof(struct eloop_sock));
219 if (temp_table == NULL) 248 if (temp_table == NULL)
220 return -1; 249 return -1;
221 250
222 eloop.epoll_max_fd = next; 251 eloop.max_fd = next;
223 eloop.epoll_table = temp_table; 252 eloop.fd_table = temp_table;
224 } 253 }
 254#endif
225 255
 256#ifdef CONFIG_ELOOP_EPOLL
226 if (eloop.count + 1 > eloop.epoll_max_event_num) { 257 if (eloop.count + 1 > eloop.epoll_max_event_num) {
227 next = eloop.epoll_max_event_num == 0 ? 8 : 258 next = eloop.epoll_max_event_num == 0 ? 8 :
228 eloop.epoll_max_event_num * 2; 259 eloop.epoll_max_event_num * 2;
229 temp_events = os_realloc_array(eloop.epoll_events, next, 260 temp_events = os_realloc_array(eloop.epoll_events, next,
230 sizeof(struct epoll_event)); 261 sizeof(struct epoll_event));
231 if (temp_events == NULL) { 262 if (temp_events == NULL) {
232 wpa_printf(MSG_ERROR, "%s: malloc for epoll failed. " 263 wpa_printf(MSG_ERROR, "%s: malloc for epoll failed. "
233 "%s\n", __func__, strerror(errno)); 264 "%s\n", __func__, strerror(errno));
234 return -1; 265 return -1;
235 } 266 }
236 267
237 eloop.epoll_max_event_num = next; 268 eloop.epoll_max_event_num = next;
238 eloop.epoll_events = temp_events; 269 eloop.epoll_events = temp_events;
239 } 270 }
240#endif /* CONFIG_ELOOP_EPOLL */ 271#endif /* CONFIG_ELOOP_EPOLL */
 272#ifdef CONFIG_ELOOP_KQUEUE
 273 if (eloop.count + 1 > eloop.kqueue_nevents) {
 274 next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
 275 os_free(eloop.kqueue_events);
 276 eloop.kqueue_events = os_malloc(next *
 277 sizeof(eloop.kqueue_events));
 278 if (eloop.kqueue_events == NULL) {
 279 wpa_printf(MSG_ERROR, "%s: malloc for kqueue failed. "
 280 "%s\n", __func__, strerror(errno));
 281 return -1;
 282 }
 283
 284 eloop.kqueue_nevents = next;
 285 }
 286#endif /* CONFIG_ELOOP_KQUEUE */
241 287
242 eloop_trace_sock_remove_ref(table); 288 eloop_trace_sock_remove_ref(table);
243 tmp = os_realloc_array(table->table, table->count + 1, 289 tmp = os_realloc_array(table->table, table->count + 1,
244 sizeof(struct eloop_sock)); 290 sizeof(struct eloop_sock));
245 if (tmp == NULL) { 291 if (tmp == NULL) {
246 eloop_trace_sock_add_ref(table); 292 eloop_trace_sock_add_ref(table);
247 return -1; 293 return -1;
248 } 294 }
249 295
250 tmp[table->count].sock = sock; 296 tmp[table->count].sock = sock;
251 tmp[table->count].eloop_data = eloop_data; 297 tmp[table->count].eloop_data = eloop_data;
252 tmp[table->count].user_data = user_data; 298 tmp[table->count].user_data = user_data;
253 tmp[table->count].handler = handler; 299 tmp[table->count].handler = handler;
254 wpa_trace_record(&tmp[table->count]); 300 wpa_trace_record(&tmp[table->count]);
255 table->count++; 301 table->count++;
256 table->table = tmp; 302 table->table = tmp;
257 eloop.max_sock = new_max_sock; 303 eloop.max_sock = new_max_sock;
258 eloop.count++; 304 eloop.count++;
259#ifndef CONFIG_ELOOP_EPOLL 305#if !defined(CONFIG_ELOOP_EPOLL) && !defined(CONFIG_ELOOP_KQUEUE)
260 table->changed = 1; 306 table->changed = 1;
261#endif /* CONFIG_ELOOP_EPOLL */ 307#endif /* CONFIG_ELOOP_EPOLL */
262 eloop_trace_sock_add_ref(table); 308 eloop_trace_sock_add_ref(table);
263 309
264#ifdef CONFIG_ELOOP_EPOLL 310#ifdef CONFIG_ELOOP_EPOLL
265 os_memset(&ev, 0, sizeof(ev)); 311 os_memset(&ev, 0, sizeof(ev));
266 switch (table->type) { 312 switch (table->type) {
267 case EVENT_TYPE_READ: 313 case EVENT_TYPE_READ:
268 ev.events = EPOLLIN; 314 ev.events = EPOLLIN;
269 break; 315 break;
270 case EVENT_TYPE_WRITE: 316 case EVENT_TYPE_WRITE:
271 ev.events = EPOLLOUT; 317 ev.events = EPOLLOUT;
272 break; 318 break;
273 /* 319 /*
274 * Exceptions are always checked when using epoll, but I suppose it's 320 * Exceptions are always checked when using epoll, but I suppose it's
275 * possible that someone registered a socket *only* for exception 321 * possible that someone registered a socket *only* for exception
276 * handling. 322 * handling.
277 */ 323 */
278 case EVENT_TYPE_EXCEPTION: 324 case EVENT_TYPE_EXCEPTION:
279 ev.events = EPOLLERR | EPOLLHUP; 325 ev.events = EPOLLERR | EPOLLHUP;
280 break; 326 break;
281 } 327 }
282 ev.data.fd = sock; 328 ev.data.fd = sock;
283 if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) { 329 if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
284 wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d " 330 wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d "
285 "failed. %s\n", __func__, sock, strerror(errno)); 331 "failed. %s\n", __func__, sock, strerror(errno));
286 return -1; 332 return -1;
287 } 333 }
288 os_memcpy(&eloop.epoll_table[sock], &table->table[table->count - 1], 334 os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
289 sizeof(struct eloop_sock)); 335 sizeof(struct eloop_sock));
290#endif /* CONFIG_ELOOP_EPOLL */ 336#endif /* CONFIG_ELOOP_EPOLL */
 337#ifdef CONFIG_ELOOP_KQUEUE
 338 switch (table->type) {
 339 case EVENT_TYPE_READ:
 340 filter = EVFILT_READ;
 341 break;
 342 case EVENT_TYPE_WRITE:
 343 filter = EVFILT_WRITE;
 344 break;
 345 default:
 346 filter = 0;
 347 }
 348 EV_SET(&ke, sock, filter, EV_ADD, 0, 0, NULL);
 349 if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
 350 wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d "
 351 "failed. %s\n", __func__, sock, strerror(errno));
 352 return -1;
 353 }
 354 os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
 355 sizeof(struct eloop_sock));
 356#endif /* CONFIG_ELOOP_KQUEUE */
291 return 0; 357 return 0;
292} 358}
293 359
294 360
295static void eloop_sock_table_remove_sock(struct eloop_sock_table *table, 361static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
296 int sock) 362 int sock)
297{ 363{
 364#ifdef CONFIG_ELOOP_KQUEUE
 365 struct kevent ke;
 366#endif
298 int i; 367 int i;
299 368
300 if (table == NULL || table->table == NULL || table->count == 0) 369 if (table == NULL || table->table == NULL || table->count == 0)
301 return; 370 return;
302 371
303 for (i = 0; i < table->count; i++) { 372 for (i = 0; i < table->count; i++) {
304 if (table->table[i].sock == sock) 373 if (table->table[i].sock == sock)
305 break; 374 break;
306 } 375 }
307 if (i == table->count) 376 if (i == table->count)
308 return; 377 return;
309 eloop_trace_sock_remove_ref(table); 378 eloop_trace_sock_remove_ref(table);
310 if (i != table->count - 1) { 379 if (i != table->count - 1) {
311 os_memmove(&table->table[i], &table->table[i + 1], 380 os_memmove(&table->table[i], &table->table[i + 1],
312 (table->count - i - 1) * 381 (table->count - i - 1) *
313 sizeof(struct eloop_sock)); 382 sizeof(struct eloop_sock));
314 } 383 }
315 table->count--; 384 table->count--;
316 eloop.count--; 385 eloop.count--;
317#ifndef CONFIG_ELOOP_EPOLL 386#if !defined(CONFIG_ELOOP_EPOLL) && !defined(CONFIG_ELOOP_KQUEUE)
318 table->changed = 1; 387 table->changed = 1;
319#endif /* CONFIG_ELOOP_EPOLL */ 388#endif /* CONFIG_ELOOP_EPOLL */
320 eloop_trace_sock_add_ref(table); 389 eloop_trace_sock_add_ref(table);
321#ifdef CONFIG_ELOOP_EPOLL 390#ifdef CONFIG_ELOOP_EPOLL
322 if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) { 391 if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
323 wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d " 392 wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d "
324 "failed. %s\n", __func__, sock, strerror(errno)); 393 "failed. %s\n", __func__, sock, strerror(errno));
325 return; 394 return;
326 } 395 }
327 os_memset(&eloop.epoll_table[sock], 0, sizeof(struct eloop_sock)); 396 os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
328#endif /* CONFIG_ELOOP_EPOLL */ 397#endif /* CONFIG_ELOOP_EPOLL */
 398#ifdef CONFIG_ELOOP_KQUEUE
 399 EV_SET(&ke, sock, 0, EV_DELETE, 0, 0, NULL);
 400 if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
 401 wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d "
 402 "failed. %s\n", __func__, sock, strerror(errno));
 403 return;
 404 }
 405 os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
 406#endif /* CONFIG_ELOOP_KQUEUE */
329} 407}
330 408
331 409
332#ifdef CONFIG_ELOOP_POLL 410#ifdef CONFIG_ELOOP_POLL
333 411
/*
 * Look up the pollfd slot registered for 'fd' in the fd-indexed map.
 * Returns NULL when fd is out of the map's range or has no slot.
 */
static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < 0 || fd >= mx)
		return NULL;
	return pollfds_map[fd];
}
340 418
341 419
342static int eloop_sock_table_set_fds(struct eloop_sock_table *readers, 420static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
343 struct eloop_sock_table *writers, 421 struct eloop_sock_table *writers,
344 struct eloop_sock_table *exceptions, 422 struct eloop_sock_table *exceptions,
345 struct pollfd *pollfds, 423 struct pollfd *pollfds,
346 struct pollfd **pollfds_map, 424 struct pollfd **pollfds_map,
347 int max_pollfd_map) 425 int max_pollfd_map)
348{ 426{
349 int i; 427 int i;
350 int nxt = 0; 428 int nxt = 0;
351 int fd; 429 int fd;
352 struct pollfd *pfd; 430 struct pollfd *pfd;
353 431
354 /* Clear pollfd lookup map. It will be re-populated below. */ 432 /* Clear pollfd lookup map. It will be re-populated below. */
355 os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map); 433 os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);
356 434
357 if (readers && readers->table) { 435 if (readers && readers->table) {
358 for (i = 0; i < readers->count; i++) { 436 for (i = 0; i < readers->count; i++) {
359 fd = readers->table[i].sock; 437 fd = readers->table[i].sock;
360 assert(fd >= 0 && fd < max_pollfd_map); 438 assert(fd >= 0 && fd < max_pollfd_map);
361 pollfds[nxt].fd = fd; 439 pollfds[nxt].fd = fd;
362 pollfds[nxt].events = POLLIN; 440 pollfds[nxt].events = POLLIN;
363 pollfds[nxt].revents = 0; 441 pollfds[nxt].revents = 0;
364 pollfds_map[fd] = &(pollfds[nxt]); 442 pollfds_map[fd] = &(pollfds[nxt]);
365 nxt++; 443 nxt++;
366 } 444 }
367 } 445 }
368 446
369 if (writers && writers->table) { 447 if (writers && writers->table) {
370 for (i = 0; i < writers->count; i++) { 448 for (i = 0; i < writers->count; i++) {
371 /* 449 /*
372 * See if we already added this descriptor, update it 450 * See if we already added this descriptor, update it
373 * if so. 451 * if so.
374 */ 452 */
375 fd = writers->table[i].sock; 453 fd = writers->table[i].sock;
376 assert(fd >= 0 && fd < max_pollfd_map); 454 assert(fd >= 0 && fd < max_pollfd_map);
377 pfd = pollfds_map[fd]; 455 pfd = pollfds_map[fd];
378 if (!pfd) { 456 if (!pfd) {
379 pfd = &(pollfds[nxt]); 457 pfd = &(pollfds[nxt]);
380 pfd->events = 0; 458 pfd->events = 0;
381 pfd->fd = fd; 459 pfd->fd = fd;
382 pollfds[i].revents = 0; 460 pollfds[i].revents = 0;
383 pollfds_map[fd] = pfd; 461 pollfds_map[fd] = pfd;
384 nxt++; 462 nxt++;
385 } 463 }
386 pfd->events |= POLLOUT; 464 pfd->events |= POLLOUT;
387 } 465 }
388 } 466 }
389 467
390 /* 468 /*
391 * Exceptions are always checked when using poll, but I suppose it's 469 * Exceptions are always checked when using poll, but I suppose it's
392 * possible that someone registered a socket *only* for exception 470 * possible that someone registered a socket *only* for exception
393 * handling. Set the POLLIN bit in this case. 471 * handling. Set the POLLIN bit in this case.
394 */ 472 */
395 if (exceptions && exceptions->table) { 473 if (exceptions && exceptions->table) {
396 for (i = 0; i < exceptions->count; i++) { 474 for (i = 0; i < exceptions->count; i++) {
397 /* 475 /*
398 * See if we already added this descriptor, just use it 476 * See if we already added this descriptor, just use it
399 * if so. 477 * if so.
400 */ 478 */
401 fd = exceptions->table[i].sock; 479 fd = exceptions->table[i].sock;
402 assert(fd >= 0 && fd < max_pollfd_map); 480 assert(fd >= 0 && fd < max_pollfd_map);
403 pfd = pollfds_map[fd]; 481 pfd = pollfds_map[fd];
404 if (!pfd) { 482 if (!pfd) {
405 pfd = &(pollfds[nxt]); 483 pfd = &(pollfds[nxt]);
406 pfd->events = POLLIN; 484 pfd->events = POLLIN;
407 pfd->fd = fd; 485 pfd->fd = fd;
408 pollfds[i].revents = 0; 486 pollfds[i].revents = 0;
409 pollfds_map[fd] = pfd; 487 pollfds_map[fd] = pfd;
410 nxt++; 488 nxt++;
411 } 489 }
412 } 490 }
413 } 491 }
414 492
415 return nxt; 493 return nxt;
416} 494}
417 495
418 496
/*
 * Invoke the handler of every socket in 'table' whose pollfd has any of
 * the bits in 'revents' set.
 * Returns: 1 if a handler modified the table (caller must stop using the
 * current pollfds snapshot), 0 otherwise.
 */
static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		/* A handler registered/unregistered sockets: the indices
		 * and pollfds snapshot are now stale. */
		if (table->changed)
			return 1;
	}

	return 0;
}
449 527
450 528
/*
 * Dispatch poll() results: readers first, then writers, then
 * exception-only sockets.  Abort early if a handler changed a table,
 * since the pollfds snapshot is then stale.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}
469 547
470#endif /* CONFIG_ELOOP_POLL */ 548#endif /* CONFIG_ELOOP_POLL */
471 549
472#ifdef CONFIG_ELOOP_SELECT 550#ifdef CONFIG_ELOOP_SELECT
473 551
/* Build an fd_set containing every socket registered in 'table'. */
static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}
489 567
490 568
/*
 * Invoke the handler of every socket in 'table' that select() marked
 * ready in 'fds'.  Stops if a handler modified the table, since the
 * iteration indices would then be stale.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}
510 588
511#endif /* CONFIG_ELOOP_SELECT */ 589#endif /* CONFIG_ELOOP_SELECT */
512 590
513 591
514#ifdef CONFIG_ELOOP_EPOLL 592#ifdef CONFIG_ELOOP_EPOLL
/*
 * Dispatch epoll_wait() results: look up each ready fd in the global
 * fd-indexed table and invoke its handler, skipping fds whose entry was
 * cleared (unregistered) by an earlier handler in this batch.
 */
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
	}
}
528#endif /* CONFIG_ELOOP_EPOLL */ 606#endif /* CONFIG_ELOOP_EPOLL */
529 607
530 608
 609#ifdef CONFIG_ELOOP_KQUEUE
/*
 * Dispatch kevent() results: each event's ident is the fd; look it up
 * in the global fd-indexed table and invoke its handler, skipping fds
 * whose entry was cleared (unregistered) by an earlier handler.
 */
static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
	}
}
 623#endif /* CONFIG_ELOOP_KQUEUE */
 624
/*
 * Free a socket table at shutdown.  Any sockets still registered are
 * logged (with trace dumps) as leaks before the entry array is freed.
 */
static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}
551 645
552 646
553int eloop_register_read_sock(int sock, eloop_sock_handler handler, 647int eloop_register_read_sock(int sock, eloop_sock_handler handler,
554 void *eloop_data, void *user_data) 648 void *eloop_data, void *user_data)
555{ 649{
556 return eloop_register_sock(sock, EVENT_TYPE_READ, handler, 650 return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
557 eloop_data, user_data); 651 eloop_data, user_data);
558} 652}
559 653
560 654
561void eloop_unregister_read_sock(int sock) 655void eloop_unregister_read_sock(int sock)
562{ 656{
563 eloop_unregister_sock(sock, EVENT_TYPE_READ); 657 eloop_unregister_sock(sock, EVENT_TYPE_READ);
564} 658}
565 659
566 660
567static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type) 661static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
568{ 662{
569 switch (type) { 663 switch (type) {
570 case EVENT_TYPE_READ: 664 case EVENT_TYPE_READ:
571 return &eloop.readers; 665 return &eloop.readers;
572 case EVENT_TYPE_WRITE: 666 case EVENT_TYPE_WRITE:
573 return &eloop.writers; 667 return &eloop.writers;
574 case EVENT_TYPE_EXCEPTION: 668 case EVENT_TYPE_EXCEPTION:
575 return &eloop.exceptions; 669 return &eloop.exceptions;
576 } 670 }
577 671
578 return NULL; 672 return NULL;
579} 673}
580 674
581 675
582int eloop_register_sock(int sock, eloop_event_type type, 676int eloop_register_sock(int sock, eloop_event_type type,
583 eloop_sock_handler handler, 677 eloop_sock_handler handler,
584 void *eloop_data, void *user_data) 678 void *eloop_data, void *user_data)
585{ 679{
586 struct eloop_sock_table *table; 680 struct eloop_sock_table *table;
587 681
588 assert(sock >= 0); 682 assert(sock >= 0);
589 table = eloop_get_sock_table(type); 683 table = eloop_get_sock_table(type);
590 return eloop_sock_table_add_sock(table, sock, handler, 684 return eloop_sock_table_add_sock(table, sock, handler,
591 eloop_data, user_data); 685 eloop_data, user_data);
592} 686}
593 687
594 688
595void eloop_unregister_sock(int sock, eloop_event_type type) 689void eloop_unregister_sock(int sock, eloop_event_type type)
596{ 690{
597 struct eloop_sock_table *table; 691 struct eloop_sock_table *table;
598 692
599 table = eloop_get_sock_table(type); 693 table = eloop_get_sock_table(type);
600 eloop_sock_table_remove_sock(table, sock); 694 eloop_sock_table_remove_sock(table, sock);
601} 695}
602 696
603 697
/*
 * Register a one-shot timeout to fire secs/usecs from now.
 * Returns 0 on success (including the "timeout too far in the future"
 * case, which is silently dropped) and -1 on allocation/clock failure.
 */
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume long enough timeout to be assumed
		 * to be infinite, i.e., the timeout would never happen.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	/* Normalize so usec stays below one second */
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			/* Insert before the first later entry */
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	/* Latest deadline so far - append at the tail */
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}
653 747
654 748
/*
 * Unlink @timeout from the pending list, drop its trace references
 * and free the entry.
 */
static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}
662 756
663 757
664int eloop_cancel_timeout(eloop_timeout_handler handler, 758int eloop_cancel_timeout(eloop_timeout_handler handler,
665 void *eloop_data, void *user_data) 759 void *eloop_data, void *user_data)
666{ 760{
667 struct eloop_timeout *timeout, *prev; 761 struct eloop_timeout *timeout, *prev;
668 int removed = 0; 762 int removed = 0;
669 763
670 dl_list_for_each_safe(timeout, prev, &eloop.timeout, 764 dl_list_for_each_safe(timeout, prev, &eloop.timeout,
671 struct eloop_timeout, list) { 765 struct eloop_timeout, list) {
672 if (timeout->handler == handler && 766 if (timeout->handler == handler &&
673 (timeout->eloop_data == eloop_data || 767 (timeout->eloop_data == eloop_data ||
674 eloop_data == ELOOP_ALL_CTX) && 768 eloop_data == ELOOP_ALL_CTX) &&
675 (timeout->user_data == user_data || 769 (timeout->user_data == user_data ||
676 user_data == ELOOP_ALL_CTX)) { 770 user_data == ELOOP_ALL_CTX)) {
677 eloop_remove_timeout(timeout); 771 eloop_remove_timeout(timeout);
678 removed++; 772 removed++;
679 } 773 }
680 } 774 }
681 775
682 return removed; 776 return removed;
683} 777}
684 778
685 779
686int eloop_cancel_timeout_one(eloop_timeout_handler handler, 780int eloop_cancel_timeout_one(eloop_timeout_handler handler,
687 void *eloop_data, void *user_data, 781 void *eloop_data, void *user_data,
688 struct os_reltime *remaining) 782 struct os_reltime *remaining)
689{ 783{
690 struct eloop_timeout *timeout, *prev; 784 struct eloop_timeout *timeout, *prev;
691 int removed = 0; 785 int removed = 0;
692 struct os_reltime now; 786 struct os_reltime now;
693 787
694 os_get_reltime(&now); 788 os_get_reltime(&now);
695 remaining->sec = remaining->usec = 0; 789 remaining->sec = remaining->usec = 0;
696 790
697 dl_list_for_each_safe(timeout, prev, &eloop.timeout, 791 dl_list_for_each_safe(timeout, prev, &eloop.timeout,
698 struct eloop_timeout, list) { 792 struct eloop_timeout, list) {
699 if (timeout->handler == handler && 793 if (timeout->handler == handler &&
700 (timeout->eloop_data == eloop_data) && 794 (timeout->eloop_data == eloop_data) &&
701 (timeout->user_data == user_data)) { 795 (timeout->user_data == user_data)) {
702 removed = 1; 796 removed = 1;
703 if (os_reltime_before(&now, &timeout->time)) 797 if (os_reltime_before(&now, &timeout->time))
704 os_reltime_sub(&timeout->time, &now, remaining); 798 os_reltime_sub(&timeout->time, &now, remaining);
705 eloop_remove_timeout(timeout); 799 eloop_remove_timeout(timeout);
706 break; 800 break;
707 } 801 }
708 } 802 }
709 return removed; 803 return removed;
710} 804}
711 805
712 806
713int eloop_is_timeout_registered(eloop_timeout_handler handler, 807int eloop_is_timeout_registered(eloop_timeout_handler handler,
714 void *eloop_data, void *user_data) 808 void *eloop_data, void *user_data)
715{ 809{
716 struct eloop_timeout *tmp; 810 struct eloop_timeout *tmp;
717 811
718 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) { 812 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
719 if (tmp->handler == handler && 813 if (tmp->handler == handler &&
720 tmp->eloop_data == eloop_data && 814 tmp->eloop_data == eloop_data &&
721 tmp->user_data == user_data) 815 tmp->user_data == user_data)
722 return 1; 816 return 1;
723 } 817 }
724 818
725 return 0; 819 return 0;
726} 820}
727 821
728 822
729int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs, 823int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
730 eloop_timeout_handler handler, void *eloop_data, 824 eloop_timeout_handler handler, void *eloop_data,
731 void *user_data) 825 void *user_data)
732{ 826{
733 struct os_reltime now, requested, remaining; 827 struct os_reltime now, requested, remaining;
734 struct eloop_timeout *tmp; 828 struct eloop_timeout *tmp;
735 829
736 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) { 830 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
737 if (tmp->handler == handler && 831 if (tmp->handler == handler &&
738 tmp->eloop_data == eloop_data && 832 tmp->eloop_data == eloop_data &&
739 tmp->user_data == user_data) { 833 tmp->user_data == user_data) {
740 requested.sec = req_secs; 834 requested.sec = req_secs;
741 requested.usec = req_usecs; 835 requested.usec = req_usecs;
742 os_get_reltime(&now); 836 os_get_reltime(&now);
743 os_reltime_sub(&tmp->time, &now, &remaining); 837 os_reltime_sub(&tmp->time, &now, &remaining);
744 if (os_reltime_before(&requested, &remaining)) { 838 if (os_reltime_before(&requested, &remaining)) {
745 eloop_cancel_timeout(handler, eloop_data, 839 eloop_cancel_timeout(handler, eloop_data,
746 user_data); 840 user_data);
747 eloop_register_timeout(requested.sec, 841 eloop_register_timeout(requested.sec,
748 requested.usec, 842 requested.usec,
749 handler, eloop_data, 843 handler, eloop_data,
750 user_data); 844 user_data);
751 return 1; 845 return 1;
752 } 846 }
753 return 0; 847 return 0;
754 } 848 }
755 } 849 }
756 850
757 return -1; 851 return -1;
758} 852}
759 853
760 854
761int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs, 855int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
762 eloop_timeout_handler handler, void *eloop_data, 856 eloop_timeout_handler handler, void *eloop_data,
763 void *user_data) 857 void *user_data)
764{ 858{
765 struct os_reltime now, requested, remaining; 859 struct os_reltime now, requested, remaining;
766 struct eloop_timeout *tmp; 860 struct eloop_timeout *tmp;
767 861
768 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) { 862 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
769 if (tmp->handler == handler && 863 if (tmp->handler == handler &&
770 tmp->eloop_data == eloop_data && 864 tmp->eloop_data == eloop_data &&
771 tmp->user_data == user_data) { 865 tmp->user_data == user_data) {
772 requested.sec = req_secs; 866 requested.sec = req_secs;
773 requested.usec = req_usecs; 867 requested.usec = req_usecs;
774 os_get_reltime(&now); 868 os_get_reltime(&now);
775 os_reltime_sub(&tmp->time, &now, &remaining); 869 os_reltime_sub(&tmp->time, &now, &remaining);
776 if (os_reltime_before(&remaining, &requested)) { 870 if (os_reltime_before(&remaining, &requested)) {
777 eloop_cancel_timeout(handler, eloop_data, 871 eloop_cancel_timeout(handler, eloop_data,
778 user_data); 872 user_data);
779 eloop_register_timeout(requested.sec, 873 eloop_register_timeout(requested.sec,
780 requested.usec, 874 requested.usec,
781 handler, eloop_data, 875 handler, eloop_data,
782 user_data); 876 user_data);
783 return 1; 877 return 1;
784 } 878 }
785 return 0; 879 return 0;
786 } 880 }
787 } 881 }
788 882
789 return -1; 883 return -1;
790} 884}
791 885
792 886
#ifndef CONFIG_NATIVE_WINDOWS
/*
 * SIGALRM handler armed by eloop_handle_signal(): if SIGINT/SIGTERM
 * processing has not finished within two seconds, assume the program
 * is stuck in a busy loop and terminate it forcefully.
 */
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */
804 898
805 899
806static void eloop_handle_signal(int sig) 900static void eloop_handle_signal(int sig)
807{ 901{
808 int i; 902 int i;
809 903
810#ifndef CONFIG_NATIVE_WINDOWS 904#ifndef CONFIG_NATIVE_WINDOWS
811 if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) { 905 if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
812 /* Use SIGALRM to break out from potential busy loops that 906 /* Use SIGALRM to break out from potential busy loops that
813 * would not allow the program to be killed. */ 907 * would not allow the program to be killed. */
814 eloop.pending_terminate = 1; 908 eloop.pending_terminate = 1;
815 signal(SIGALRM, eloop_handle_alarm); 909 signal(SIGALRM, eloop_handle_alarm);
816 alarm(2); 910 alarm(2);
817 } 911 }
818#endif /* CONFIG_NATIVE_WINDOWS */ 912#endif /* CONFIG_NATIVE_WINDOWS */
819 913
820 eloop.signaled++; 914 eloop.signaled++;
821 for (i = 0; i < eloop.signal_count; i++) { 915 for (i = 0; i < eloop.signal_count; i++) {
822 if (eloop.signals[i].sig == sig) { 916 if (eloop.signals[i].sig == sig) {
823 eloop.signals[i].signaled++; 917 eloop.signals[i].signaled++;
824 break; 918 break;
825 } 919 }
826 } 920 }
827} 921}
828 922
829 923
830static void eloop_process_pending_signals(void) 924static void eloop_process_pending_signals(void)
831{ 925{
832 int i; 926 int i;
833 927
834 if (eloop.signaled == 0) 928 if (eloop.signaled == 0)
835 return; 929 return;
836 eloop.signaled = 0; 930 eloop.signaled = 0;
837 931
838 if (eloop.pending_terminate) { 932 if (eloop.pending_terminate) {
839#ifndef CONFIG_NATIVE_WINDOWS 933#ifndef CONFIG_NATIVE_WINDOWS
840 alarm(0); 934 alarm(0);
841#endif /* CONFIG_NATIVE_WINDOWS */ 935#endif /* CONFIG_NATIVE_WINDOWS */
842 eloop.pending_terminate = 0; 936 eloop.pending_terminate = 0;
843 } 937 }
844 938
845 for (i = 0; i < eloop.signal_count; i++) { 939 for (i = 0; i < eloop.signal_count; i++) {
846 if (eloop.signals[i].signaled) { 940 if (eloop.signals[i].signaled) {
847 eloop.signals[i].signaled = 0; 941 eloop.signals[i].signaled = 0;
848 eloop.signals[i].handler(eloop.signals[i].sig, 942 eloop.signals[i].handler(eloop.signals[i].sig,
849 eloop.signals[i].user_data); 943 eloop.signals[i].user_data);
850 } 944 }
851 } 945 }
852} 946}
853 947
854 948
855int eloop_register_signal(int sig, eloop_signal_handler handler, 949int eloop_register_signal(int sig, eloop_signal_handler handler,
856 void *user_data) 950 void *user_data)
857{ 951{
858 struct eloop_signal *tmp; 952 struct eloop_signal *tmp;
859 953
860 tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1, 954 tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
861 sizeof(struct eloop_signal)); 955 sizeof(struct eloop_signal));
862 if (tmp == NULL) 956 if (tmp == NULL)
863 return -1; 957 return -1;
864 958
865 tmp[eloop.signal_count].sig = sig; 959 tmp[eloop.signal_count].sig = sig;
866 tmp[eloop.signal_count].user_data = user_data; 960 tmp[eloop.signal_count].user_data = user_data;
867 tmp[eloop.signal_count].handler = handler; 961 tmp[eloop.signal_count].handler = handler;
868 tmp[eloop.signal_count].signaled = 0; 962 tmp[eloop.signal_count].signaled = 0;
869 eloop.signal_count++; 963 eloop.signal_count++;
870 eloop.signals = tmp; 964 eloop.signals = tmp;
871 signal(sig, eloop_handle_signal); 965 signal(sig, eloop_handle_signal);
872 966
873 return 0; 967 return 0;
874} 968}
875 969
876 970
877int eloop_register_signal_terminate(eloop_signal_handler handler, 971int eloop_register_signal_terminate(eloop_signal_handler handler,
878 void *user_data) 972 void *user_data)
879{ 973{
880 int ret = eloop_register_signal(SIGINT, handler, user_data); 974 int ret = eloop_register_signal(SIGINT, handler, user_data);
881 if (ret == 0) 975 if (ret == 0)
882 ret = eloop_register_signal(SIGTERM, handler, user_data); 976 ret = eloop_register_signal(SIGTERM, handler, user_data);
883 return ret; 977 return ret;
884} 978}
885 979
886 980
/*
 * Register @handler for the reconfiguration signal (SIGHUP).  Native
 * Windows builds have no SIGHUP, so this is a successful no-op there.
 */
int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}
896 990
897 991
898void eloop_run(void) 992void eloop_run(void)
899{ 993{
900#ifdef CONFIG_ELOOP_POLL 994#ifdef CONFIG_ELOOP_POLL
901 int num_poll_fds; 995 int num_poll_fds;
902 int timeout_ms = 0; 996 int timeout_ms = 0;
903#endif /* CONFIG_ELOOP_POLL */ 997#endif /* CONFIG_ELOOP_POLL */
904#ifdef CONFIG_ELOOP_SELECT 998#ifdef CONFIG_ELOOP_SELECT
905 fd_set *rfds, *wfds, *efds; 999 fd_set *rfds, *wfds, *efds;
906 struct timeval _tv; 1000 struct timeval _tv;
907#endif /* CONFIG_ELOOP_SELECT */ 1001#endif /* CONFIG_ELOOP_SELECT */
908#ifdef CONFIG_ELOOP_EPOLL 1002#ifdef CONFIG_ELOOP_EPOLL
909 int timeout_ms = -1; 1003 int timeout_ms = -1;
910#endif /* CONFIG_ELOOP_EPOLL */ 1004#endif /* CONFIG_ELOOP_EPOLL */
 1005#ifdef CONFIG_ELOOP_KQUEUE
 1006 struct timespec ts;
 1007#endif /* CONFIG_ELOOP_KQUEUE */
911 int res; 1008 int res;
912 struct os_reltime tv, now; 1009 struct os_reltime tv, now;
913 1010
914#ifdef CONFIG_ELOOP_SELECT 1011#ifdef CONFIG_ELOOP_SELECT
915 rfds = os_malloc(sizeof(*rfds)); 1012 rfds = os_malloc(sizeof(*rfds));
916 wfds = os_malloc(sizeof(*wfds)); 1013 wfds = os_malloc(sizeof(*wfds));
917 efds = os_malloc(sizeof(*efds)); 1014 efds = os_malloc(sizeof(*efds));
918 if (rfds == NULL || wfds == NULL || efds == NULL) 1015 if (rfds == NULL || wfds == NULL || efds == NULL)
919 goto out; 1016 goto out;
920#endif /* CONFIG_ELOOP_SELECT */ 1017#endif /* CONFIG_ELOOP_SELECT */
921 1018
922 while (!eloop.terminate && 1019 while (!eloop.terminate &&
923 (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 || 1020 (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
924 eloop.writers.count > 0 || eloop.exceptions.count > 0)) { 1021 eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
925 struct eloop_timeout *timeout; 1022 struct eloop_timeout *timeout;
926 timeout = dl_list_first(&eloop.timeout, struct eloop_timeout, 1023 timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
927 list); 1024 list);
928 if (timeout) { 1025 if (timeout) {
929 os_get_reltime(&now); 1026 os_get_reltime(&now);
930 if (os_reltime_before(&now, &timeout->time)) 1027 if (os_reltime_before(&now, &timeout->time))
931 os_reltime_sub(&timeout->time, &now, &tv); 1028 os_reltime_sub(&timeout->time, &now, &tv);
932 else 1029 else
933 tv.sec = tv.usec = 0; 1030 tv.sec = tv.usec = 0;
934#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) 1031#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
935 timeout_ms = tv.sec * 1000 + tv.usec / 1000; 1032 timeout_ms = tv.sec * 1000 + tv.usec / 1000;
936#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */ 1033#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
937#ifdef CONFIG_ELOOP_SELECT 1034#ifdef CONFIG_ELOOP_SELECT
938 _tv.tv_sec = tv.sec; 1035 _tv.tv_sec = tv.sec;
939 _tv.tv_usec = tv.usec; 1036 _tv.tv_usec = tv.usec;
940#endif /* CONFIG_ELOOP_SELECT */ 1037#endif /* CONFIG_ELOOP_SELECT */
 1038#ifdef CONFIG_ELOOP_KQUEUE
 1039 ts.tv_sec = tv.sec;
 1040 ts.tv_nsec = tv.usec * 1000L;
 1041#endif /* CONFIG_ELOOP_KQUEUE */
941 } 1042 }
942 1043
943#ifdef CONFIG_ELOOP_POLL 1044#ifdef CONFIG_ELOOP_POLL
944 num_poll_fds = eloop_sock_table_set_fds( 1045 num_poll_fds = eloop_sock_table_set_fds(
945 &eloop.readers, &eloop.writers, &eloop.exceptions, 1046 &eloop.readers, &eloop.writers, &eloop.exceptions,
946 eloop.pollfds, eloop.pollfds_map, 1047 eloop.pollfds, eloop.pollfds_map,
947 eloop.max_pollfd_map); 1048 eloop.max_pollfd_map);
948 res = poll(eloop.pollfds, num_poll_fds, 1049 res = poll(eloop.pollfds, num_poll_fds,
949 timeout ? timeout_ms : -1); 1050 timeout ? timeout_ms : -1);
950#endif /* CONFIG_ELOOP_POLL */ 1051#endif /* CONFIG_ELOOP_POLL */
951#ifdef CONFIG_ELOOP_SELECT 1052#ifdef CONFIG_ELOOP_SELECT
952 eloop_sock_table_set_fds(&eloop.readers, rfds); 1053 eloop_sock_table_set_fds(&eloop.readers, rfds);
953 eloop_sock_table_set_fds(&eloop.writers, wfds); 1054 eloop_sock_table_set_fds(&eloop.writers, wfds);
954 eloop_sock_table_set_fds(&eloop.exceptions, efds); 1055 eloop_sock_table_set_fds(&eloop.exceptions, efds);
955 res = select(eloop.max_sock + 1, rfds, wfds, efds, 1056 res = select(eloop.max_sock + 1, rfds, wfds, efds,
956 timeout ? &_tv : NULL); 1057 timeout ? &_tv : NULL);
957#endif /* CONFIG_ELOOP_SELECT */ 1058#endif /* CONFIG_ELOOP_SELECT */
958#ifdef CONFIG_ELOOP_EPOLL 1059#ifdef CONFIG_ELOOP_EPOLL
959 if (eloop.count == 0) { 1060 if (eloop.count == 0) {
960 res = 0; 1061 res = 0;
961 } else { 1062 } else {
962 res = epoll_wait(eloop.epollfd, eloop.epoll_events, 1063 res = epoll_wait(eloop.epollfd, eloop.epoll_events,
963 eloop.count, timeout_ms); 1064 eloop.count, timeout_ms);
964 } 1065 }
965#endif /* CONFIG_ELOOP_EPOLL */ 1066#endif /* CONFIG_ELOOP_EPOLL */
 1067#ifdef CONFIG_ELOOP_KQUEUE
 1068 if (eloop.count == 0) {
 1069 res = 0;
 1070 } else {
 1071 res = kevent(eloop.kqueuefd, NULL, 0,
 1072 eloop.kqueue_events, eloop.kqueue_nevents,
 1073 timeout ? &ts : NULL);
 1074 }
 1075#endif /* CONFIG_ELOOP_KQUEUE */
966 if (res < 0 && errno != EINTR && errno != 0) { 1076 if (res < 0 && errno != EINTR && errno != 0) {
967 wpa_printf(MSG_ERROR, "eloop: %s: %s", 1077 wpa_printf(MSG_ERROR, "eloop: %s: %s",
968#ifdef CONFIG_ELOOP_POLL 1078#ifdef CONFIG_ELOOP_POLL
969 "poll" 1079 "poll"
970#endif /* CONFIG_ELOOP_POLL */ 1080#endif /* CONFIG_ELOOP_POLL */
971#ifdef CONFIG_ELOOP_SELECT 1081#ifdef CONFIG_ELOOP_SELECT
972 "select" 1082 "select"
973#endif /* CONFIG_ELOOP_SELECT */ 1083#endif /* CONFIG_ELOOP_SELECT */
974#ifdef CONFIG_ELOOP_EPOLL 1084#ifdef CONFIG_ELOOP_EPOLL
975 "epoll" 1085 "epoll"
976#endif /* CONFIG_ELOOP_EPOLL */ 1086#endif /* CONFIG_ELOOP_EPOLL */
 1087#ifdef CONFIG_ELOOP_KQUEUE
 1088 "kqueue"
 1089#endif /* CONFIG_ELOOP_EKQUEUE */
 1090
977 , strerror(errno)); 1091 , strerror(errno));
978 goto out; 1092 goto out;
979 } 1093 }
980 eloop_process_pending_signals(); 1094 eloop_process_pending_signals();
981 1095
982 /* check if some registered timeouts have occurred */ 1096 /* check if some registered timeouts have occurred */
983 timeout = dl_list_first(&eloop.timeout, struct eloop_timeout, 1097 timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
984 list); 1098 list);
985 if (timeout) { 1099 if (timeout) {
986 os_get_reltime(&now); 1100 os_get_reltime(&now);
987 if (!os_reltime_before(&now, &timeout->time)) { 1101 if (!os_reltime_before(&now, &timeout->time)) {
988 void *eloop_data = timeout->eloop_data; 1102 void *eloop_data = timeout->eloop_data;
989 void *user_data = timeout->user_data; 1103 void *user_data = timeout->user_data;
990 eloop_timeout_handler handler = 1104 eloop_timeout_handler handler =
991 timeout->handler; 1105 timeout->handler;
992 eloop_remove_timeout(timeout); 1106 eloop_remove_timeout(timeout);
993 handler(eloop_data, user_data); 1107 handler(eloop_data, user_data);
994 } 1108 }
995 1109
996 } 1110 }
997 1111
998 if (res <= 0) 1112 if (res <= 0)
999 continue; 1113 continue;
1000 1114
1001#ifdef CONFIG_ELOOP_POLL 1115#ifdef CONFIG_ELOOP_POLL
1002 eloop_sock_table_dispatch(&eloop.readers, &eloop.writers, 1116 eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
1003 &eloop.exceptions, eloop.pollfds_map, 1117 &eloop.exceptions, eloop.pollfds_map,
1004 eloop.max_pollfd_map); 1118 eloop.max_pollfd_map);
1005#endif /* CONFIG_ELOOP_POLL */ 1119#endif /* CONFIG_ELOOP_POLL */
1006#ifdef CONFIG_ELOOP_SELECT 1120#ifdef CONFIG_ELOOP_SELECT
1007 eloop_sock_table_dispatch(&eloop.readers, rfds); 1121 eloop_sock_table_dispatch(&eloop.readers, rfds);
1008 eloop_sock_table_dispatch(&eloop.writers, wfds); 1122 eloop_sock_table_dispatch(&eloop.writers, wfds);
1009 eloop_sock_table_dispatch(&eloop.exceptions, efds); 1123 eloop_sock_table_dispatch(&eloop.exceptions, efds);
1010#endif /* CONFIG_ELOOP_SELECT */ 1124#endif /* CONFIG_ELOOP_SELECT */
1011#ifdef CONFIG_ELOOP_EPOLL 1125#ifdef CONFIG_ELOOP_EPOLL
1012 eloop_sock_table_dispatch(eloop.epoll_events, res); 1126 eloop_sock_table_dispatch(eloop.epoll_events, res);
1013#endif /* CONFIG_ELOOP_EPOLL */ 1127#endif /* CONFIG_ELOOP_EPOLL */
 1128#ifdef CONFIG_ELOOP_KQUEUE
 1129 eloop_sock_table_dispatch(eloop.kqueue_events, res);
 1130#endif /* CONFIG_ELOOP_KQUEUE */
1014 } 1131 }
1015 1132
1016 eloop.terminate = 0; 1133 eloop.terminate = 0;
1017out: 1134out:
1018#ifdef CONFIG_ELOOP_SELECT 1135#ifdef CONFIG_ELOOP_SELECT
1019 os_free(rfds); 1136 os_free(rfds);
1020 os_free(wfds); 1137 os_free(wfds);
1021 os_free(efds); 1138 os_free(efds);
1022#endif /* CONFIG_ELOOP_SELECT */ 1139#endif /* CONFIG_ELOOP_SELECT */
1023 return; 1140 return;
1024} 1141}
1025 1142
1026 1143
/*
 * Request loop termination; eloop_run() tests this flag at the top of
 * each iteration and exits once it is set.
 */
void eloop_terminate(void)
{
	eloop.terminate = 1;
}
1031 1148
1032 1149
/*
 * Release all eloop state: report and free any timeouts still pending
 * (with time remaining relative to now), destroy the socket tables,
 * free the signal list and tear down backend-specific resources.
 */
void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		/* Remaining time until this timeout would have fired */
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE) */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
1072 1195
1073 1196
/*
 * Return nonzero if loop termination has been requested via
 * eloop_terminate().
 */
int eloop_terminated(void)
{
	return eloop.terminate;
}
1078 1201
1079 1202
/*
 * eloop_wait_for_read_sock - Block until @sock becomes readable
 * @sock: File descriptor to wait on; negative values are ignored
 *
 * One-shot wait used outside the main event loop.  Fixes vs. previous
 * revision: the kqueue branch did not check for a negative descriptor
 * the way the poll and select/epoll branches do, so an invalid fd
 * would have been handed to EV_SET()/kevent(); also fixed the
 * "requres" typo and the CONFIG_ELOOP_EKQUEUE comment typo.
 */
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We can use epoll() here. But epoll() requires 4 system calls.
	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait, and close() for
	 * epoll fd. So select() is better for performance here.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	/* Match the other backends: ignore invalid descriptors */
	if (sock < 0)
		return;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
1110 1244
1111#ifdef CONFIG_ELOOP_SELECT 1245#ifdef CONFIG_ELOOP_SELECT
1112#undef CONFIG_ELOOP_SELECT 1246#undef CONFIG_ELOOP_SELECT
1113#endif /* CONFIG_ELOOP_SELECT */ 1247#endif /* CONFIG_ELOOP_SELECT */