Wed Jan 20 15:07:52 2016 UTC
Add kqueue(2) support.


(roy)
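For context, the change teaches eloop the kqueue(2)/kevent(2) interface: kqueue() creates the event queue in eloop_init(), EV_SET()/kevent() register and remove descriptors in eloop_sock_table_add_sock()/_remove_sock(), and a single kevent() call waits for ready events in eloop_run(), which are then dispatched via eloop.fd_table. A minimal standalone sketch of that pattern follows; the helper name run_once() is illustrative only and not part of eloop's API.

/*
 * Minimal sketch of the kqueue(2) pattern this change wires into eloop.c:
 * kqueue() to create the queue, EV_SET()/kevent() to register a descriptor,
 * and kevent() again to wait for events.  run_once() is an illustrative
 * name; eloop keeps its own fd_table and handlers instead of printing.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

static int run_once(int fd)
{
	struct kevent ke, ev;
	struct timespec ts = { 5, 0 };	/* 5 second timeout, like eloop's ts */
	int kq, n;

	kq = kqueue();			/* cf. eloop_init() */
	if (kq < 0)
		return -1;

	/* cf. eloop_sock_table_add_sock(): register fd for read events */
	EV_SET(&ke, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &ke, 1, NULL, 0, NULL) == -1) {
		close(kq);
		return -1;
	}

	/* cf. eloop_run(): wait, then dispatch by ev.ident */
	n = kevent(kq, NULL, 0, &ev, 1, &ts);
	if (n > 0)
		printf("fd %d is readable\n", (int)ev.ident);

	close(kq);
	return n;
}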
diff -r1.6 -r1.7 src/external/bsd/wpa/dist/src/utils/eloop.c


--- src/external/bsd/wpa/dist/src/utils/eloop.c 2015/04/01 19:45:15 1.6
+++ src/external/bsd/wpa/dist/src/utils/eloop.c 2016/01/20 15:07:52 1.7
@@ -8,38 +8,47 @@
 
 #include "includes.h"
 #include <assert.h>
 
 #include "common.h"
 #include "trace.h"
 #include "list.h"
 #include "eloop.h"
 
 #if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
 #error Do not define both of poll and epoll
 #endif
 
-#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL)
+#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
+#error Do not define both of poll and kqueue
+#endif
+
+#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
+    !defined(CONFIG_ELOOP_KQUEUE)
 #define CONFIG_ELOOP_SELECT
 #endif
 
 #ifdef CONFIG_ELOOP_POLL
 #include <poll.h>
 #endif /* CONFIG_ELOOP_POLL */
 
 #ifdef CONFIG_ELOOP_EPOLL
 #include <sys/epoll.h>
 #endif /* CONFIG_ELOOP_EPOLL */
 
+#ifdef CONFIG_ELOOP_KQUEUE
+#include <sys/event.h>
+#endif /* CONFIG_ELOOP_KQUEUE */
+
 struct eloop_sock {
 	int sock;
 	void *eloop_data;
 	void *user_data;
 	eloop_sock_handler handler;
 	WPA_TRACE_REF(eloop)
 	WPA_TRACE_REF(user)
 	WPA_TRACE_INFO
 };
 
 struct eloop_timeout {
 	struct dl_list list;
 	struct os_reltime time;
@@ -51,50 +60,57 @@ struct eloop_timeout {
 	WPA_TRACE_INFO
 };
 
 struct eloop_signal {
 	int sig;
 	void *user_data;
 	eloop_signal_handler handler;
 	int signaled;
 };
 
 struct eloop_sock_table {
 	int count;
 	struct eloop_sock *table;
-#ifdef CONFIG_ELOOP_EPOLL
+#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
 	eloop_event_type type;
 #else /* CONFIG_ELOOP_EPOLL */
 	int changed;
 #endif /* CONFIG_ELOOP_EPOLL */
 };
 
 struct eloop_data {
 	int max_sock;
 
 	int count; /* sum of all table counts */
 #ifdef CONFIG_ELOOP_POLL
 	int max_pollfd_map; /* number of pollfds_map currently allocated */
 	int max_poll_fds; /* number of pollfds currently allocated */
 	struct pollfd *pollfds;
 	struct pollfd **pollfds_map;
 #endif /* CONFIG_ELOOP_POLL */
+#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
+	int max_fd;
+	struct eloop_sock *fd_table;
+#endif
 #ifdef CONFIG_ELOOP_EPOLL
 	int epollfd;
 	int epoll_max_event_num;
-	int epoll_max_fd;
-	struct eloop_sock *epoll_table;
 	struct epoll_event *epoll_events;
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+	int kqueuefd;
+	int kqueue_nevents;
+	struct kevent *kqueue_events;
+#endif /* CONFIG_ELOOP_KQUEUE */
 	struct eloop_sock_table readers;
 	struct eloop_sock_table writers;
 	struct eloop_sock_table exceptions;
 
 	struct dl_list timeout;
 
 	int signal_count;
 	struct eloop_signal *signals;
 	int signaled;
 	int pending_terminate;
 
 	int terminate;
 };
@@ -150,42 +166,55 @@ int eloop_init(void)
 	os_memset(&eloop, 0, sizeof(eloop));
 	dl_list_init(&eloop.timeout);
 #ifdef CONFIG_ELOOP_EPOLL
 	eloop.epollfd = epoll_create1(0);
 	if (eloop.epollfd < 0) {
 		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s\n",
 			   __func__, strerror(errno));
 		return -1;
 	}
 	eloop.readers.type = EVENT_TYPE_READ;
 	eloop.writers.type = EVENT_TYPE_WRITE;
 	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+	eloop.kqueuefd = kqueue();
+	if (eloop.kqueuefd < 0) {
+		wpa_printf(MSG_ERROR, "%s: kqueue failed. %s\n",
+			   __func__, strerror(errno));
+		return -1;
+	}
+#endif /* CONFIG_ELOOP_KQUEUE */
 #ifdef WPA_TRACE
 	signal(SIGSEGV, eloop_sigsegv_handler);
 #endif /* WPA_TRACE */
 	return 0;
 }
 
 
 static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
                                      int sock, eloop_sock_handler handler,
                                      void *eloop_data, void *user_data)
 {
 #ifdef CONFIG_ELOOP_EPOLL
 	struct eloop_sock *temp_table;
 	struct epoll_event ev, *temp_events;
 	int next;
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+	struct eloop_sock *temp_table;
+	int next, filter;
+	struct kevent ke;
+#endif
 	struct eloop_sock *tmp;
 	int new_max_sock;
 
 	if (sock > eloop.max_sock)
 		new_max_sock = sock;
 	else
 		new_max_sock = eloop.max_sock;
 
 	if (table == NULL)
 		return -1;
 
 #ifdef CONFIG_ELOOP_POLL
 	if (new_max_sock >= eloop.max_pollfd_map) {
@@ -201,72 +230,89 @@ static int eloop_sock_table_add_sock(str
 
 	if (eloop.count + 1 > eloop.max_poll_fds) {
 		struct pollfd *n;
 		int nmax = eloop.count + 1 + 50;
 		n = os_realloc_array(eloop.pollfds, nmax,
 				     sizeof(struct pollfd));
 		if (n == NULL)
 			return -1;
 
 		eloop.max_poll_fds = nmax;
 		eloop.pollfds = n;
 	}
 #endif /* CONFIG_ELOOP_POLL */
-#ifdef CONFIG_ELOOP_EPOLL
-	if (new_max_sock >= eloop.epoll_max_fd) {
-		next = eloop.epoll_max_fd == 0 ? 16 : eloop.epoll_max_fd * 2;
-		temp_table = os_realloc_array(eloop.epoll_table, next,
+#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
+	if (new_max_sock >= eloop.max_fd) {
+		next = eloop.max_fd == 0 ? 16 : eloop.max_fd * 2;
+		temp_table = os_realloc_array(eloop.fd_table, next,
 					      sizeof(struct eloop_sock));
 		if (temp_table == NULL)
 			return -1;
 
-		eloop.epoll_max_fd = next;
-		eloop.epoll_table = temp_table;
+		eloop.max_fd = next;
+		eloop.fd_table = temp_table;
 	}
+#endif
 
+#ifdef CONFIG_ELOOP_EPOLL
 	if (eloop.count + 1 > eloop.epoll_max_event_num) {
 		next = eloop.epoll_max_event_num == 0 ? 8 :
 			eloop.epoll_max_event_num * 2;
 		temp_events = os_realloc_array(eloop.epoll_events, next,
 					       sizeof(struct epoll_event));
 		if (temp_events == NULL) {
 			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed. "
 				   "%s\n", __func__, strerror(errno));
 			return -1;
 		}
 
 		eloop.epoll_max_event_num = next;
 		eloop.epoll_events = temp_events;
 	}
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+	if (eloop.count + 1 > eloop.kqueue_nevents) {
+		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
+		os_free(eloop.kqueue_events);
+		eloop.kqueue_events = os_malloc(next *
+						sizeof(eloop.kqueue_events));
+		if (eloop.kqueue_events == NULL) {
+			wpa_printf(MSG_ERROR, "%s: malloc for kqueue failed. "
+				   "%s\n", __func__, strerror(errno));
+			return -1;
+		}
+
+		eloop.kqueue_nevents = next;
+	}
+#endif /* CONFIG_ELOOP_KQUEUE */
 
 	eloop_trace_sock_remove_ref(table);
 	tmp = os_realloc_array(table->table, table->count + 1,
 			       sizeof(struct eloop_sock));
 	if (tmp == NULL) {
 		eloop_trace_sock_add_ref(table);
 		return -1;
 	}
 
 	tmp[table->count].sock = sock;
 	tmp[table->count].eloop_data = eloop_data;
 	tmp[table->count].user_data = user_data;
 	tmp[table->count].handler = handler;
 	wpa_trace_record(&tmp[table->count]);
 	table->count++;
 	table->table = tmp;
 	eloop.max_sock = new_max_sock;
 	eloop.count++;
-#ifndef CONFIG_ELOOP_EPOLL
+#if !defined(CONFIG_ELOOP_EPOLL) && !defined(CONFIG_ELOOP_KQUEUE)
 	table->changed = 1;
 #endif /* CONFIG_ELOOP_EPOLL */
 	eloop_trace_sock_add_ref(table);
 
 #ifdef CONFIG_ELOOP_EPOLL
 	os_memset(&ev, 0, sizeof(ev));
 	switch (table->type) {
 	case EVENT_TYPE_READ:
 		ev.events = EPOLLIN;
 		break;
 	case EVENT_TYPE_WRITE:
 		ev.events = EPOLLOUT;
 		break;
@@ -275,67 +321,99 @@ static int eloop_sock_table_add_sock(str
 	 * possible that someone registered a socket *only* for exception
 	 * handling.
 	 */
 	case EVENT_TYPE_EXCEPTION:
 		ev.events = EPOLLERR | EPOLLHUP;
 		break;
 	}
 	ev.data.fd = sock;
 	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
 		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d "
 			   "failed. %s\n", __func__, sock, strerror(errno));
 		return -1;
 	}
-	os_memcpy(&eloop.epoll_table[sock], &table->table[table->count - 1],
+	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
 		  sizeof(struct eloop_sock));
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+	switch (table->type) {
+	case EVENT_TYPE_READ:
+		filter = EVFILT_READ;
+		break;
+	case EVENT_TYPE_WRITE:
+		filter = EVFILT_WRITE;
+		break;
+	default:
+		filter = 0;
+	}
+	EV_SET(&ke, sock, filter, EV_ADD, 0, 0, NULL);
+	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
+		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d "
+			   "failed. %s\n", __func__, sock, strerror(errno));
+		return -1;
+	}
+	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
+		  sizeof(struct eloop_sock));
+#endif /* CONFIG_ELOOP_KQUEUE */
 	return 0;
 }
 
 
 static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
                                          int sock)
 {
+#ifdef CONFIG_ELOOP_KQUEUE
+	struct kevent ke;
+#endif
 	int i;
 
 	if (table == NULL || table->table == NULL || table->count == 0)
 		return;
 
 	for (i = 0; i < table->count; i++) {
 		if (table->table[i].sock == sock)
 			break;
 	}
 	if (i == table->count)
 		return;
 	eloop_trace_sock_remove_ref(table);
 	if (i != table->count - 1) {
 		os_memmove(&table->table[i], &table->table[i + 1],
 			   (table->count - i - 1) *
 			   sizeof(struct eloop_sock));
 	}
 	table->count--;
 	eloop.count--;
-#ifndef CONFIG_ELOOP_EPOLL
+#if !defined(CONFIG_ELOOP_EPOLL) && !defined(CONFIG_ELOOP_KQUEUE)
 	table->changed = 1;
 #endif /* CONFIG_ELOOP_EPOLL */
 	eloop_trace_sock_add_ref(table);
 #ifdef CONFIG_ELOOP_EPOLL
 	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
 		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d "
 			   "failed. %s\n", __func__, sock, strerror(errno));
 		return;
 	}
-	os_memset(&eloop.epoll_table[sock], 0, sizeof(struct eloop_sock));
+	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+	EV_SET(&ke, sock, 0, EV_DELETE, 0, 0, NULL);
+	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
+		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d "
+			   "failed. %s\n", __func__, sock, strerror(errno));
+		return;
+	}
+	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
+#endif /* CONFIG_ELOOP_KQUEUE */
 }
 
 
 #ifdef CONFIG_ELOOP_POLL
 
 static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
 {
 	if (fd < mx && fd >= 0)
 		return pollfds_map[fd];
 	return NULL;
 }
 
 
@@ -508,36 +586,52 @@ static void eloop_sock_table_dispatch(st
 	}
 }
 
 #endif /* CONFIG_ELOOP_SELECT */
 
 
 #ifdef CONFIG_ELOOP_EPOLL
 static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
 {
 	struct eloop_sock *table;
 	int i;
 
 	for (i = 0; i < nfds; i++) {
-		table = &eloop.epoll_table[events[i].data.fd];
+		table = &eloop.fd_table[events[i].data.fd];
 		if (table->handler == NULL)
 			continue;
 		table->handler(table->sock, table->eloop_data,
 			       table->user_data);
 	}
 }
 #endif /* CONFIG_ELOOP_EPOLL */
 
 
+#ifdef CONFIG_ELOOP_KQUEUE
+static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
+{
+	struct eloop_sock *table;
+	int i;
+
+	for (i = 0; i < nfds; i++) {
+		table = &eloop.fd_table[events[i].ident];
+		if (table->handler == NULL)
+			continue;
+		table->handler(table->sock, table->eloop_data,
+			       table->user_data);
+	}
+}
+#endif /* CONFIG_ELOOP_KQUEUE */
+
 static void eloop_sock_table_destroy(struct eloop_sock_table *table)
 {
 	if (table) {
 		int i;
 		for (i = 0; i < table->count && table->table; i++) {
 			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
 				   "sock=%d eloop_data=%p user_data=%p "
 				   "handler=%p",
 				   table->table[i].sock,
 				   table->table[i].eloop_data,
 				   table->table[i].user_data,
 				   table->table[i].handler);
 			wpa_trace_dump_funcname("eloop unregistered socket "
@@ -898,26 +992,29 @@ int eloop_register_signal_reconfig(eloop
 void eloop_run(void)
 {
 #ifdef CONFIG_ELOOP_POLL
 	int num_poll_fds;
 	int timeout_ms = 0;
 #endif /* CONFIG_ELOOP_POLL */
 #ifdef CONFIG_ELOOP_SELECT
 	fd_set *rfds, *wfds, *efds;
 	struct timeval _tv;
 #endif /* CONFIG_ELOOP_SELECT */
 #ifdef CONFIG_ELOOP_EPOLL
 	int timeout_ms = -1;
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+	struct timespec ts;
+#endif /* CONFIG_ELOOP_KQUEUE */
 	int res;
 	struct os_reltime tv, now;
 
 #ifdef CONFIG_ELOOP_SELECT
 	rfds = os_malloc(sizeof(*rfds));
 	wfds = os_malloc(sizeof(*wfds));
 	efds = os_malloc(sizeof(*efds));
 	if (rfds == NULL || wfds == NULL || efds == NULL)
 		goto out;
 #endif /* CONFIG_ELOOP_SELECT */
 
 	while (!eloop.terminate &&
 	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
@@ -928,62 +1025,79 @@ void eloop_run(void)
 		if (timeout) {
 			os_get_reltime(&now);
 			if (os_reltime_before(&now, &timeout->time))
 				os_reltime_sub(&timeout->time, &now, &tv);
 			else
 				tv.sec = tv.usec = 0;
 #if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
 			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
 #endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
 #ifdef CONFIG_ELOOP_SELECT
 			_tv.tv_sec = tv.sec;
 			_tv.tv_usec = tv.usec;
 #endif /* CONFIG_ELOOP_SELECT */
+#ifdef CONFIG_ELOOP_KQUEUE
+			ts.tv_sec = tv.sec;
+			ts.tv_nsec = tv.usec * 1000L;
+#endif /* CONFIG_ELOOP_KQUEUE */
 		}
 
 #ifdef CONFIG_ELOOP_POLL
 		num_poll_fds = eloop_sock_table_set_fds(
 			&eloop.readers, &eloop.writers, &eloop.exceptions,
 			eloop.pollfds, eloop.pollfds_map,
 			eloop.max_pollfd_map);
 		res = poll(eloop.pollfds, num_poll_fds,
 			   timeout ? timeout_ms : -1);
 #endif /* CONFIG_ELOOP_POLL */
 #ifdef CONFIG_ELOOP_SELECT
 		eloop_sock_table_set_fds(&eloop.readers, rfds);
 		eloop_sock_table_set_fds(&eloop.writers, wfds);
 		eloop_sock_table_set_fds(&eloop.exceptions, efds);
 		res = select(eloop.max_sock + 1, rfds, wfds, efds,
 			     timeout ? &_tv : NULL);
 #endif /* CONFIG_ELOOP_SELECT */
 #ifdef CONFIG_ELOOP_EPOLL
 		if (eloop.count == 0) {
 			res = 0;
 		} else {
 			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
 					 eloop.count, timeout_ms);
 		}
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+		if (eloop.count == 0) {
+			res = 0;
+		} else {
+			res = kevent(eloop.kqueuefd, NULL, 0,
+				     eloop.kqueue_events, eloop.kqueue_nevents,
+				     timeout ? &ts : NULL);
+		}
+#endif /* CONFIG_ELOOP_KQUEUE */
 		if (res < 0 && errno != EINTR && errno != 0) {
 			wpa_printf(MSG_ERROR, "eloop: %s: %s",
 #ifdef CONFIG_ELOOP_POLL
 				   "poll"
 #endif /* CONFIG_ELOOP_POLL */
 #ifdef CONFIG_ELOOP_SELECT
 				   "select"
 #endif /* CONFIG_ELOOP_SELECT */
 #ifdef CONFIG_ELOOP_EPOLL
 				   "epoll"
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+				   "kqueue"
+#endif /* CONFIG_ELOOP_EKQUEUE */
+
 				   , strerror(errno));
 			goto out;
 		}
 		eloop_process_pending_signals();
 
 		/* check if some registered timeouts have occurred */
 		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
 					list);
 		if (timeout) {
 			os_get_reltime(&now);
 			if (!os_reltime_before(&now, &timeout->time)) {
 				void *eloop_data = timeout->eloop_data;
 				void *user_data = timeout->user_data;
@@ -1001,26 +1115,29 @@ void eloop_run(void)
 #ifdef CONFIG_ELOOP_POLL
 		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
 					  &eloop.exceptions, eloop.pollfds_map,
 					  eloop.max_pollfd_map);
 #endif /* CONFIG_ELOOP_POLL */
 #ifdef CONFIG_ELOOP_SELECT
 		eloop_sock_table_dispatch(&eloop.readers, rfds);
 		eloop_sock_table_dispatch(&eloop.writers, wfds);
 		eloop_sock_table_dispatch(&eloop.exceptions, efds);
 #endif /* CONFIG_ELOOP_SELECT */
 #ifdef CONFIG_ELOOP_EPOLL
 		eloop_sock_table_dispatch(eloop.epoll_events, res);
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+		eloop_sock_table_dispatch(eloop.kqueue_events, res);
+#endif /* CONFIG_ELOOP_KQUEUE */
 	}
 
 	eloop.terminate = 0;
 out:
 #ifdef CONFIG_ELOOP_SELECT
 	os_free(rfds);
 	os_free(wfds);
 	os_free(efds);
 #endif /* CONFIG_ELOOP_SELECT */
 	return;
 }
 
 
@@ -1053,31 +1170,37 @@ void eloop_destroy(void)
 			   timeout->handler);
 		wpa_trace_dump("eloop timeout", timeout);
 		eloop_remove_timeout(timeout);
 	}
 	eloop_sock_table_destroy(&eloop.readers);
 	eloop_sock_table_destroy(&eloop.writers);
 	eloop_sock_table_destroy(&eloop.exceptions);
 	os_free(eloop.signals);
 
 #ifdef CONFIG_ELOOP_POLL
 	os_free(eloop.pollfds);
 	os_free(eloop.pollfds_map);
 #endif /* CONFIG_ELOOP_POLL */
+#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
+	os_free(eloop.fd_table);
+#endif
 #ifdef CONFIG_ELOOP_EPOLL
-	os_free(eloop.epoll_table);
 	os_free(eloop.epoll_events);
 	close(eloop.epollfd);
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+	os_free(eloop.kqueue_events);
+	close(eloop.kqueuefd);
+#endif /* CONFIG_ELOOP_KQUEUE */
 }
 
 
 int eloop_terminated(void)
 {
 	return eloop.terminate;
 }
 
 
 void eloop_wait_for_read_sock(int sock)
 {
 #ifdef CONFIG_ELOOP_POLL
 	struct pollfd pfd;
@@ -1096,18 +1219,29 @@ void eloop_wait_for_read_sock(int sock)
 	 * We can use epoll() here. But epoll() requres 4 system calls.
 	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait, and close() for
 	 * epoll fd. So select() is better for performance here.
 	 */
 	fd_set rfds;
 
 	if (sock < 0)
 		return;
 
 	FD_ZERO(&rfds);
 	FD_SET(sock, &rfds);
 	select(sock + 1, &rfds, NULL, NULL, NULL);
 #endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
+#ifdef CONFIG_ELOOP_KQUEUE
+	int kfd;
+	struct kevent ke1, ke2;
+
+	kfd = kqueue();
+	if (kfd == -1)
+		return;
+	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);
+	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
+	close(kfd);
+#endif /* CONFIG_ELOOP_KQUEUE */
 }
 
 #ifdef CONFIG_ELOOP_SELECT
 #undef CONFIG_ELOOP_SELECT
 #endif /* CONFIG_ELOOP_SELECT */
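
Note on the last hunk: the kqueue path of eloop_wait_for_read_sock() builds a throwaway queue per call, registers the descriptor with EV_ONESHOT so the event is removed automatically once it fires, blocks in a single kevent() call, and closes the queue. Because that one kevent() call both submits the change list and waits for the result, the whole wait costs three system calls (kqueue, kevent, close), in the spirit of the existing comment that prefers select() over epoll()'s four. A self-contained sketch of the same pattern; the name wait_readable() is illustrative only.

/*
 * One-shot wait mirroring the CONFIG_ELOOP_KQUEUE branch of
 * eloop_wait_for_read_sock() above.  wait_readable() is an
 * illustrative name, not part of eloop's API.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>

static void wait_readable(int sock)
{
	struct kevent ke1, ke2;
	int kfd;

	if (sock < 0)
		return;

	kfd = kqueue();
	if (kfd == -1)
		return;

	/*
	 * EV_ONESHOT deletes the event after its first trigger, so no
	 * explicit EV_DELETE is needed before close().  The same kevent()
	 * call submits the change list and blocks until sock is readable.
	 */
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
}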