@@ Lines 67-91 @@ __FBSDID("$FreeBSD$");

 #include <compat/linux/linux_timer.h>
 #include <compat/linux/linux_util.h>
 
-/*
- * epoll defines 'struct epoll_event' with the field 'data' as 64 bits
- * on all architectures. But on 32 bit architectures BSD 'struct kevent' only
- * has 32 bit opaque pointer as 'udata' field. So we can't pass epoll supplied
- * data verbatuim. Therefore we allocate 64-bit memory block to pass
- * user supplied data for every file descriptor.
- */
-
 typedef uint64_t	epoll_udata_t;
 
-struct epoll_emuldata {
-	uint32_t	fdc;		/* epoll udata max index */
-	epoll_udata_t	udata[1];	/* epoll user data vector */
-};
-
-#define	EPOLL_DEF_SZ		16
-#define	EPOLL_SIZE(fdn)			\
-	(sizeof(struct epoll_emuldata)+(fdn) * sizeof(epoll_udata_t))
-
 struct epoll_event {
 	uint32_t	events;
 	epoll_udata_t	data;
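Note: for reference, a minimal userland sketch (not part of the patch) of the Linux behaviour the deleted comment describes: epoll_event.data is a 64-bit union on every architecture, and whatever is stored at EPOLL_CTL_ADD time must come back verbatim from epoll_wait(). This is the value the reworked code carries in the kevent ext[0] slot instead of the per-process udata vector.

    /* Illustrative only; builds against Linux headers. Run under the
     * Linuxulator, it exercises exactly this compat code. */
    #include <sys/epoll.h>
    #include <sys/eventfd.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        int epfd = epoll_create1(0);
        int efd = eventfd(1, 0);        /* counter > 0, so readable at once */
        struct epoll_event ev = { .events = EPOLLIN }, out;

        ev.data.u64 = UINT64_C(0x1122334455667788);  /* 64 bits even on 32-bit arches */
        epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &ev);
        if (epoll_wait(epfd, &out, 1, -1) == 1)
            printf("data round-tripped as %#llx\n",
                (unsigned long long)out.data.u64);
        return (0);
    }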
@@ Lines 97-111 @@ __attribute__((packed))

 
 #define	LINUX_MAX_EVENTS	(INT_MAX / sizeof(struct epoll_event))
 
-static void	epoll_fd_install(struct thread *td, int fd, epoll_udata_t udata);
 static int	epoll_to_kevent(struct thread *td, struct file *epfp,
 		    int fd, struct epoll_event *l_event, int *kev_flags,
 		    struct kevent *kevent, int *nkevents);
 static void	kevent_to_epoll(struct kevent *kevent, struct epoll_event *l_event);
 static int	epoll_kev_copyout(void *arg, struct kevent *kevp, int count);
 static int	epoll_kev_copyin(void *arg, struct kevent *kevp, int count);
-static int	epoll_delete_event(struct thread *td, struct file *epfp,
-		    int fd, int filter);
+static int	epoll_register_kevent(struct thread *td, struct file *epfp,
+		    int fd, int filter, unsigned int flags);
+static int	epoll_fd_registered(struct thread *td, struct file *epfp,
+		    struct file *fp, int fd);
 static int	epoll_delete_all_events(struct thread *td, struct file *epfp,
 		    int fd);
 
@@ Lines 220-254 @@ static void linux_timerfd_expire(void *);

 static void	linux_timerfd_curval(struct timerfd *, struct itimerspec *);
 
 
-static void
-epoll_fd_install(struct thread *td, int fd, epoll_udata_t udata)
-{
-	struct linux_pemuldata *pem;
-	struct epoll_emuldata *emd;
-	struct proc *p;
-
-	p = td->td_proc;
-
-	pem = pem_find(p);
-	KASSERT(pem != NULL, ("epoll proc emuldata not found.\n"));
-
-	LINUX_PEM_XLOCK(pem);
-	if (pem->epoll == NULL) {
-		emd = malloc(EPOLL_SIZE(fd), M_EPOLL, M_WAITOK);
-		emd->fdc = fd;
-		pem->epoll = emd;
-	} else {
-		emd = pem->epoll;
-		if (fd > emd->fdc) {
-			emd = realloc(emd, EPOLL_SIZE(fd), M_EPOLL, M_WAITOK);
-			emd->fdc = fd;
-			pem->epoll = emd;
-		}
-	}
-	emd->udata[fd] = udata;
-	LINUX_PEM_XUNLOCK(pem);
-}
-
 static int
 epoll_create_common(struct thread *td, int flags)
 {
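Note: on the helper removed above: epoll_emuldata uses the classic one-element trailing array idiom, EPOLL_SIZE(fdn) being the header plus fdn further slots, and epoll_fd_install() grows the vector with realloc() whenever a larger fd shows up. A stripped-down userland sketch of the same pattern, with hypothetical names and the allocation error handling omitted:

    #include <stdint.h>
    #include <stdlib.h>

    struct udata_vec {
        uint32_t maxidx;    /* highest index currently backed */
        uint64_t slot[1];   /* storage continues past the struct */
    };

    #define VEC_SIZE(n) (sizeof(struct udata_vec) + (n) * sizeof(uint64_t))

    /* Store val at index idx, growing on demand (cf. epoll_fd_install). */
    static struct udata_vec *
    vec_set(struct udata_vec *v, uint32_t idx, uint64_t val)
    {
        if (v == NULL) {
            v = malloc(VEC_SIZE(idx));
            v->maxidx = idx;
        } else if (idx > v->maxidx) {
            v = realloc(v, VEC_SIZE(idx));
            v->maxidx = idx;
        }
        v->slot[idx] = val;
        return (v);
    }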
@@ Lines 258-265 @@ epoll_create_common(struct thread *td, int flags)

 	if (error != 0)
 		return (error);
 
-	epoll_fd_install(td, EPOLL_DEF_SZ, 0);
-
 	return (0);
 }
 
@@ Lines 296-308 @@ linux_epoll_create1(struct thread *td, struct linux_epoll_create1_args *args)

 
 /* Structure converting function from epoll to kevent. */
 static int
-epoll_to_kevent(struct thread *td, struct file *epfp,
+epoll_to_kevent(struct thread *td, struct file *fp,
     int fd, struct epoll_event *l_event, int *kev_flags,
     struct kevent *kevent, int *nkevents)
 {
 	uint32_t levents = l_event->events;
 	struct linux_pemuldata *pem;
 	struct proc *p;
+	bool need_prot = false;
 
 	/* flags related to how event is registered */
 	if ((levents & LINUX_EPOLLONESHOT) != 0)
@@ Lines 316-326 @@ epoll_to_kevent(struct thread *td, struct file *epfp,

 
 	/* flags related to what event is registered */
 	if ((levents & LINUX_EPOLL_EVRD) != 0) {
-		EV_SET(kevent++, fd, EVFILT_READ, *kev_flags, 0, 0, 0);
+		EV_SET(kevent, fd, EVFILT_READ, *kev_flags, 0, 0, 0);
+		kevent->ext[0] = l_event->data;
+		kevent->ext[1] = fp->f_type;
+		++kevent;
+		++(*nkevents);
+	} else if ((levents & LINUX_EPOLLRDHUP) != 0 &&
+	    fp->f_type == DTYPE_SOCKET) {
+		/* Set lowat arbitrary high to block data-flow events */
+		EV_SET(kevent, fd, EVFILT_READ, *kev_flags, NOTE_LOWAT,
+		    INT_MAX, 0);
+		kevent->ext[0] = l_event->data;
+		kevent->ext[1] = fp->f_type;
+		++kevent;
 		++(*nkevents);
+		need_prot = true;
 	}
 	if ((levents & LINUX_EPOLL_EVWR) != 0) {
-		EV_SET(kevent++, fd, EVFILT_WRITE, *kev_flags, 0, 0, 0);
+		EV_SET(kevent, fd, EVFILT_WRITE, *kev_flags, 0, 0, 0);
+		kevent->ext[0] = l_event->data;
+		kevent->ext[1] = fp->f_type;
+		++kevent;
+		++(*nkevents);
+	} else if (fp->f_type == DTYPE_SOCKET) {
+		/* Always set EVFILT_WRITE to catch socket's EPOLLHUP events */
+		EV_SET(kevent, fd, EVFILT_WRITE, *kev_flags, NOTE_LOWAT,
+		    INT_MAX, 0);
+		kevent->ext[0] = l_event->data;
+		kevent->ext[1] = fp->f_type;
+		++kevent;
+		++(*nkevents);
+		if ((levents & LINUX_EPOLL_EVRD) == 0)
+			need_prot = true;
+	}
+	if (need_prot) {
+		/*
+		 * Add protective event to prevent clobbering of fflags and
+		 * data fields of kevents in epoll_fd_registered() routine.
+		 */
+		EV_SET(kevent, fd, EVFILT_EMPTY, EV_ADD | EV_DISABLE, 0, 0, 0);
+		++kevent;
 		++(*nkevents);
 	}
 
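Note: a hedged userland sketch of the NOTE_LOWAT trick used in the two socket branches above. Registering EVFILT_READ with the low-water mark pushed to INT_MAX is expected to suppress ordinary data-flow wakeups while the EOF condition (peer shutdown) is still reported, which is what lets EPOLLRDHUP-only and hangup-only registrations work. The program and the exact delivery semantics are illustrative, not taken from the patch:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/socket.h>
    #include <limits.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        struct kevent kev, out;
        int kq, sv[2];

        socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
        kq = kqueue();

        /* Same registration shape as the EPOLLRDHUP-only branch above. */
        EV_SET(&kev, sv[0], EVFILT_READ, EV_ADD | EV_ENABLE, NOTE_LOWAT,
            INT_MAX, NULL);
        kevent(kq, &kev, 1, NULL, 0, NULL);

        write(sv[1], "x", 1);       /* one byte: far below the watermark */
        shutdown(sv[1], SHUT_WR);   /* peer half-close: EV_EOF expected */

        if (kevent(kq, NULL, 0, &out, 1, NULL) == 1)
            printf("filter=%d eof=%d\n", (int)out.filter,
                (out.flags & EV_EOF) != 0);
        return (0);
    }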
@@ Lines 329-335 @@ epoll_to_kevent(struct thread *td, struct file *epfp,

 
 	pem = pem_find(p);
 	KASSERT(pem != NULL, ("epoll proc emuldata not found.\n"));
-	KASSERT(pem->epoll != NULL, ("epoll proc epolldata not found.\n"));
 
 	LINUX_PEM_XLOCK(pem);
 	if ((pem->flags & LINUX_XUNSUP_EPOLL) == 0) {
@@ Lines 354-373 @@ static void

 kevent_to_epoll(struct kevent *kevent, struct epoll_event *l_event)
 {
 
+	l_event->data = kevent->ext[0];
+
 	if ((kevent->flags & EV_ERROR) != 0) {
 		l_event->events = LINUX_EPOLLERR;
 		return;
 	}
 
-	/* XXX EPOLLPRI, EPOLLHUP */
+	/* XXX EPOLLPRI */
 	switch (kevent->filter) {
 	case EVFILT_READ:
-		l_event->events = LINUX_EPOLLIN;
 		if ((kevent->flags & EV_EOF) != 0)
-			l_event->events |= LINUX_EPOLLRDHUP;
+			l_event->events = (kevent->ext[1] == DTYPE_SOCKET) ?
+			    LINUX_EPOLLRDHUP | LINUX_EPOLLIN : LINUX_EPOLLHUP;
+		else
+			l_event->events = LINUX_EPOLLIN;
 		break;
 	case EVFILT_WRITE:
-		l_event->events = LINUX_EPOLLOUT;
+		/*
+		 * XXX: socket's EPOLLHUP requires EV_EOF from both send and
+		 * receive directions at the same time. As now only send is
+		 * taken into account, it is posible to erroneously trigger
+		 * EPOLLHUP with shutdown(fd, SHUT_WR) call.
+		 */
+		if ((kevent->flags & EV_EOF) != 0)
+			l_event->events = LINUX_EPOLLHUP;
+		else
+			l_event->events = LINUX_EPOLLOUT;
 		break;
 	}
 }
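Note: the Linux behaviour this mapping is aiming for, as a small illustrative userland program (not part of the patch): a peer shutdown(SHUT_WR) should surface as EPOLLIN|EPOLLRDHUP on a connected socket, while EPOLLHUP is reserved for the case where both directions are gone, which is exactly the corner the XXX comment above flags for the EVFILT_WRITE side.

    #include <sys/epoll.h>
    #include <sys/socket.h>
    #include <stdio.h>

    int
    main(void)
    {
        struct epoll_event ev = { .events = EPOLLIN | EPOLLRDHUP }, out;
        int epfd, sv[2];

        socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
        epfd = epoll_create1(0);
        epoll_ctl(epfd, EPOLL_CTL_ADD, sv[0], &ev);

        shutdown(sv[1], SHUT_WR);   /* read direction reaches EOF */

        if (epoll_wait(epfd, &out, 1, -1) == 1)
            printf("RDHUP=%d HUP=%d IN=%d\n",
                (out.events & EPOLLRDHUP) != 0,
                (out.events & EPOLLHUP) != 0,
                (out.events & EPOLLIN) != 0);
        return (0);
    }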
@@ Lines 382-411 @@ static int

 epoll_kev_copyout(void *arg, struct kevent *kevp, int count)
 {
 	struct epoll_copyout_args *args;
-	struct linux_pemuldata *pem;
-	struct epoll_emuldata *emd;
 	struct epoll_event *eep;
-	int error, fd, i;
+	int error, i;
 
 	args = (struct epoll_copyout_args*) arg;
 	eep = malloc(sizeof(*eep) * count, M_EPOLL, M_WAITOK | M_ZERO);
 
-	pem = pem_find(args->p);
-	KASSERT(pem != NULL, ("epoll proc emuldata not found.\n"));
-	LINUX_PEM_SLOCK(pem);
-	emd = pem->epoll;
-	KASSERT(emd != NULL, ("epoll proc epolldata not found.\n"));
-
-	for (i = 0; i < count; i++) {
+	for (i = 0; i < count; i++)
 		kevent_to_epoll(&kevp[i], &eep[i]);
 
-		fd = kevp[i].ident;
-		KASSERT(fd <= emd->fdc, ("epoll user data vector"
-		    " is too small.\n"));
-		eep[i].data = emd->udata[fd];
-	}
-	LINUX_PEM_SUNLOCK(pem);
-
 	error = copyout(eep, args->leventlist, count * sizeof(*eep));
 	if (error == 0) {
 		args->leventlist += count;
linux_epoll_ctl(struct thread *td, struct linux_epoll_ctl_args *args)
Link Here
|
| 445 |
{ |
431 |
{ |
| 446 |
struct file *epfp, *fp; |
432 |
struct file *epfp, *fp; |
| 447 |
struct epoll_copyin_args ciargs; |
433 |
struct epoll_copyin_args ciargs; |
| 448 |
struct kevent kev[2]; |
434 |
struct kevent kev[3]; |
| 449 |
struct kevent_copyops k_ops = { &ciargs, |
435 |
struct kevent_copyops k_ops = { &ciargs, |
| 450 |
NULL, |
436 |
NULL, |
| 451 |
epoll_kev_copyin}; |
437 |
epoll_kev_copyin}; |
|
Lines 485-491
linux_epoll_ctl(struct thread *td, struct linux_epoll_ctl_args *args)
Link Here
|
| 485 |
|
471 |
|
| 486 |
if (args->op != LINUX_EPOLL_CTL_DEL) { |
472 |
if (args->op != LINUX_EPOLL_CTL_DEL) { |
| 487 |
kev_flags = EV_ADD | EV_ENABLE; |
473 |
kev_flags = EV_ADD | EV_ENABLE; |
| 488 |
error = epoll_to_kevent(td, epfp, args->fd, &le, |
474 |
error = epoll_to_kevent(td, fp, args->fd, &le, |
| 489 |
&kev_flags, kev, &nchanges); |
475 |
&kev_flags, kev, &nchanges); |
| 490 |
if (error != 0) |
476 |
if (error != 0) |
| 491 |
goto leave0; |
477 |
goto leave0; |
|
Lines 499-517
linux_epoll_ctl(struct thread *td, struct linux_epoll_ctl_args *args)
Link Here
|
| 499 |
break; |
485 |
break; |
| 500 |
|
486 |
|
| 501 |
case LINUX_EPOLL_CTL_ADD: |
487 |
case LINUX_EPOLL_CTL_ADD: |
| 502 |
/* |
488 |
if (epoll_fd_registered(td, epfp, fp, args->fd)) { |
| 503 |
* kqueue_register() return ENOENT if event does not exists |
|
|
| 504 |
* and the EV_ADD flag is not set. Reset EV_ENABLE flag to |
| 505 |
* avoid accidental activation of fired oneshot events. |
| 506 |
*/ |
| 507 |
kev[0].flags &= ~(EV_ADD | EV_ENABLE); |
| 508 |
error = kqfd_register(args->epfd, &kev[0], td, M_WAITOK); |
| 509 |
if (error != ENOENT) { |
| 510 |
error = EEXIST; |
489 |
error = EEXIST; |
| 511 |
goto leave0; |
490 |
goto leave0; |
| 512 |
} |
491 |
} |
| 513 |
error = 0; |
|
|
| 514 |
kev[0].flags |= (EV_ADD | EV_ENABLE); |
| 515 |
break; |
492 |
break; |
| 516 |
|
493 |
|
| 517 |
case LINUX_EPOLL_CTL_DEL: |
494 |
case LINUX_EPOLL_CTL_DEL: |
|
Lines 524-531
linux_epoll_ctl(struct thread *td, struct linux_epoll_ctl_args *args)
Link Here
|
| 524 |
goto leave0; |
501 |
goto leave0; |
| 525 |
} |
502 |
} |
| 526 |
|
503 |
|
| 527 |
epoll_fd_install(td, args->fd, le.data); |
|
|
| 528 |
|
| 529 |
error = kern_kevent_fp(td, epfp, nchanges, 0, &k_ops, NULL); |
504 |
error = kern_kevent_fp(td, epfp, nchanges, 0, &k_ops, NULL); |
| 530 |
|
505 |
|
| 531 |
leave0: |
506 |
leave0: |
|
Lines 562-574
linux_epoll_wait_common(struct thread *td, int epfd, struct epoll_event *events,
Link Here
|
| 562 |
return (error); |
537 |
return (error); |
| 563 |
if (epfp->f_type != DTYPE_KQUEUE) { |
538 |
if (epfp->f_type != DTYPE_KQUEUE) { |
| 564 |
error = EINVAL; |
539 |
error = EINVAL; |
| 565 |
goto leave1; |
540 |
goto leave; |
| 566 |
} |
541 |
} |
| 567 |
if (uset != NULL) { |
542 |
if (uset != NULL) { |
| 568 |
error = kern_sigprocmask(td, SIG_SETMASK, uset, |
543 |
error = kern_sigprocmask(td, SIG_SETMASK, uset, |
| 569 |
&omask, 0); |
544 |
&omask, 0); |
| 570 |
if (error != 0) |
545 |
if (error != 0) |
| 571 |
goto leave1; |
546 |
goto leave; |
| 572 |
td->td_pflags |= TDP_OLDMASK; |
547 |
td->td_pflags |= TDP_OLDMASK; |
| 573 |
/* |
548 |
/* |
| 574 |
* Make sure that ast() is called on return to |
549 |
* Make sure that ast() is called on return to |
|
Lines 586-596
linux_epoll_wait_common(struct thread *td, int epfd, struct epoll_event *events,
Link Here
|
| 586 |
coargs.count = 0; |
561 |
coargs.count = 0; |
| 587 |
coargs.error = 0; |
562 |
coargs.error = 0; |
| 588 |
|
563 |
|
| 589 |
if (timeout != -1) { |
564 |
if (timeout >= 0) { |
| 590 |
if (timeout < 0) { |
|
|
| 591 |
error = EINVAL; |
| 592 |
goto leave0; |
| 593 |
} |
| 594 |
/* Convert from milliseconds to timespec. */ |
565 |
/* Convert from milliseconds to timespec. */ |
| 595 |
ts.tv_sec = timeout / 1000; |
566 |
ts.tv_sec = timeout / 1000; |
| 596 |
ts.tv_nsec = (timeout % 1000) * 1000000; |
567 |
ts.tv_nsec = (timeout % 1000) * 1000000; |
|
Lines 610-620
linux_epoll_wait_common(struct thread *td, int epfd, struct epoll_event *events,
Link Here
|
| 610 |
if (error == 0) |
581 |
if (error == 0) |
| 611 |
td->td_retval[0] = coargs.count; |
582 |
td->td_retval[0] = coargs.count; |
| 612 |
|
583 |
|
| 613 |
leave0: |
|
|
| 614 |
if (uset != NULL) |
584 |
if (uset != NULL) |
| 615 |
error = kern_sigprocmask(td, SIG_SETMASK, &omask, |
585 |
error = kern_sigprocmask(td, SIG_SETMASK, &omask, |
| 616 |
NULL, 0); |
586 |
NULL, 0); |
| 617 |
leave1: |
587 |
leave: |
| 618 |
fdrop(epfp, td); |
588 |
fdrop(epfp, td); |
| 619 |
return (error); |
589 |
return (error); |
| 620 |
} |
590 |
} |
|
Lines 651-657
linux_epoll_pwait(struct thread *td, struct linux_epoll_pwait_args *args)
Link Here
|
| 651 |
} |
621 |
} |
| 652 |
|
622 |
|
| 653 |
static int |
623 |
static int |
| 654 |
epoll_delete_event(struct thread *td, struct file *epfp, int fd, int filter) |
624 |
epoll_register_kevent(struct thread *td, struct file *epfp, int fd, int filter, |
|
|
625 |
unsigned int flags) |
| 655 |
{ |
626 |
{ |
| 656 |
struct epoll_copyin_args ciargs; |
627 |
struct epoll_copyin_args ciargs; |
| 657 |
struct kevent kev; |
628 |
struct kevent kev; |
|
Lines 660-680
epoll_delete_event(struct thread *td, struct file *epfp, int fd, int filter)
Link Here
|
| 660 |
epoll_kev_copyin}; |
631 |
epoll_kev_copyin}; |
| 661 |
|
632 |
|
| 662 |
ciargs.changelist = &kev; |
633 |
ciargs.changelist = &kev; |
| 663 |
EV_SET(&kev, fd, filter, EV_DELETE | EV_DISABLE, 0, 0, 0); |
634 |
EV_SET(&kev, fd, filter, flags, 0, 0, 0); |
| 664 |
|
635 |
|
| 665 |
return (kern_kevent_fp(td, epfp, 1, 0, &k_ops, NULL)); |
636 |
return (kern_kevent_fp(td, epfp, 1, 0, &k_ops, NULL)); |
| 666 |
} |
637 |
} |
| 667 |
|
638 |
|
|
|
639 |
static int |
| 640 |
epoll_fd_registered(struct thread *td, struct file *epfp, struct file *fp, |
| 641 |
int fd) |
| 642 |
{ |
| 643 |
/* |
| 644 |
* Set empty filter flags to avoid accidental modification of already |
| 645 |
* registered events. In the case of re-registration: |
| 646 |
* 1. If event does not exists kevent() does nothing and returns ENOENT |
| 647 |
* 2. If event does exists, it's enabled/disabled state is preserved |
| 648 |
* but fflags, data and udata fields are overwritten. |
| 649 |
* p.2 means that we can not store user's context pointer in udata. |
| 650 |
* The order of checking is important for sockets and should be in line |
| 651 |
* with epoll_to_kevent() routine. |
| 652 |
*/ |
| 653 |
if ((fp->f_type == DTYPE_SOCKET && |
| 654 |
epoll_register_kevent(td, epfp, fd, EVFILT_EMPTY, 0) != ENOENT) || |
| 655 |
epoll_register_kevent(td, epfp, fd, EVFILT_READ, 0) != ENOENT || |
| 656 |
epoll_register_kevent(td, epfp, fd, EVFILT_WRITE, 0) != ENOENT) |
| 657 |
return (1); |
| 658 |
|
| 659 |
return (0); |
| 660 |
} |
| 661 |
|
| 668 |
static int |
662 |
static int |
| 669 |
epoll_delete_all_events(struct thread *td, struct file *epfp, int fd) |
663 |
epoll_delete_all_events(struct thread *td, struct file *epfp, int fd) |
| 670 |
{ |
664 |
{ |
| 671 |
int error1, error2; |
665 |
int error1, error2, error3; |
| 672 |
|
666 |
|
| 673 |
error1 = epoll_delete_event(td, epfp, fd, EVFILT_READ); |
667 |
error1 = epoll_register_kevent(td, epfp, fd, EVFILT_READ, EV_DELETE); |
| 674 |
error2 = epoll_delete_event(td, epfp, fd, EVFILT_WRITE); |
668 |
error2 = epoll_register_kevent(td, epfp, fd, EVFILT_WRITE, EV_DELETE); |
|
|
669 |
error3 = epoll_register_kevent(td, epfp, fd, EVFILT_EMPTY, EV_DELETE); |
| 675 |
|
670 |
|
| 676 |
/* return 0 if at least one result positive */ |
671 |
/* return 0 if at least one result positive */ |
| 677 |
return (error1 == 0 ? 0 : error2); |
672 |
return (error1 == 0 ? 0 : (error2 == 0 ? 0 : error3)); |
| 678 |
} |
673 |
} |
| 679 |
|
674 |
|
| 680 |
static int |
675 |
static int |
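Note: a hedged userland rendition of the probe epoll_fd_registered() performs, illustrative only: submitting a change without EV_ADD asks kqueue about an existing registration, and per the comment above an unregistered (ident, filter) pair is expected to come back as ENOENT. The same comment also names the cost of a hit, namely that fflags, data and udata get overwritten, which is why the protective EVFILT_EMPTY event exists and why the order of checks matters for sockets.

    #include <sys/types.h>
    #include <sys/event.h>
    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Returns 1 if (fd, filter) is already registered on kq, 0 otherwise. */
    static int
    filter_registered(int kq, int fd, short filter)
    {
        struct kevent kev;

        EV_SET(&kev, fd, filter, 0, 0, 0, NULL);    /* no EV_ADD: probe only */
        if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1 && errno == ENOENT)
            return (0);
        return (1);
    }

    int
    main(void)
    {
        int kq = kqueue();

        printf("stdin read filter registered: %d\n",
            filter_registered(kq, STDIN_FILENO, EVFILT_READ));
        return (0);
    }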