/*
 * fs/eventpoll.c (Efficient event polling implementation)
 * Copyright (C) 2001,...,2007 Davide Libenzi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Davide Libenzi <davidel@xmailserver.org>
 *
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <asm/atomic.h>
/*
 * LOCKING:
 * There are three levels of locking required by epoll:
 *
 * 1) epmutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (spinlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * Then we also need a global mutex to serialize eventpoll_release_file()
 * and ep_free().
 * This mutex is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is possible to drop "ep->mtx" and use the global
 * mutex "epmutex" (together with "ep->lock") to have it working,
 * but having "ep->mtx" makes the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" guarantees
 * better scalability.
 */
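
/*
 * Illustrative sketch (not part of this file): the nested acquisition
 * implied by the ordering above, on a path that needed all three locks,
 * would look like the following ("flags" being a local unsigned long):
 *
 *	mutex_lock(&epmutex);			// 1) global, rare paths only
 *	mutex_lock(&ep->mtx);			// 2) per-instance mutex
 *	spin_lock_irqsave(&ep->lock, flags);	// 3) innermost spinlock
 *	...
 *	spin_unlock_irqrestore(&ep->lock, flags);
 *	mutex_unlock(&ep->mtx);
 *	mutex_unlock(&epmutex);
 *
 * Acquiring in any other order risks an ABBA deadlock.
 */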
#define DEBUG_EPOLL 0

#if DEBUG_EPOLL > 0
#define DPRINTK(x) printk x
#define DNPRINTK(n, x) do { if ((n) <= DEBUG_EPOLL) printk x; } while (0)
#else /* #if DEBUG_EPOLL > 0 */
#define DPRINTK(x) (void) 0
#define DNPRINTK(n, x) (void) 0
#endif /* #if DEBUG_EPOLL > 0 */

#define DEBUG_EPI 0

#if DEBUG_EPI != 0
#define EPI_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else /* #if DEBUG_EPI != 0 */
#define EPI_SLAB_DEBUG 0
#endif /* #if DEBUG_EPI != 0 */

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)

/* Maximum number of poll wake up nests we are allowing */
#define EP_MAX_POLLWAKE_NESTS 4

/* Maximum msec timeout value storable in a long int */
#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)
struct epoll_filefd {
        struct file *file;
        int fd;
};

/*
 * Node that is linked into the "wake_task_list" member of the
 * "struct poll_safewake". It is used to keep track of all tasks that
 * are currently inside the wake_up() code, in order to:
 * 1) short-circuit wakeups coming from the same task and the same
 *    wait queue head (a loop);
 * 2) allow a maximum nesting depth of epoll descriptor inclusion;
 * 3) let wakeups coming from other tasks pass through.
 */
struct wake_task_node {
        struct list_head llink;
        struct task_struct *task;
        wait_queue_head_t *wq;
};

/*
 * This is used to implement the safe poll wake up, avoiding reentering
 * the poll callback from inside wake_up().
 */
struct poll_safewake {
        struct list_head wake_task_list;
        spinlock_t lock;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 */
struct epitem {
        /* RB tree node used to link this structure to the eventpoll RB tree */
        struct rb_node rbn;

        /* List header used to link this structure to the eventpoll ready list */
        struct list_head rdllink;

        /*
         * Works together with "struct eventpoll"->ovflist in keeping the
         * singly linked chain of items.
         */
        struct epitem *next;

        /* The file descriptor information this item refers to */
        struct epoll_filefd ffd;

        /* Number of active wait queues attached to poll operations */
        int nwait;

        /* List containing poll wait queues */
        struct list_head pwqlist;

        /* The "container" of this item */
        struct eventpoll *ep;

        /* List header used to link this item to the "struct file" items list */
        struct list_head fllink;

        /* The structure that describes the interested events and the source fd */
        struct epoll_event event;
};
/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
        /* Protects access to this structure */
        spinlock_t lock;

        /*
         * This mutex is used to ensure that files are not removed
         * while epoll is using them. This is held during the event
         * collection loop, the file cleanup path, the epoll file exit
         * code and the ctl operations.
         */
        struct mutex mtx;

        /* Wait queue used by sys_epoll_wait() */
        wait_queue_head_t wq;

        /* Wait queue used by file->poll() */
        wait_queue_head_t poll_wait;

        /* List of ready file descriptors */
        struct list_head rdllist;

        /* RB tree root used to store monitored fd structs */
        struct rb_root rbr;

        /*
         * This is a singly linked list that chains all the "struct epitem"
         * that got ready while transferring ready events to userspace
         * w/out holding ->lock.
         */
        struct epitem *ovflist;
};

/* Wait structure used by the poll hooks */
struct eppoll_entry {
        /* List header used to link this structure to the "struct epitem" */
        struct list_head llink;

        /* The "base" pointer is set to the container "struct epitem" */
        void *base;

        /*
         * Wait queue item that will be linked to the target file wait
         * queue head.
         */
        wait_queue_t wait;

        /* The wait queue head to which the "wait" wait queue item is linked */
        wait_queue_head_t *whead;
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
        poll_table pt;
        struct epitem *epi;
};
/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static struct mutex epmutex;

/* Safe wake up implementation */
static struct poll_safewake psw;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
                              struct file *file, int fd)
{
        ffd->file = file;
        ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
                             struct epoll_filefd *p2)
{
        return (p1->file > p2->file ? +1 :
                (p1->file < p2->file ? -1 : p1->fd - p2->fd));
}

/* Special initialization for the RB tree node to detect linkage */
static inline void ep_rb_initnode(struct rb_node *n)
{
        rb_set_parent(n, n);
}

/* Removes a node from the RB tree and marks it for a fast is-linked check */
static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r)
{
        rb_erase(n, r);
        rb_set_parent(n, n);
}

/* Fast check to verify that the item is linked to the main RB tree */
static inline int ep_rb_linked(struct rb_node *n)
{
        return rb_parent(n) != n;
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
        return !list_empty(p);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
        return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
        return container_of(p, struct ep_pqueue, pt)->epi;
}

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
        return op != EPOLL_CTL_DEL;
}
/* Initialize the poll safe wake up structure */
static void ep_poll_safewake_init(struct poll_safewake *psw)
{
        INIT_LIST_HEAD(&psw->wake_task_list);
        spin_lock_init(&psw->lock);
}

/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_POLLWAKE_NESTS times,
 * and we cannot reenter the same wait queue head at all. This allows
 * a hierarchy of epoll file descriptors no more than
 * EP_MAX_POLLWAKE_NESTS deep. We need the irq version of the spin lock
 * because this one gets called by the poll callback, which in turn is
 * called from inside a wake_up(), that might be called from irq context.
 */
static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
{
        int wake_nests = 0;
        unsigned long flags;
        struct task_struct *this_task = current;
        struct list_head *lsthead = &psw->wake_task_list;
        struct wake_task_node *tncur;
        struct wake_task_node tnode;

        spin_lock_irqsave(&psw->lock, flags);

        /* Try to see if the current task is already inside this wakeup call */
        list_for_each_entry(tncur, lsthead, llink) {
                if (tncur->wq == wq ||
                    (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
                        /*
                         * Oops ... loop detected or maximum nest level reached.
                         * We abort this wake by breaking the cycle itself.
                         */
                        spin_unlock_irqrestore(&psw->lock, flags);
                        return;
                }
        }

        /* Add the current task to the list */
        tnode.task = this_task;
        tnode.wq = wq;
        list_add(&tnode.llink, lsthead);

        spin_unlock_irqrestore(&psw->lock, flags);

        /* Do really wake up now */
        wake_up_nested(wq, 1 + wake_nests);

        /* Remove the current task from the list */
        spin_lock_irqsave(&psw->lock, flags);
        list_del(&tnode.llink);
        spin_unlock_irqrestore(&psw->lock, flags);
}
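
/*
 * Userspace sketch (not part of this file) of the nesting the code above
 * guards against: adding one epoll fd inside another makes a wakeup on the
 * inner fd recurse into the outer fd's poll callback.
 *
 *	int inner = epoll_create(1);
 *	int outer = epoll_create(1);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	ev.data.fd = inner;
 *	epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);
 *
 * A chain of such inclusions deeper than EP_MAX_POLLWAKE_NESTS, or a cycle,
 * is cut short by ep_poll_safewake() rather than recursing forever.
 */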
/*
 * This function unregisters poll callbacks from the associated file
 * descriptor. Since this must be called without holding "ep->lock" the
 * atomic exchange trick will protect us from multiple unregistrations.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
        int nwait;
        struct list_head *lsthead = &epi->pwqlist;
        struct eppoll_entry *pwq;

        /* This is called without locks, so we need the atomic exchange */
        nwait = xchg(&epi->nwait, 0);

        if (nwait) {
                while (!list_empty(lsthead)) {
                        pwq = list_first_entry(lsthead, struct eppoll_entry, llink);

                        list_del_init(&pwq->llink);
                        remove_wait_queue(pwq->whead, &pwq->wait);
                        kmem_cache_free(pwq_cache, pwq);
                }
        }
}
/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
        unsigned long flags;
        struct file *file = epi->ffd.file;

        /*
         * Removes poll wait queue hooks. We _have_ to do this without holding
         * the "ep->lock" otherwise a deadlock might occur. This is because of
         * the sequence of the lock acquisition. Here we do "ep->lock" then the
         * wait queue head lock when unregistering the wait queue. The wakeup
         * callback runs holding the wait queue head lock and will call our
         * callback that will try to get "ep->lock".
         */
        ep_unregister_pollwait(ep, epi);

        /* Remove the current item from the list of epoll hooks */
        spin_lock(&file->f_ep_lock);
        if (ep_is_linked(&epi->fllink))
                list_del_init(&epi->fllink);
        spin_unlock(&file->f_ep_lock);

        if (ep_rb_linked(&epi->rbn))
                ep_rb_erase(&epi->rbn, &ep->rbr);

        spin_lock_irqsave(&ep->lock, flags);
        if (ep_is_linked(&epi->rdllink))
                list_del_init(&epi->rdllink);
        spin_unlock_irqrestore(&ep->lock, flags);

        /* At this point it is safe to free the eventpoll item */
        kmem_cache_free(epi_cache, epi);

        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
                     current, ep, file));

        return 0;
}
static void ep_free(struct eventpoll *ep)
{
        struct rb_node *rbp;
        struct epitem *epi;

        /* We need to release all tasks waiting on this file */
        if (waitqueue_active(&ep->poll_wait))
                ep_poll_safewake(&psw, &ep->poll_wait);

        /*
         * We need to lock this because we could be hit by
         * eventpoll_release_file() while we're freeing the "struct eventpoll".
         * We do not need to hold "ep->mtx" here because the epoll file
         * is on the way to be removed and no one has references to it
         * anymore. The only hit might come from eventpoll_release_file() but
         * holding "epmutex" is sufficient here.
         */
        mutex_lock(&epmutex);

        /*
         * Walks through the whole tree by unregistering poll callbacks.
         */
        for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
                epi = rb_entry(rbp, struct epitem, rbn);

                ep_unregister_pollwait(ep, epi);
        }

        /*
         * Walks through the whole tree by freeing each "struct epitem". At this
         * point we are sure no poll callbacks will be lingering around, and also by
         * holding "epmutex" we can be sure that no file cleanup code will hit
         * us during this operation. So we can avoid the lock on "ep->lock".
         */
        while ((rbp = rb_first(&ep->rbr)) != NULL) {
                epi = rb_entry(rbp, struct epitem, rbn);
                ep_remove(ep, epi);
        }

        mutex_unlock(&epmutex);
        mutex_destroy(&ep->mtx);
        kfree(ep);
}
static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
        struct eventpoll *ep = file->private_data;

        if (ep)
                ep_free(ep);

        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
        return 0;
}

static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
        unsigned int pollflags = 0;
        unsigned long flags;
        struct eventpoll *ep = file->private_data;

        /* Insert inside our poll wait queue */
        poll_wait(file, &ep->poll_wait, wait);

        /* Check our condition */
        spin_lock_irqsave(&ep->lock, flags);
        if (!list_empty(&ep->rdllist))
                pollflags = POLLIN | POLLRDNORM;
        spin_unlock_irqrestore(&ep->lock, flags);

        return pollflags;
}

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
        .release = ep_eventpoll_release,
        .poll = ep_eventpoll_poll
};

/* Fast test to see if the file is an eventpoll file */
static inline int is_file_epoll(struct file *f)
{
        return f->f_op == &eventpoll_fops;
}
/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to correctly clean up files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
        struct list_head *lsthead = &file->f_ep_links;
        struct eventpoll *ep;
        struct epitem *epi;

        /*
         * We don't want to get "file->f_ep_lock" because it is not
         * necessary. It is not necessary because we're in the "struct file"
         * cleanup path, and this means that no one is using this file anymore.
         * So, for example, epoll_ctl() cannot hit here since if we reach this
         * point, the file counter already went to zero and fget() would fail.
         * The only hit might come from ep_free() but holding the mutex
         * will correctly serialize the operation. We do need to acquire
         * "ep->mtx" after "epmutex" because ep_remove() requires it when called
         * from anywhere but ep_free().
         */
        mutex_lock(&epmutex);

        while (!list_empty(lsthead)) {
                epi = list_first_entry(lsthead, struct epitem, fllink);

                ep = epi->ep;
                list_del_init(&epi->fllink);
                mutex_lock(&ep->mtx);
                ep_remove(ep, epi);
                mutex_unlock(&ep->mtx);
        }

        mutex_unlock(&epmutex);
}
static int ep_alloc(struct eventpoll **pep)
{
        struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

        if (!ep)
                return -ENOMEM;

        spin_lock_init(&ep->lock);
        mutex_init(&ep->mtx);
        init_waitqueue_head(&ep->wq);
        init_waitqueue_head(&ep->poll_wait);
        INIT_LIST_HEAD(&ep->rdllist);
        ep->rbr = RB_ROOT;
        ep->ovflist = EP_UNACTIVE_PTR;

        *pep = ep;

        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
                     current, ep));
        return 0;
}

/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
        int kcmp;
        struct rb_node *rbp;
        struct epitem *epi, *epir = NULL;
        struct epoll_filefd ffd;

        ep_set_ffd(&ffd, file, fd);
        for (rbp = ep->rbr.rb_node; rbp; ) {
                epi = rb_entry(rbp, struct epitem, rbn);
                kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
                if (kcmp > 0)
                        rbp = rbp->rb_right;
                else if (kcmp < 0)
                        rbp = rbp->rb_left;
                else {
                        epir = epi;
                        break;
                }
        }

        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
                     current, file, epir));

        return epir;
}
/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        int pwake = 0;
        unsigned long flags;
        struct epitem *epi = ep_item_from_wait(wait);
        struct eventpoll *ep = epi->ep;

        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
                     current, epi->ffd.file, epi, ep));

        spin_lock_irqsave(&ep->lock, flags);

        /*
         * If the event mask does not contain any poll(2) event, we consider the
         * descriptor to be disabled. This condition is likely the effect of the
         * EPOLLONESHOT bit that disables the descriptor when an event is received,
         * until the next EPOLL_CTL_MOD is issued.
         */
        if (!(epi->event.events & ~EP_PRIVATE_BITS))
                goto out_unlock;

        /*
         * If we are transferring events to userspace, we can hold no locks
         * (because we're accessing user memory, and because of linux f_op->poll()
         * semantics). All the events that happen during that period of time are
         * chained in ep->ovflist and requeued later on.
         */
        if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
                if (epi->next == EP_UNACTIVE_PTR) {
                        epi->next = ep->ovflist;
                        ep->ovflist = epi;
                }
                goto out_unlock;
        }

        /* If this file is already in the ready list we exit soon */
        if (ep_is_linked(&epi->rdllink))
                goto is_linked;

        list_add_tail(&epi->rdllink, &ep->rdllist);

is_linked:
        /*
         * Wake up ( if active ) both the eventpoll wait list and the ->poll()
         * wait list.
         */
        if (waitqueue_active(&ep->wq))
                wake_up_locked(&ep->wq);
        if (waitqueue_active(&ep->poll_wait))
                pwake++;

out_unlock:
        spin_unlock_irqrestore(&ep->lock, flags);

        /* We have to call this outside the lock */
        if (pwake)
                ep_poll_safewake(&psw, &ep->poll_wait);

        return 1;
}
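
/*
 * Userspace sketch (not part of this file) of the EPOLLONESHOT behaviour
 * described above: after a one-shot event fires, the item stays disabled
 * until it is re-armed with EPOLL_CTL_MOD. "epfd" and "sock" are assumed
 * to be valid descriptors.
 *
 *	struct epoll_event ev = { .events = EPOLLIN | EPOLLONESHOT };
 *	ev.data.fd = sock;
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev);
 *	...
 *	// After handling the event, re-arm the descriptor:
 *	ev.events = EPOLLIN | EPOLLONESHOT;
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, sock, &ev);
 */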
/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
                                 poll_table *pt)
{
        struct epitem *epi = ep_item_from_epqueue(pt);
        struct eppoll_entry *pwq;

        if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
                init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
                pwq->whead = whead;
                pwq->base = epi;
                add_wait_queue(whead, &pwq->wait);
                list_add_tail(&pwq->llink, &epi->pwqlist);
                epi->nwait++;
        } else {
                /* We have to signal that an error occurred */
                epi->nwait = -1;
        }
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
        int kcmp;
        struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
        struct epitem *epic;

        while (*p) {
                parent = *p;
                epic = rb_entry(parent, struct epitem, rbn);
                kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
                if (kcmp > 0)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&epi->rbn, parent, p);
        rb_insert_color(&epi->rbn, &ep->rbr);
}
/*
 * Must be called with "mtx" held.
 */
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
                     struct file *tfile, int fd)
{
        int error, revents, pwake = 0;
        unsigned long flags;
        struct epitem *epi;
        struct ep_pqueue epq;

        error = -ENOMEM;
        if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
                goto error_return;

        /* Item initialization follows here ... */
        ep_rb_initnode(&epi->rbn);
        INIT_LIST_HEAD(&epi->rdllink);
        INIT_LIST_HEAD(&epi->fllink);
        INIT_LIST_HEAD(&epi->pwqlist);
        epi->ep = ep;
        ep_set_ffd(&epi->ffd, tfile, fd);
        epi->event = *event;
        epi->nwait = 0;
        epi->next = EP_UNACTIVE_PTR;

        /* Initialize the poll table using the queue callback */
        epq.epi = epi;
        init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

        /*
         * Attach the item to the poll hooks and get current event bits.
         * We can safely use the file* here because its usage count has
         * been increased by the caller of this function. Note that after
         * this operation completes, the poll callback can start hitting
         * the new item.
         */
        revents = tfile->f_op->poll(tfile, &epq.pt);

        /*
         * We have to check if something went wrong during the poll wait queue
         * install process. Namely an allocation for a wait queue failed due
         * to high memory pressure.
         */
        if (epi->nwait < 0)
                goto error_unregister;

        /* Add the current item to the list of active epoll hooks for this file */
        spin_lock(&tfile->f_ep_lock);
        list_add_tail(&epi->fllink, &tfile->f_ep_links);
        spin_unlock(&tfile->f_ep_lock);

        /*
         * Add the current item to the RB tree. All RB tree operations are
         * protected by "mtx", and ep_insert() is called with "mtx" held.
         */
        ep_rbtree_insert(ep, epi);

        /* We have to drop the new item inside our item list to keep track of it */
        spin_lock_irqsave(&ep->lock, flags);

        /* If the file is already "ready" we drop it inside the ready list */
        if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
                list_add_tail(&epi->rdllink, &ep->rdllist);

                /* Notify waiting tasks that events are available */
                if (waitqueue_active(&ep->wq))
                        wake_up_locked(&ep->wq);
                if (waitqueue_active(&ep->poll_wait))
                        pwake++;
        }

        spin_unlock_irqrestore(&ep->lock, flags);

        /* We have to call this outside the lock */
        if (pwake)
                ep_poll_safewake(&psw, &ep->poll_wait);

        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n",
                     current, ep, tfile, fd));

        return 0;

error_unregister:
        ep_unregister_pollwait(ep, epi);

        /*
         * We need to do this because an event could have arrived on some
         * allocated wait queue. Note that we don't care about the ep->ovflist
         * list, since that is used/cleaned only inside a section bound by "mtx".
         * And ep_insert() is called with "mtx" held.
         */
        spin_lock_irqsave(&ep->lock, flags);
        if (ep_is_linked(&epi->rdllink))
                list_del_init(&epi->rdllink);
        spin_unlock_irqrestore(&ep->lock, flags);

        kmem_cache_free(epi_cache, epi);
error_return:
        return error;
}
/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status. Must be called with "mtx" held.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
        int pwake = 0;
        unsigned int revents;
        unsigned long flags;

        /*
         * Set the new event interest mask before calling f_op->poll(), otherwise
         * a potential race might occur. In fact if we do this operation inside
         * the lock, an event might happen between the f_op->poll() call and the
         * new event set registering.
         */
        epi->event.events = event->events;

        /*
         * Get current event bits. We can safely use the file* here because
         * its usage count has been increased by the caller of this function.
         */
        revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);

        spin_lock_irqsave(&ep->lock, flags);

        /* Copy the data member from inside the lock */
        epi->event.data = event->data;

        /*
         * If the item is "hot" and it is not registered inside the ready
         * list, push it inside.
         */
        if (revents & event->events) {
                if (!ep_is_linked(&epi->rdllink)) {
                        list_add_tail(&epi->rdllink, &ep->rdllist);

                        /* Notify waiting tasks that events are available */
                        if (waitqueue_active(&ep->wq))
                                wake_up_locked(&ep->wq);
                        if (waitqueue_active(&ep->poll_wait))
                                pwake++;
                }
        }

        spin_unlock_irqrestore(&ep->lock, flags);

        /* We have to call this outside the lock */
        if (pwake)
                ep_poll_safewake(&psw, &ep->poll_wait);

        return 0;
}
static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events,
                          int maxevents)
{
        int eventcnt, error = -EFAULT, pwake = 0;
        unsigned int revents;
        unsigned long flags;
        struct epitem *epi, *nepi;
        struct list_head txlist;

        INIT_LIST_HEAD(&txlist);

        /*
         * We need to lock this because we could be hit by
         * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
         */
        mutex_lock(&ep->mtx);

        /*
         * Steal the ready list, and re-init the original one to the
         * empty list. Also, set ep->ovflist to NULL so that events
         * happening while looping w/out locks, are not lost. We cannot
         * have the poll callback to queue directly on ep->rdllist,
         * because we are doing it in the loop below, in a lockless way.
         */
        spin_lock_irqsave(&ep->lock, flags);
        list_splice(&ep->rdllist, &txlist);
        INIT_LIST_HEAD(&ep->rdllist);
        ep->ovflist = NULL;
        spin_unlock_irqrestore(&ep->lock, flags);

        /*
         * We can loop without lock because this is a task private list.
         * We just spliced ep->rdllist out into "txlist" above.
         * Items cannot vanish during the loop because we are holding "mtx".
         */
        for (eventcnt = 0; !list_empty(&txlist) && eventcnt < maxevents;) {
                epi = list_first_entry(&txlist, struct epitem, rdllink);

                list_del_init(&epi->rdllink);

                /*
                 * Get the ready file event set. We can safely use the file
                 * because we are holding the "mtx" and this will guarantee
                 * that both the file and the item will not vanish.
                 */
                revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
                revents &= epi->event.events;

                /*
                 * If the event mask intersects the caller-requested one,
                 * deliver the event to userspace. Again, we are holding
                 * "mtx", so no operations coming from userspace can change
                 * the item.
                 */
                if (revents) {
                        if (__put_user(revents,
                                       &events[eventcnt].events) ||
                            __put_user(epi->event.data,
                                       &events[eventcnt].data))
                                goto errxit;
                        if (epi->event.events & EPOLLONESHOT)
                                epi->event.events &= EP_PRIVATE_BITS;
                        eventcnt++;
                }

                /*
                 * At this point, no one can insert into ep->rdllist besides
                 * us. The epoll_ctl() callers are locked out by us holding
                 * "mtx" and the poll callback will queue them in ep->ovflist.
                 */
                if (!(epi->event.events & EPOLLET) &&
                    (revents & epi->event.events))
                        list_add_tail(&epi->rdllink, &ep->rdllist);
        }
        error = 0;

errxit:

        spin_lock_irqsave(&ep->lock, flags);

        /*
         * During the time we spent in the loop above, some other events
         * might have been queued by the poll callback. We re-insert them
         * here (in case they are not already queued, or they're one-shot).
         */
        for (nepi = ep->ovflist; (epi = nepi) != NULL;
             nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
                if (!ep_is_linked(&epi->rdllink) &&
                    (epi->event.events & ~EP_PRIVATE_BITS))
                        list_add_tail(&epi->rdllink, &ep->rdllist);
        }

        /*
         * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
         * releasing the lock, events will be queued in the normal way inside
         * ep->rdllist.
         */
        ep->ovflist = EP_UNACTIVE_PTR;

        /*
         * In case of error in the event-send loop, or in case the number of
         * ready events exceeds the userspace limit, we need to splice the
         * "txlist" back inside ep->rdllist.
         */
        list_splice(&txlist, &ep->rdllist);

        if (!list_empty(&ep->rdllist)) {
                /*
                 * Wake up (if active) both the eventpoll wait list and the ->poll()
                 * wait list (delayed after we release the lock).
                 */
                if (waitqueue_active(&ep->wq))
                        wake_up_locked(&ep->wq);
                if (waitqueue_active(&ep->poll_wait))
                        pwake++;
        }
        spin_unlock_irqrestore(&ep->lock, flags);

        mutex_unlock(&ep->mtx);

        /* We have to call this outside the lock */
        if (pwake)
                ep_poll_safewake(&psw, &ep->poll_wait);

        return eventcnt == 0 ? error : eventcnt;
}
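
/*
 * Note on the requeue near the end of the loop above: items without EPOLLET
 * are put back on ep->rdllist, which is what gives epoll its level-triggered
 * default. A userspace sketch (not part of this file) of the difference:
 *
 *	struct epoll_event ev = { .events = EPOLLIN };		// level-triggered
 *	// struct epoll_event ev = { .events = EPOLLIN | EPOLLET }; // edge-triggered
 *	ev.data.fd = sock;
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev);
 *
 * With the level-triggered mask, epoll_wait() keeps reporting "sock" while
 * unread data remains; with EPOLLET it is reported only on new arrivals.
 */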
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
                   int maxevents, long timeout)
{
        int res, eavail;
        unsigned long flags;
        long jtimeout;
        wait_queue_t wait;

        /*
         * Calculate the timeout by checking for the "infinite" value ( -1 )
         * and the overflow condition. The passed timeout is in milliseconds;
         * that's why we compute (t * HZ) / 1000.
         */
        jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
                MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;

retry:
        spin_lock_irqsave(&ep->lock, flags);

        res = 0;
        if (list_empty(&ep->rdllist)) {
                /*
                 * We don't have any available event to return to the caller.
                 * We need to sleep here, and we will be woken up by
                 * ep_poll_callback() when events become available.
                 */
                init_waitqueue_entry(&wait, current);
                wait.flags |= WQ_FLAG_EXCLUSIVE;
                __add_wait_queue(&ep->wq, &wait);

                for (;;) {
                        /*
                         * We don't want to sleep if the ep_poll_callback() sends us
                         * a wakeup in between. That's why we set the task state
                         * to TASK_INTERRUPTIBLE before doing the checks.
                         */
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!list_empty(&ep->rdllist) || !jtimeout)
                                break;
                        if (signal_pending(current)) {
                                res = -EINTR;
                                break;
                        }

                        spin_unlock_irqrestore(&ep->lock, flags);
                        jtimeout = schedule_timeout(jtimeout);
                        spin_lock_irqsave(&ep->lock, flags);
                }
                __remove_wait_queue(&ep->wq, &wait);

                set_current_state(TASK_RUNNING);
        }

        /* Is it worth trying to dig for events ? */
        eavail = !list_empty(&ep->rdllist);

        spin_unlock_irqrestore(&ep->lock, flags);

        /*
         * Try to transfer events to user space. In case we get 0 events and
         * there's still timeout left over, we try again in search of
         * more luck.
         */
        if (!res && eavail &&
            !(res = ep_send_events(ep, events, maxevents)) && jtimeout)
                goto retry;

        return res;
}
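
/*
 * Worked example (illustrative) of the ms-to-jiffies conversion above:
 * the "+ 999" rounds up so a non-zero timeout never becomes zero jiffies.
 *
 *	HZ = 100, timeout = 250 ms  ->  (250 * 100 + 999) / 1000 = 25 jiffies
 *	HZ = 100, timeout =   5 ms  ->  (  5 * 100 + 999) / 1000 =  1 jiffy
 *
 * Without the rounding, the 5 ms case would truncate to 0 and return
 * immediately instead of sleeping for at least one tick.
 */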
/*
 * It opens an eventpoll file descriptor. The "size" parameter is there
 * for historical reasons, when epoll was using a hash instead of an
 * RB tree. With the current implementation, the "size" parameter is ignored
 * (besides sanity checks).
 */
asmlinkage long sys_epoll_create(int size)
{
        int error, fd = -1;
        struct eventpoll *ep;
        struct inode *inode;
        struct file *file;

        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
                     current, size));

        /*
         * Sanity check on the size parameter, and create the internal data
         * structure ( "struct eventpoll" ).
         */
        error = -EINVAL;
        if (size <= 0 || (error = ep_alloc(&ep)) != 0)
                goto error_return;

        /*
         * Creates all the items needed to setup an eventpoll file. That is,
         * a file structure, an inode and a free file descriptor.
         */
        error = anon_inode_getfd(&fd, &inode, &file, "[eventpoll]",
                                 &eventpoll_fops, ep);
        if (error)
                goto error_free;

        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
                     current, size, fd));

        return fd;

error_free:
        ep_free(ep);
error_return:
        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
                     current, size, error));

        return error;
}
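
/*
 * Userspace sketch (not part of this file): since "size" is only sanity
 * checked, any positive value works.
 *
 *	int epfd = epoll_create(1);	// argument must be > 0, otherwise ignored
 *	if (epfd < 0)
 *		perror("epoll_create");
 */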
/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set.
 */
asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
                              struct epoll_event __user *event)
{
        int error;
        struct file *file, *tfile;
        struct eventpoll *ep;
        struct epitem *epi;
        struct epoll_event epds;

        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n",
                     current, epfd, op, fd, event));

        error = -EFAULT;
        if (ep_op_has_event(op) &&
            copy_from_user(&epds, event, sizeof(struct epoll_event)))
                goto error_return;

        /* Get the "struct file *" for the eventpoll file */
        error = -EBADF;
        file = fget(epfd);
        if (!file)
                goto error_return;

        /* Get the "struct file *" for the target file */
        tfile = fget(fd);
        if (!tfile)
                goto error_fput;

        /* The target file descriptor must support poll */
        error = -EPERM;
        if (!tfile->f_op || !tfile->f_op->poll)
                goto error_tgt_fput;

        /*
         * We have to check that the file structure underneath the file descriptor
         * the user passed to us _is_ an eventpoll file. And also we do not permit
         * adding an epoll file descriptor inside itself.
         */
        error = -EINVAL;
        if (file == tfile || !is_file_epoll(file))
                goto error_tgt_fput;

        /*
         * At this point it is safe to assume that the "private_data" contains
         * our own data structure.
         */
        ep = file->private_data;

        mutex_lock(&ep->mtx);

        /*
         * Try to lookup the file inside our RB tree. Since we grabbed "mtx"
         * above, we can be sure to be able to use the item looked up by
         * ep_find() till we release the mutex.
         */
        epi = ep_find(ep, tfile, fd);

        error = -EINVAL;
        switch (op) {
        case EPOLL_CTL_ADD:
                if (!epi) {
                        epds.events |= POLLERR | POLLHUP;
                        error = ep_insert(ep, &epds, tfile, fd);
                } else
                        error = -EEXIST;
                break;
        case EPOLL_CTL_DEL:
                if (epi)
                        error = ep_remove(ep, epi);
                else
                        error = -ENOENT;
                break;
        case EPOLL_CTL_MOD:
                if (epi) {
                        epds.events |= POLLERR | POLLHUP;
                        error = ep_modify(ep, epi, &epds);
                } else
                        error = -ENOENT;
                break;
        }
        mutex_unlock(&ep->mtx);

error_tgt_fput:
        fput(tfile);
error_fput:
        fput(file);
error_return:
        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n",
                     current, epfd, op, fd, event, error));

        return error;
}
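
/*
 * Userspace sketch (not part of this file) of the three control operations
 * dispatched above; "epfd" and "sock" are assumed valid descriptors.
 *
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	ev.data.fd = sock;
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev);	// -EEXIST if already added
 *	ev.events = EPOLLIN | EPOLLOUT;
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, sock, &ev);	// -ENOENT if never added
 *	epoll_ctl(epfd, EPOLL_CTL_DEL, sock, NULL);	// event pointer unused for DEL
 */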
/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_wait(2).
 */
asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
                               int maxevents, int timeout)
{
        int error;
        struct file *file;
        struct eventpoll *ep;

        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n",
                     current, epfd, events, maxevents, timeout));

        /* The maximum number of events must be greater than zero */
        if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
                return -EINVAL;

        /* Verify that the area passed by the user is writable */
        if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
                error = -EFAULT;
                goto error_return;
        }

        /* Get the "struct file *" for the eventpoll file */
        error = -EBADF;
        file = fget(epfd);
        if (!file)
                goto error_return;

        /*
         * We have to check that the file structure underneath the fd
         * the user passed to us _is_ an eventpoll file.
         */
        error = -EINVAL;
        if (!is_file_epoll(file))
                goto error_fput;

        /*
         * At this point it is safe to assume that the "private_data" contains
         * our own data structure.
         */
        ep = file->private_data;

        /* Time to fish for events ... */
        error = ep_poll(ep, events, maxevents, timeout);

error_fput:
        fput(file);
error_return:
        DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n",
                     current, epfd, events, maxevents, timeout, error));

        return error;
}
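
/*
 * Userspace sketch (not part of this file) of a minimal epoll_wait() loop
 * driving the syscall above; "epfd" is assumed to be set up already and
 * handle_event() is a hypothetical application callback.
 *
 *	struct epoll_event evs[64];
 *	for (;;) {
 *		int n = epoll_wait(epfd, evs, 64, -1);	// -1 = wait forever
 *		if (n < 0) {
 *			if (errno == EINTR)
 *				continue;
 *			break;
 *		}
 *		for (int i = 0; i < n; i++)
 *			handle_event(evs[i].data.fd, evs[i].events);
 *	}
 */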
#ifdef TIF_RESTORE_SIGMASK

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_pwait(2).
 */
asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
                int maxevents, int timeout, const sigset_t __user *sigmask,
                size_t sigsetsize)
{
        int error;
        sigset_t ksigmask, sigsaved;

        /*
         * If the caller wants a certain signal mask to be set during the wait,
         * we apply it here.
         */
        if (sigmask) {
                if (sigsetsize != sizeof(sigset_t))
                        return -EINVAL;
                if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
                        return -EFAULT;
                sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }

        error = sys_epoll_wait(epfd, events, maxevents, timeout);

        /*
         * If we changed the signal mask, we need to restore the original one.
         * In case we've got a signal while waiting, we do not restore the
         * signal mask yet, and we allow do_signal() to deliver the signal on
         * the way back to userspace, before the signal mask is restored.
         */
        if (sigmask) {
                if (error == -EINTR) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                               sizeof(sigsaved));
                        set_thread_flag(TIF_RESTORE_SIGMASK);
                } else
                        sigprocmask(SIG_SETMASK, &sigsaved, NULL);
        }

        return error;
}

#endif /* #ifdef TIF_RESTORE_SIGMASK */
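
/*
 * Userspace sketch (not part of this file): epoll_pwait() applies the given
 * signal mask atomically around the wait, closing the race that a separate
 * sigprocmask() + epoll_wait() pair would leave open.
 *
 *	sigset_t waitmask;
 *	sigemptyset(&waitmask);		// unblock everything while waiting
 *	struct epoll_event evs[64];
 *	int n = epoll_pwait(epfd, evs, 64, -1, &waitmask);
 *
 * The glibc wrapper supplies the sigsetsize argument itself; the raw syscall
 * above rejects anything other than sizeof(sigset_t) with -EINVAL.
 */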
static int __init eventpoll_init(void)
{
        mutex_init(&epmutex);

        /* Initialize the structure used to perform safe poll wait head wake ups */
        ep_poll_safewake_init(&psw);

        /* Allocates slab cache used to allocate "struct epitem" items */
        epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
                        0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
                        NULL);

        /* Allocates slab cache used to allocate "struct eppoll_entry" */
        pwq_cache = kmem_cache_create("eventpoll_pwq",
                        sizeof(struct eppoll_entry), 0,
                        EPI_SLAB_DEBUG|SLAB_PANIC, NULL);

        return 0;
}
fs_initcall(eventpoll_init);