/*
 * fs/eventpoll.c ( Efficient event polling implementation )
 * Copyright (C) 2001,...,2003 Davide Libenzi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Davide Libenzi <davidel@xmailserver.org>
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>

/*
 * LOCKING:
 * There are three levels of locking required by epoll :
 *
 * 1) epsem (semaphore)
 * 2) ep->sem (rw_semaphore)
 * 3) ep->lock (rw_lock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, that might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * read-write semaphore (ep->sem). It is acquired on read during
 * the event transfer loop and in write during epoll_ctl(EPOLL_CTL_DEL)
 * and during eventpoll_release_file(). Then we also need a global
 * semaphore to serialize eventpoll_release_file() and ep_free().
 * This semaphore is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is possible to drop the "ep->sem" and to use the global
 * semaphore "epsem" (together with "ep->lock") to have it working,
 * but having "ep->sem" will make the interface more scalable.
 * Events that require holding "epsem" are very rare, while for
 * normal operations the epoll private "ep->sem" will guarantee
 * greater scalability.
 */
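
/*
 * A sketch of the acquisition order described above, as it would appear
 * in a control-path function (illustrative only, not compiled in; the
 * identifiers are the ones defined below in this file):
 *
 *	down(&epsem);				1) global semaphore
 *	down_write(&ep->sem);			2) per-epoll rw-semaphore (may sleep)
 *	write_lock_irqsave(&ep->lock, flags);	3) innermost spinlock (no sleeping)
 *	...
 *	write_unlock_irqrestore(&ep->lock, flags);
 *	up_write(&ep->sem);
 *	up(&epsem);
 */
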
#define EVENTPOLLFS_MAGIC 0x03111965 /* My birthday should work for this :) */

#define DEBUG_EPOLL 0

#if DEBUG_EPOLL > 0
#define DPRINTK(x) printk x
#define DNPRINTK(n, x) do { if ((n) <= DEBUG_EPOLL) printk x; } while (0)
#else /* #if DEBUG_EPOLL > 0 */
#define DPRINTK(x) (void) 0
#define DNPRINTK(n, x) (void) 0
#endif /* #if DEBUG_EPOLL > 0 */

#define DEBUG_EPI 0

#if DEBUG_EPI != 0
#define EPI_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else /* #if DEBUG_EPI != 0 */
#define EPI_SLAB_DEBUG 0
#endif /* #if DEBUG_EPI != 0 */

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)

/* Maximum number of poll wake up nests we are allowing */
#define EP_MAX_POLLWAKE_NESTS 4

/* Macro to allocate a "struct epitem" from the slab cache */
#define EPI_MEM_ALLOC() (struct epitem *) kmem_cache_alloc(epi_cache, SLAB_KERNEL)

/* Macro to free a "struct epitem" to the slab cache */
#define EPI_MEM_FREE(p) kmem_cache_free(epi_cache, p)

/* Macro to allocate a "struct eppoll_entry" from the slab cache */
#define PWQ_MEM_ALLOC() (struct eppoll_entry *) kmem_cache_alloc(pwq_cache, SLAB_KERNEL)

/* Macro to free a "struct eppoll_entry" to the slab cache */
#define PWQ_MEM_FREE(p) kmem_cache_free(pwq_cache, p)

/* Fast test to see if the file is an eventpoll file */
#define IS_FILE_EPOLL(f) ((f)->f_op == &eventpoll_fops)

/* Setup the structure that is used as key for the rb-tree */
#define EP_SET_FFD(p, f, d) do { (p)->file = (f); (p)->fd = (d); } while (0)

/* Compare rb-tree keys */
#define EP_CMP_FFD(p1, p2) ((p1)->file > (p2)->file ? +1: \
			    ((p1)->file < (p2)->file ? -1: (p1)->fd - (p2)->fd))

/* Special initialization for the rb-tree node to detect linkage */
#define EP_RB_INITNODE(n) ((n)->rb_parent = (n))

/* Removes a node from the rb-tree and marks it for a fast is-linked check */
#define EP_RB_ERASE(n, r) do { rb_erase(n, r); (n)->rb_parent = (n); } while (0)

/* Fast check to verify that the item is linked to the main rb-tree */
#define EP_RB_LINKED(n) ((n)->rb_parent != (n))

/*
 * Remove the item from the list and perform its initialization.
 * This is useful for us because we can test if the item is linked
 * using "EP_IS_LINKED(p)".
 */
#define EP_LIST_DEL(p) do { list_del(p); INIT_LIST_HEAD(p); } while (0)

/* Tells us if the item is currently linked */
#define EP_IS_LINKED(p) (!list_empty(p))
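
/*
 * A minimal sketch of why EP_LIST_DEL() reinitializes the node: a plain
 * list_del() leaves the node's pointers dangling, so a later list_empty()
 * test on it would be meaningless. After INIT_LIST_HEAD() the node points
 * to itself and EP_IS_LINKED() can safely report "unlinked":
 *
 *	struct list_head n;
 *	INIT_LIST_HEAD(&n);		n.next == n.prev == &n
 *	list_add(&n, &some_list);	EP_IS_LINKED(&n) is now true
 *	EP_LIST_DEL(&n);		EP_IS_LINKED(&n) is false again
 *
 * "some_list" above is a hypothetical list head, used only for illustration.
 */
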
/* Get the "struct epitem" from a wait queue pointer */
#define EP_ITEM_FROM_WAIT(p) ((struct epitem *) container_of(p, struct eppoll_entry, wait)->base)

/* Get the "struct epitem" from an epoll queue wrapper */
#define EP_ITEM_FROM_EPQUEUE(p) (container_of(p, struct ep_pqueue, pt)->epi)

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
#define EP_OP_HASH_EVENT(op) ((op) != EPOLL_CTL_DEL)


struct epoll_filefd {
	struct file *file;
	int fd;
};

/*
 * Node that is linked into the "wake_task_list" member of the "struct poll_safewake".
 * It is used to keep track of all tasks that are currently inside the wake_up() code
 * to 1) short-circuit the one coming from the same task and same wait queue head
 * ( loop ) 2) allow a maximum number of epoll descriptors inclusion nesting
 * 3) let go the ones coming from other tasks.
 */
struct wake_task_node {
	struct list_head llink;
	task_t *task;
	wait_queue_head_t *wq;
};

/*
 * This is used to implement the safe poll wake up, avoiding reentry
 * into the poll callback from inside wake_up().
 */
struct poll_safewake {
	struct list_head wake_task_list;
	spinlock_t lock;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/* Protects access to this structure */
	rwlock_t lock;

	/*
	 * This semaphore is used to ensure that files are not removed
	 * while epoll is using them. This is read-held during the event
	 * collection loop and it is write-held during the file cleanup
	 * path, the epoll file exit code and the ctl operations.
	 */
	struct rw_semaphore sem;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* RB-Tree root used to store monitored fd structs */
	struct rb_root rbr;
};

/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;

	/* The "base" pointer is set to the container "struct epitem" */
	void *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the rb-tree.
 */
struct epitem {
	/* RB-Tree node used to link this structure to the eventpoll rb-tree */
	struct rb_node rbn;

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/* Number of active wait queues attached to poll operations */
	int nwait;

	/* List containing poll wait queues */
	struct list_head pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;

	/*
	 * Used to keep track of the usage count of the structure. This prevents
	 * the structure from disappearing from underneath our processing.
	 */
	atomic_t usecnt;

	/* List header used to link this item to the "struct file" items list */
	struct list_head fllink;

	/* List header used to link the item to the transfer list */
	struct list_head txlink;

	/*
	 * This is used during the collection/transfer of events to userspace
	 * to hold the event set collected for this item.
	 */
	unsigned int revents;
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

static void ep_poll_safewake_init(struct poll_safewake *psw);
static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
static int ep_getfd(int *efd, struct inode **einode, struct file **efile);
static int ep_file_init(struct file *file);
static void ep_free(struct eventpoll *ep);
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
static void ep_use_epitem(struct epitem *epi);
static void ep_release_epitem(struct epitem *epi);
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt);
static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi);
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd);
static int ep_modify(struct eventpoll *ep, struct epitem *epi,
		     struct epoll_event *event);
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi);
static int ep_unlink(struct eventpoll *ep, struct epitem *epi);
static int ep_remove(struct eventpoll *ep, struct epitem *epi);
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key);
static int ep_eventpoll_close(struct inode *inode, struct file *file);
static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait);
static int ep_collect_ready_items(struct eventpoll *ep,
				  struct list_head *txlist, int maxevents);
static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
			  struct epoll_event __user *events);
static void ep_reinject_items(struct eventpoll *ep, struct list_head *txlist);
static int ep_events_transfer(struct eventpoll *ep,
			      struct epoll_event __user *events,
			      int maxevents);
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout);
static int eventpollfs_delete_dentry(struct dentry *dentry);
static struct inode *ep_eventpoll_inode(void);
static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
					      int flags, const char *dev_name,
					      void *data);

/*
 * This semaphore is used to serialize ep_free() and eventpoll_release_file().
 */
static struct semaphore epsem;

/* Safe wake up implementation */
static struct poll_safewake psw;

/* Slab cache used to allocate "struct epitem" */
static kmem_cache_t *epi_cache;

/* Slab cache used to allocate "struct eppoll_entry" */
static kmem_cache_t *pwq_cache;

/* Virtual fs used to allocate inodes for eventpoll files */
static struct vfsmount *eventpoll_mnt;

/* File callbacks that implement the eventpoll file behaviour */
static struct file_operations eventpoll_fops = {
	.release	= ep_eventpoll_close,
	.poll		= ep_eventpoll_poll
};

/*
 * This is used to register the virtual file system from where
 * eventpoll inodes are allocated.
 */
static struct file_system_type eventpoll_fs_type = {
	.name		= "eventpollfs",
	.get_sb		= eventpollfs_get_sb,
	.kill_sb	= kill_anon_super,
};

/* Very basic directory entry operations for the eventpoll virtual file system */
static struct dentry_operations eventpollfs_dentry_operations = {
	.d_delete	= eventpollfs_delete_dentry,
};

/* Initialize the poll safe wake up structure */
static void ep_poll_safewake_init(struct poll_safewake *psw)
{
	INIT_LIST_HEAD(&psw->wake_task_list);
	spin_lock_init(&psw->lock);
}

/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_POLLWAKE_NESTS times,
 * and we cannot reenter the same wait queue head at all. This allows
 * a hierarchy of epoll file descriptors of no more than
 * EP_MAX_POLLWAKE_NESTS deep. We need the irq version of the spin lock
 * because this one gets called by the poll callback, that in turn is called
 * from inside a wake_up(), that might be called from irq context.
 */
static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
{
	int wake_nests = 0;
	unsigned long flags;
	task_t *this_task = current;
	struct list_head *lsthead = &psw->wake_task_list, *lnk;
	struct wake_task_node *tncur;
	struct wake_task_node tnode;

	spin_lock_irqsave(&psw->lock, flags);

	/* Try to see if the current task is already inside this wakeup call */
	list_for_each(lnk, lsthead) {
		tncur = list_entry(lnk, struct wake_task_node, llink);

		if (tncur->wq == wq ||
		    (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
			/*
			 * Oops ... loop detected or maximum nest level reached.
			 * We abort this wake by breaking the cycle itself.
			 */
			spin_unlock_irqrestore(&psw->lock, flags);
			return;
		}
	}

	/* Add the current task to the list */
	tnode.task = this_task;
	tnode.wq = wq;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&psw->lock, flags);

	/* Do really wake up now */
	wake_up(wq);

	/* Remove the current task from the list */
	spin_lock_irqsave(&psw->lock, flags);
	list_del(&tnode.llink);
	spin_unlock_irqrestore(&psw->lock, flags);
}
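
/*
 * Example of the scenario the function above guards against (a sketch with
 * hypothetical descriptors): if epoll fd "A" watches epoll fd "B" and "B"
 * in turn watches "A", a wake_up() on A's poll_wait would recurse into B's
 * and back into A's forever. The wake_task_node list breaks that cycle,
 * and also caps legitimate chains A -> B -> C -> ... at
 * EP_MAX_POLLWAKE_NESTS levels.
 */
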
/* Used to initialize the epoll bits inside the "struct file" */
void eventpoll_init_file(struct file *file)
{
	INIT_LIST_HEAD(&file->f_ep_links);
	spin_lock_init(&file->f_ep_lock);
}

/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to correctly clean up files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct list_head *lsthead = &file->f_ep_links;
	struct eventpoll *ep;
	struct epitem *epi;

	/*
	 * We don't want to take "file->f_ep_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * The only hit might come from ep_free() but holding the semaphore
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->sem" after "epsem" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 */
	down(&epsem);

	while (!list_empty(lsthead)) {
		epi = list_entry(lsthead->next, struct epitem, fllink);

		ep = epi->ep;
		EP_LIST_DEL(&epi->fllink);
		down_write(&ep->sem);
		ep_remove(ep, epi);
		up_write(&ep->sem);
	}

	up(&epsem);
}

/*
 * It opens an eventpoll file descriptor by suggesting a storage of "size"
 * file descriptors. The size parameter is just a hint about how to size
 * the data structures. It won't prevent the user from storing more than
 * "size" file descriptors inside the epoll interface. It is the kernel
 * part of the userspace epoll_create(2).
 */
asmlinkage long sys_epoll_create(int size)
{
	int error, fd;
	struct inode *inode;
	struct file *file;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
		     current, size));

	/* Sanity check on the size parameter */
	error = -EINVAL;
	if (size <= 0)
		goto eexit_1;

	/*
	 * Creates all the items needed to setup an eventpoll file. That is,
	 * a file structure, an inode and a free file descriptor.
	 */
	error = ep_getfd(&fd, &inode, &file);
	if (error)
		goto eexit_1;

	/* Setup the file internal data structure ( "struct eventpoll" ) */
	error = ep_file_init(file);
	if (error)
		goto eexit_2;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, size, fd));

	return fd;

eexit_2:
	sys_close(fd);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, size, error));
	return error;
}
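
/*
 * Userspace view of the syscall above (a minimal sketch; the value 10 is
 * only a sizing hint, as explained in the comment before the function):
 *
 *	#include <sys/epoll.h>
 *
 *	int epfd = epoll_create(10);
 *	if (epfd < 0)
 *		perror("epoll_create");
 */
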
/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set. It represents
 * the kernel part of the userspace epoll_ctl(2).
 */
asmlinkage long
sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event)
{
	int error;
	struct file *file, *tfile;
	struct eventpoll *ep;
	struct epitem *epi;
	struct epoll_event epds;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n",
		     current, epfd, op, fd, event));

	error = -EFAULT;
	if (EP_OP_HASH_EVENT(op) &&
	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
		goto eexit_1;

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto eexit_1;

	/* Get the "struct file *" for the target file */
	tfile = fget(fd);
	if (!tfile)
		goto eexit_2;

	/* The target file descriptor must support poll */
	error = -EPERM;
	if (!tfile->f_op || !tfile->f_op->poll)
		goto eexit_3;

	/*
	 * We have to check that the file structure underneath the file descriptor
	 * the user passed to us _is_ an eventpoll file. And also we do not permit
	 * adding an epoll file descriptor inside itself.
	 */
	error = -EINVAL;
	if (file == tfile || !IS_FILE_EPOLL(file))
		goto eexit_3;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	down_write(&ep->sem);

	/* Try to lookup the file inside our rb-tree */
	epi = ep_find(ep, tfile, fd);

	error = -EINVAL;
	switch (op) {
	case EPOLL_CTL_ADD:
		if (!epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_insert(ep, &epds, tfile, fd);
		} else
			error = -EEXIST;
		break;
	case EPOLL_CTL_DEL:
		if (epi)
			error = ep_remove(ep, epi);
		else
			error = -ENOENT;
		break;
	case EPOLL_CTL_MOD:
		if (epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_modify(ep, epi, &epds);
		} else
			error = -ENOENT;
		break;
	}

	/*
	 * The function ep_find() increments the usage count of the structure
	 * so, if this is not NULL, we need to release it.
	 */
	if (epi)
		ep_release_epitem(epi);

	up_write(&ep->sem);

eexit_3:
	fput(tfile);
eexit_2:
	fput(file);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n",
		     current, epfd, op, fd, event, error));

	return error;
}
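
/*
 * Userspace view of epoll_ctl(2) (a sketch; "sock" stands for a
 * hypothetical connected socket descriptor):
 *
 *	struct epoll_event ev;
 *	ev.events = EPOLLIN;	POLLERR and POLLHUP get or-ed in by the kernel
 *	ev.data.fd = sock;
 *	if (epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev) < 0)
 *		perror("epoll_ctl");
 */
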
#define MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the userspace epoll_wait(2).
 */
asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
			       int maxevents, int timeout)
{
	int error;
	struct file *file;
	struct eventpoll *ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n",
		     current, epfd, events, maxevents, timeout));

	/* The maximum number of events must be greater than zero */
	if (maxevents <= 0 || maxevents > MAX_EVENTS)
		return -EINVAL;

	/* Verify that the area passed by the user is writeable */
	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
		error = -EFAULT;
		goto eexit_1;
	}

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto eexit_1;

	/*
	 * We have to check that the file structure underneath the fd
	 * the user passed to us _is_ an eventpoll file.
	 */
	error = -EINVAL;
	if (!IS_FILE_EPOLL(file))
		goto eexit_2;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	/* Time to fish for events ... */
	error = ep_poll(ep, events, maxevents, timeout);

eexit_2:
	fput(file);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n",
		     current, epfd, events, maxevents, timeout, error));

	return error;
}
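
/*
 * A complete userspace event loop built on the three syscalls above
 * (a minimal sketch; handle_io() is a hypothetical application callback):
 *
 *	struct epoll_event evs[64];
 *	for (;;) {
 *		int i, n = epoll_wait(epfd, evs, 64, -1);	-1 == wait forever
 *		if (n < 0 && errno != EINTR)
 *			break;
 *		for (i = 0; i < n; i++)
 *			handle_io(evs[i].data.fd, evs[i].events);
 *	}
 */
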
/*
 * Creates the file descriptor to be used by the epoll interface.
 */
static int ep_getfd(int *efd, struct inode **einode, struct file **efile)
{
	struct qstr this;
	char name[32];
	struct dentry *dentry;
	struct inode *inode;
	struct file *file;
	int error, fd;

	/* Get a ready-to-use file */
	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto eexit_1;

	/* Allocates an inode from the eventpoll file system */
	inode = ep_eventpoll_inode();
	error = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto eexit_2;

	/* Allocates a free descriptor to plug the file onto */
	error = get_unused_fd();
	if (error < 0)
		goto eexit_3;
	fd = error;

	/*
	 * Link the inode to a directory entry by creating a unique name
	 * using the inode number.
	 */
	error = -ENOMEM;
	sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.len = strlen(name);
	this.hash = inode->i_ino;
	dentry = d_alloc(eventpoll_mnt->mnt_sb->s_root, &this);
	if (!dentry)
		goto eexit_4;
	dentry->d_op = &eventpollfs_dentry_operations;
	d_add(dentry, inode);
	file->f_vfsmnt = mntget(eventpoll_mnt);
	file->f_dentry = dentry;
	file->f_mapping = inode->i_mapping;

	file->f_pos = 0;
	file->f_flags = O_RDONLY;
	file->f_op = &eventpoll_fops;
	file->f_mode = FMODE_READ;
	file->f_version = 0;
	file->private_data = NULL;

	/* Install the new setup file into the allocated fd. */
	fd_install(fd, file);

	*efd = fd;
	*einode = inode;
	*efile = file;
	return 0;

eexit_4:
	put_unused_fd(fd);
eexit_3:
	iput(inode);
eexit_2:
	put_filp(file);
eexit_1:
	return error;
}

static int ep_file_init(struct file *file)
{
	struct eventpoll *ep;

	if (!(ep = kmalloc(sizeof(struct eventpoll), GFP_KERNEL)))
		return -ENOMEM;

	memset(ep, 0, sizeof(*ep));
	rwlock_init(&ep->lock);
	init_rwsem(&ep->sem);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;

	file->private_data = ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_file_init() ep=%p\n",
		     current, ep));
	return 0;
}

static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting for this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&psw, &ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->sem" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epsem" is sufficient here.
	 */
	down(&epsem);

	/*
	 * Walks through the whole tree by unregistering poll callbacks.
	 */
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
	}

	/*
	 * Walks through the whole tree by freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * write-holding "sem" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 */
	while ((rbp = rb_first(&ep->rbr)) != 0) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_remove(ep, epi);
	}

	up(&epsem);
}

/*
 * Search the file inside the eventpoll tree. It adds a usage count to
 * the returned item, so the caller must call ep_release_epitem()
 * when finished using the "struct epitem".
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	unsigned long flags;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	EP_SET_FFD(&ffd, file, fd);
	read_lock_irqsave(&ep->lock, flags);
	for (rbp = ep->rbr.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = EP_CMP_FFD(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			ep_use_epitem(epi);
			epir = epi;
			break;
		}
	}
	read_unlock_irqrestore(&ep->lock, flags);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
		     current, file, epir));

	return epir;
}

/*
 * Increment the usage count of the "struct epitem" to make sure
 * that the user will have a valid pointer to reference.
 */
static void ep_use_epitem(struct epitem *epi)
{
	atomic_inc(&epi->usecnt);
}

/*
 * Decrement ( release ) the usage count by signaling that the user
 * has finished using the structure. It might lead to freeing the
 * structure itself if the count goes to zero.
 */
static void ep_release_epitem(struct epitem *epi)
{
	if (atomic_dec_and_test(&epi->usecnt))
		EPI_MEM_FREE(epi);
}

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = EP_ITEM_FROM_EPQUEUE(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = PWQ_MEM_ALLOC())) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
	struct epitem *epic;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = EP_CMP_FFD(&epi->ffd, &epic->ffd);
		if (kcmp > 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color(&epi->rbn, &ep->rbr);
}

static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	struct epitem *epi;
	struct ep_pqueue epq;

	error = -ENOMEM;
	if (!(epi = EPI_MEM_ALLOC()))
		goto eexit_1;

	/* Item initialization follows here ... */
	EP_RB_INITNODE(&epi->rbn);
	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->txlink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;
	EP_SET_FFD(&epi->ffd, tfile, fd);
	epi->event = *event;
	atomic_set(&epi->usecnt, 1);
	epi->nwait = 0;

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function.
	 */
	revents = tfile->f_op->poll(tfile, &epq.pt);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely, an allocation for a wait queue failed due
	 * to high memory pressure.
	 */
	if (epi->nwait < 0)
		goto eexit_2;

	/* Add the current item to the list of active epoll hooks for this file */
	spin_lock(&tfile->f_ep_lock);
	list_add_tail(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_ep_lock);

	/* We have to drop the new item inside our item list to keep track of it */
	write_lock_irqsave(&ep->lock, flags);

	/* Add the current item to the rb-tree */
	ep_rbtree_insert(ep, epi);

	/* If the file is already "ready" we drop it inside the ready list */
	if ((revents & event->events) && !EP_IS_LINKED(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			wake_up(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n",
		     current, ep, tfile, fd));

	return 0;

eexit_2:
	ep_unregister_pollwait(ep, epi);

	/*
	 * We need to do this because an event could have arrived on some
	 * allocated wait queue.
	 */
	write_lock_irqsave(&ep->lock, flags);
	if (EP_IS_LINKED(&epi->rdllink))
		EP_LIST_DEL(&epi->rdllink);
	write_unlock_irqrestore(&ep->lock, flags);

	EPI_MEM_FREE(epi);
eexit_1:
	return error;
}

/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
	int pwake = 0;
	unsigned int revents;
	unsigned long flags;

	/*
	 * Set the new event interest mask before calling f_op->poll(), otherwise
	 * a potential race might occur. In fact if we do this operation inside
	 * the lock, an event might happen between the f_op->poll() call and the
	 * new event set registering.
	 */
	epi->event.events = event->events;

	/*
	 * Get current event bits. We can safely use the file* here because
	 * its usage count has been increased by the caller of this function.
	 */
	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);

	write_lock_irqsave(&ep->lock, flags);

	/* Copy the data member from inside the lock */
	epi->event.data = event->data;

	/*
	 * If the item is not linked to the rb-tree it means that it's on its
	 * way toward removal. Do nothing in this case.
	 */
	if (EP_RB_LINKED(&epi->rbn)) {
		/*
		 * If the item is "hot" and it is not registered inside the ready
		 * list, push it inside. If the item is not "hot" and it is currently
		 * registered inside the ready list, unlink it.
		 */
		if (revents & event->events) {
			if (!EP_IS_LINKED(&epi->rdllink)) {
				list_add_tail(&epi->rdllink, &ep->rdllist);

				/* Notify waiting tasks that events are available */
				if (waitqueue_active(&ep->wq))
					wake_up(&ep->wq);
				if (waitqueue_active(&ep->poll_wait))
					pwake++;
			}
		}
	}

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 0;
}

/*
 * This function unregisters poll callbacks from the associated file descriptor.
 * Since this must be called without holding "ep->lock" the atomic exchange trick
 * will protect us from multiple unregistrations.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	int nwait;
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	/* This is called without locks, so we need the atomic exchange */
	nwait = xchg(&epi->nwait, 0);

	if (nwait) {
		while (!list_empty(lsthead)) {
			pwq = list_entry(lsthead->next, struct eppoll_entry, llink);

			EP_LIST_DEL(&pwq->llink);
			remove_wait_queue(pwq->whead, &pwq->wait);
			PWQ_MEM_FREE(pwq);
		}
	}
}

/*
 * Unlink the "struct epitem" from all places it might have been hooked up.
 * This function must be called with write IRQ lock on "ep->lock".
 */
static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
{
	int error;

	/*
	 * It can happen that this one is called for an item already unlinked.
	 * The check protects us from doing a double unlink ( crash ).
	 */
	error = -ENOENT;
	if (!EP_RB_LINKED(&epi->rbn))
		goto eexit_1;

	/*
	 * Clear the event mask for the unlinked item. This will avoid item
	 * notifications being sent after the unlink operation from inside
	 * the kernel->userspace event transfer loop.
	 */
	epi->event.events = 0;

	/*
	 * At this point it is safe to do the job, unlink the item from our rb-tree.
	 * This operation together with the above check closes the door to
	 * double unlinks.
	 */
	EP_RB_ERASE(&epi->rbn, &ep->rbr);

	/*
	 * If the item we are going to remove is inside the ready file descriptors
	 * we want to remove it from this list to avoid stale events.
	 */
	if (EP_IS_LINKED(&epi->rdllink))
		EP_LIST_DEL(&epi->rdllink);

	error = 0;
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
		     current, ep, epi->ffd.file, error));

	return error;
}

/*
 * Removes a "struct epitem" from the eventpoll tree and deallocates
 * all the associated resources.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	int error;
	unsigned long flags;
	struct file *file = epi->ffd.file;

	/*
	 * Removes poll wait queue hooks. We _have_ to do this without holding
	 * the "ep->lock" otherwise a deadlock might occur. This is because of the
	 * sequence of the lock acquisition. Here we do "ep->lock" then the wait
	 * queue head lock when unregistering the wait queue. The wakeup callback
	 * will run by holding the wait queue head lock and will call our callback
	 * that will try to get "ep->lock".
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_ep_lock);
	if (EP_IS_LINKED(&epi->fllink))
		EP_LIST_DEL(&epi->fllink);
	spin_unlock(&file->f_ep_lock);

	/* We need to acquire the write IRQ lock before calling ep_unlink() */
	write_lock_irqsave(&ep->lock, flags);

	/* Really unlink the item from the tree */
	error = ep_unlink(ep, epi);

	write_unlock_irqrestore(&ep->lock, flags);

	if (error)
		goto eexit_1;

	/* At this point it is safe to free the eventpoll item */
	ep_release_epitem(epi);

	error = 0;
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p) = %d\n",
		     current, ep, file, error));

	return error;
}

/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = EP_ITEM_FROM_WAIT(wait);
	struct eventpoll *ep = epi->ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
		     current, epi->ffd.file, epi, ep));

	write_lock_irqsave(&ep->lock, flags);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD will be issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto is_disabled;

	/* If this file is already in the ready list we exit soon */
	if (EP_IS_LINKED(&epi->rdllink))
		goto is_linked;

	list_add_tail(&epi->rdllink, &ep->rdllist);

is_linked:
	/*
	 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		wake_up(&ep->wq);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

is_disabled:
	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 1;
}

static int ep_eventpoll_close(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep) {
		ep_free(ep);
		kfree(ep);
	}

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
	return 0;
}

static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	unsigned int pollflags = 0;
	unsigned long flags;
	struct eventpoll *ep = file->private_data;

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/* Check our condition */
	read_lock_irqsave(&ep->lock, flags);
	if (!list_empty(&ep->rdllist))
		pollflags = POLLIN | POLLRDNORM;
	read_unlock_irqrestore(&ep->lock, flags);

	return pollflags;
}

/*
 * Since we have to release the lock during the __copy_to_user() operation and
 * during the f_op->poll() call, we try to collect the maximum number of items
 * by reducing the irqlock/irqunlock switching rate.
 */
static int ep_collect_ready_items(struct eventpoll *ep, struct list_head *txlist, int maxevents)
{
	int nepi;
	unsigned long flags;
	struct list_head *lsthead = &ep->rdllist, *lnk;
	struct epitem *epi;

	write_lock_irqsave(&ep->lock, flags);

	for (nepi = 0, lnk = lsthead->next; lnk != lsthead && nepi < maxevents;) {
		epi = list_entry(lnk, struct epitem, rdllink);

		lnk = lnk->next;

		/* Skip items that have already been linked to the transfer list */
		if (!EP_IS_LINKED(&epi->txlink)) {
			/*
			 * This is initialized in this way so that the default
			 * behaviour of the reinjecting code will be to push back
			 * the item inside the ready list.
			 */
			epi->revents = epi->event.events;

			/* Link the ready item into the transfer list */
			list_add(&epi->txlink, txlist);
			nepi++;

			/*
			 * Unlink the item from the ready list.
			 */
			EP_LIST_DEL(&epi->rdllink);
		}
	}

	write_unlock_irqrestore(&ep->lock, flags);

	return nepi;
}

/*
 * This function is called without holding the "ep->lock" since the call to
 * __copy_to_user() might sleep, and also f_op->poll() might reenable IRQs
 * because of the way poll() is traditionally implemented in Linux.
 */
static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
			  struct epoll_event __user *events)
{
	int eventcnt = 0;
	unsigned int revents;
	struct list_head *lnk;
	struct epitem *epi;

	/*
	 * We can loop without lock because this is a task private list.
	 * The test done during the collection loop will guarantee us that
	 * another task will not try to collect this file. Also, items
	 * cannot vanish during the loop because we are holding "sem".
	 */
	list_for_each(lnk, txlist) {
		epi = list_entry(lnk, struct epitem, txlink);

		/*
		 * Get the ready file event set. We can safely use the file
		 * because we are holding the "sem" in read and this will
		 * guarantee that both the file and the item will not vanish.
		 */
		revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);

		/*
		 * Set the return event set for the current file descriptor.
		 * Note that only the task that was successfully able to link
		 * the item to its "txlist" will write this field.
		 */
		epi->revents = revents & epi->event.events;

		if (epi->revents) {
			if (__put_user(epi->revents,
				       &events[eventcnt].events) ||
			    __put_user(epi->event.data,
				       &events[eventcnt].data))
				return -EFAULT;
			if (epi->event.events & EPOLLONESHOT)
				epi->event.events &= EP_PRIVATE_BITS;
			eventcnt++;
		}
	}
	return eventcnt;
}
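
/*
 * Note on the EPOLLONESHOT branch above: once the mask is reduced to
 * EP_PRIVATE_BITS, ep_poll_callback() treats the descriptor as disabled
 * until userspace re-arms it. A sketch of the re-arm from the application
 * side ("sock" is a hypothetical descriptor):
 *
 *	ev.events = EPOLLIN | EPOLLONESHOT;
 *	ev.data.fd = sock;
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, sock, &ev);
 */
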
/*
 * Walk through the transfer list we collected with ep_collect_ready_items()
 * and, if 1) the item is still "alive" 2) its event set is not empty 3) it's
 * not already linked, link it to the ready list. Same as above, we are holding
 * "sem" so items cannot vanish underneath our nose.
 */
static void ep_reinject_items(struct eventpoll *ep, struct list_head *txlist)
{
	int ricnt = 0, pwake = 0;
	unsigned long flags;
	struct epitem *epi;

	write_lock_irqsave(&ep->lock, flags);

	while (!list_empty(txlist)) {
		epi = list_entry(txlist->next, struct epitem, txlink);

		/* Unlink the current item from the transfer list */
		EP_LIST_DEL(&epi->txlink);

		/*
		 * If the item is no longer linked to the interest set, we don't
		 * have to push it inside the ready list because the following
		 * ep_release_epitem() is going to drop it. Also, if the current
		 * item is set to have an Edge Triggered behaviour, we don't have
		 * to push it back either.
		 */
		if (EP_RB_LINKED(&epi->rbn) && !(epi->event.events & EPOLLET) &&
		    (epi->revents & epi->event.events) && !EP_IS_LINKED(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);
			ricnt++;
		}
	}

	if (ricnt) {
		/*
		 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
		 * wait list.
		 */
		if (waitqueue_active(&ep->wq))
			wake_up(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);
}

/*
 * Perform the transfer of events to user space.
 */
static int ep_events_transfer(struct eventpoll *ep,
			      struct epoll_event __user *events, int maxevents)
{
	int eventcnt = 0;
	struct list_head txlist;

	INIT_LIST_HEAD(&txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
	 */
	down_read(&ep->sem);

	/* Collect/extract ready items */
	if (ep_collect_ready_items(ep, &txlist, maxevents) > 0) {
		/* Build result set in userspace */
		eventcnt = ep_send_events(ep, &txlist, events);

		/* Reinject ready items into the ready list */
		ep_reinject_items(ep, &txlist);
	}

	up_read(&ep->sem);

	return eventcnt;
}

static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout)
{
	int res, eavail;
	unsigned long flags;
	long jtimeout;
	wait_queue_t wait;

	/*
	 * Calculate the timeout by checking for the "infinite" value ( -1 )
	 * and the overflow condition. The passed timeout is in milliseconds,
	 * which is why we convert it with (t * HZ + 999) / 1000, rounding up.
	 */
	jtimeout = timeout == -1 || timeout > (MAX_SCHEDULE_TIMEOUT - 1000) / HZ ?
		MAX_SCHEDULE_TIMEOUT: (timeout * HZ + 999) / 1000;
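	/*
	 * Worked example of the rounding above, assuming HZ=100: a 25 ms
	 * timeout gives (25 * 100 + 999) / 1000 = 3 jiffies, so we always
	 * round up and never sleep less than the caller asked for.
	 */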
retry:
	write_lock_irqsave(&ep->lock, flags);

	res = 0;
	if (list_empty(&ep->rdllist)) {
		/*
		 * We don't have any available event to return to the caller.
		 * We need to sleep here, and we will be woken up by
		 * ep_poll_callback() when events become available.
		 */
		init_waitqueue_entry(&wait, current);
		add_wait_queue(&ep->wq, &wait);

		for (;;) {
			/*
			 * We don't want to sleep if the ep_poll_callback() sends us
			 * a wakeup in between. That's why we set the task state
			 * to TASK_INTERRUPTIBLE before doing the checks.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
			if (!list_empty(&ep->rdllist) || !jtimeout)
				break;
			if (signal_pending(current)) {
				res = -EINTR;
				break;
			}

			write_unlock_irqrestore(&ep->lock, flags);
			jtimeout = schedule_timeout(jtimeout);
			write_lock_irqsave(&ep->lock, flags);
		}
		remove_wait_queue(&ep->wq, &wait);

		set_current_state(TASK_RUNNING);
	}

	/* Is it worth trying to dig for events ? */
	eavail = !list_empty(&ep->rdllist);

	write_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Try to transfer events to user space. In case we get 0 events and
	 * there's still timeout left over, we go trying again in search of
	 * more luck.
	 */
	if (!res && eavail &&
	    !(res = ep_events_transfer(ep, events, maxevents)) && jtimeout)
		goto retry;

	return res;
}

static int eventpollfs_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct inode *ep_eventpoll_inode(void)
{
	int error = -ENOMEM;
	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);

	if (!inode)
		goto eexit_1;

	inode->i_fop = &eventpoll_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because mark_inode_dirty() will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IRUSR | S_IWUSR;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inode->i_blksize = PAGE_SIZE;
	return inode;

eexit_1:
	return ERR_PTR(error);
}

static struct super_block *
eventpollfs_get_sb(struct file_system_type *fs_type, int flags,
		   const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "eventpoll:", NULL, EVENTPOLLFS_MAGIC);
}

static int __init eventpoll_init(void)
{
	int error;

	init_MUTEX(&epsem);

	/* Initialize the structure used to perform safe poll wait head wake ups */
	ep_poll_safewake_init(&psw);

	/* Allocates slab cache used to allocate "struct epitem" items */
	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
				      0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
				      NULL, NULL);

	/* Allocates slab cache used to allocate "struct eppoll_entry" */
	pwq_cache = kmem_cache_create("eventpoll_pwq",
				      sizeof(struct eppoll_entry), 0,
				      EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL);

	/*
	 * Register the virtual file system that will be the source of inodes
	 * for the eventpoll files
	 */
	error = register_filesystem(&eventpoll_fs_type);
	if (error)
		goto epanic;

	/* Mount the above commented virtual file system */
	eventpoll_mnt = kern_mount(&eventpoll_fs_type);
	error = PTR_ERR(eventpoll_mnt);
	if (IS_ERR(eventpoll_mnt))
		goto epanic;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: successfully initialized.\n",
		     current));

	return 0;

epanic:
	panic("eventpoll_init() failed\n");
}

static void __exit eventpoll_exit(void)
{
	/* Undo all operations done inside eventpoll_init() */
	unregister_filesystem(&eventpoll_fs_type);
	mntput(eventpoll_mnt);
	kmem_cache_destroy(pwq_cache);
	kmem_cache_destroy(epi_cache);
}

module_init(eventpoll_init);
module_exit(eventpoll_exit);

MODULE_LICENSE("GPL");