/*
 * fs/eventpoll.c (Efficient event retrieval implementation)
 * Copyright (C) 2001,...,2009 Davide Libenzi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Davide Libenzi <davidel@xmailserver.org>
 *
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <linux/atomic.h>
/*
 * LOCKING:
 * There are three levels of locking required by epoll :
 *
 * 1) epmutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (spinlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * Then we also need a global mutex to serialize eventpoll_release_file()
 * and ep_free().
 * This mutex is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and it is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is also acquired when inserting an epoll fd onto another epoll
 * fd. We do this so that we can walk the epoll tree and ensure that the
 * insertion does not create a cycle of epoll file descriptors, which
 * could lead to deadlock. We need a global mutex to prevent two
 * simultaneous inserts (A into B and B into A) from racing and
 * constructing a cycle without either insert observing that it is
 * going to.
 * It is necessary to acquire multiple "ep->mtx"es at once in the
 * case when one epoll fd is added to another. In this case, we
 * always acquire the locks in the order of nesting (i.e. after
 * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
 * before e2->mtx). Since we disallow cycles of epoll file
 * descriptors, this ensures that the mutexes are well-ordered. In
 * order to communicate this nesting to lockdep, when walking a tree
 * of epoll file descriptors, we use the current recursion depth as
 * the lockdep subkey.
 * It is possible to drop the "ep->mtx" and to use the global
 * mutex "epmutex" (together with "ep->lock") to have it working,
 * but having "ep->mtx" will make the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" will guarantee
 * a better scalability.
 */
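
/*
 * Illustrative sketch, not part of the original file: the acquire order
 * described above, shown on a hypothetical helper (example_touch_rdllist()
 * does not exist in this file). The sleeping mutex is always taken before
 * the spinlock, never the other way around:
 *
 *	static void example_touch_rdllist(struct eventpoll *ep)
 *	{
 *		unsigned long flags;
 *
 *		mutex_lock(&ep->mtx);			// level 2: may sleep
 *		spin_lock_irqsave(&ep->lock, flags);	// level 3: IRQ-safe
 *		// ... manipulate ep->rdllist ...
 *		spin_unlock_irqrestore(&ep->lock, flags);
 *		mutex_unlock(&ep->mtx);
 *	}
 */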
/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)

/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))

struct epoll_filefd {
	struct file *file;
	int fd;
};
/*
 * Structure used to track possible nested calls, for too deep recursions
 * and loop cycles.
 */
struct nested_call_node {
	struct list_head llink;
	void *cookie;
	void *ctx;
};

/*
 * This structure is used as collector for nested calls, to check for
 * maximum recursion depth and loop cycles.
 */
struct nested_calls {
	struct list_head tasks_call_list;
	spinlock_t lock;
};
/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 */
struct epitem {
	/* RB tree node used to link this structure to the eventpoll RB tree */
	struct rb_node rbn;

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/*
	 * Works together with "struct eventpoll"->ovflist in keeping the
	 * single linked chain of items.
	 */
	struct epitem *next;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/* Number of active wait queues attached to poll operations */
	int nwait;

	/* List containing poll wait queues */
	struct list_head pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* List header used to link this item to the "struct file" items list */
	struct list_head fllink;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;
};
/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/* Protect the access to this structure */
	spinlock_t lock;

	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* RB tree root used to store monitored fd structs */
	struct rb_root rbr;

	/*
	 * This is a single linked list that chains all the "struct epitem"
	 * that became ready while transferring ready events to userspace
	 * w/out holding ->lock.
	 */
	struct epitem *ovflist;

	/* The user that created the eventpoll descriptor */
	struct user_struct *user;

	struct file *file;

	/* used to optimize loop detection check */
	int visited;
	struct list_head visited_list_link;
};
/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;

	/* The "base" pointer is set to the container "struct epitem" */
	struct epitem *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

/* Used by the ep_send_events() function as callback private data */
struct ep_send_events_data {
	int maxevents;
	struct epoll_event __user *events;
};
/*
 * Configuration options available inside /proc/sys/fs/epoll/
 */
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;

/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static DEFINE_MUTEX(epmutex);

/* Used to check for epoll file descriptor inclusion loops */
static struct nested_calls poll_loop_ncalls;

/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;

/* Used to call file's f_op->poll() under the nested calls boundaries */
static struct nested_calls poll_readywalk_ncalls;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
static LIST_HEAD(visited_list);

/*
 * List of files with newly added links, where we may need to limit the number
 * of emanating paths. Protected by the epmutex.
 */
static LIST_HEAD(tfile_check_list);

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static long zero;
static long long_max = LONG_MAX;

ctl_table epoll_table[] = {
	{
		.procname	= "max_user_watches",
		.data		= &max_user_watches,
		.maxlen		= sizeof(max_user_watches),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &zero,
		.extra2		= &long_max,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
static const struct file_operations eventpoll_fops;

static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}

/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1 :
		(p1->file < p2->file ? -1 : p1->fd - p2->fd));
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
	return !list_empty(p);
}

static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
	return container_of(p, struct ep_pqueue, pt)->epi;
}

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
	return op != EPOLL_CTL_DEL;
}

/* Initialize the poll safe wake up structure */
static void ep_nested_calls_init(struct nested_calls *ncalls)
{
	INIT_LIST_HEAD(&ncalls->tasks_call_list);
	spin_lock_init(&ncalls->lock);
}
/**
 * ep_events_available - Checks if ready events might be available.
 *
 * @ep: Pointer to the eventpoll context.
 *
 * Returns: Returns a value different from zero if ready events are available,
 *          or zero otherwise.
 */
static inline int ep_events_available(struct eventpoll *ep)
{
	return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
}
/**
 * ep_call_nested - Perform a bound (possibly) nested call, by checking
 *                  that the recursion limit is not exceeded, and that
 *                  the same nested call (by the meaning of same cookie) is
 *                  not re-entered.
 *
 * @ncalls: Pointer to the nested_calls structure to be used for this call.
 * @max_nests: Maximum number of allowed nesting calls.
 * @nproc: Nested call core function pointer.
 * @priv: Opaque data to be passed to the @nproc callback.
 * @cookie: Cookie to be used to identify this nested call.
 * @ctx: This instance context.
 *
 * Returns: Returns the code returned by the @nproc callback, or -1 if
 *          the maximum recursion limit has been exceeded.
 */
static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
			  int (*nproc)(void *, void *, int), void *priv,
			  void *cookie, void *ctx)
{
	int error, call_nests = 0;
	unsigned long flags;
	struct list_head *lsthead = &ncalls->tasks_call_list;
	struct nested_call_node *tncur;
	struct nested_call_node tnode;

	spin_lock_irqsave(&ncalls->lock, flags);

	/*
	 * Try to see if the current task is already inside this wakeup call.
	 * We use a list here, since the population inside this set is always
	 * very much limited.
	 */
	list_for_each_entry(tncur, lsthead, llink) {
		if (tncur->ctx == ctx &&
		    (tncur->cookie == cookie || ++call_nests > max_nests)) {
			/*
			 * Oops ... loop detected or maximum nest level reached.
			 * We abort this wake by breaking the cycle itself.
			 */
			error = -1;
			goto out_unlock;
		}
	}

	/* Add the current task and cookie to the list */
	tnode.ctx = ctx;
	tnode.cookie = cookie;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&ncalls->lock, flags);

	/* Call the nested function */
	error = (*nproc)(priv, cookie, call_nests);

	/* Remove the current task from the list */
	spin_lock_irqsave(&ncalls->lock, flags);
	list_del(&tnode.llink);
out_unlock:
	spin_unlock_irqrestore(&ncalls->lock, flags);

	return error;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
				     unsigned long events, int subclass)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
	wake_up_locked_poll(wqueue, events);
	spin_unlock_irqrestore(&wqueue->lock, flags);
}
#else
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
				     unsigned long events, int subclass)
{
	wake_up_poll(wqueue, events);
}
#endif
static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
	ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
			  1 + call_nests);
	return 0;
}

/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_NESTS times,
 * and we cannot reenter the same wait queue head at all. This allows
 * a hierarchy of epoll file descriptors that is no more than
 * EP_MAX_NESTS deep.
 */
static void ep_poll_safewake(wait_queue_head_t *wq)
{
	int this_cpu = get_cpu();

	ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
		       ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);

	put_cpu();
}
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
	wait_queue_head_t *whead;

	rcu_read_lock();
	/* If it is cleared by POLLFREE, it should be rcu-safe */
	whead = rcu_dereference(pwq->whead);
	if (whead)
		remove_wait_queue(whead, &pwq->wait);
	rcu_read_unlock();
}

/*
 * This function unregisters poll callbacks from the associated file
 * descriptor. Must be called with "mtx" held (or "epmutex" if called from
 * ep_free).
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	while (!list_empty(lsthead)) {
		pwq = list_first_entry(lsthead, struct eppoll_entry, llink);

		list_del(&pwq->llink);
		ep_remove_wait_queue(pwq);
		kmem_cache_free(pwq_cache, pwq);
	}
}
/**
 * ep_scan_ready_list - Scans the ready list in a way that makes it possible
 *                      for the scan code to call f_op->poll(). Also allows for
 *                      O(NumReady) performance.
 *
 * @ep: Pointer to the epoll private data structure.
 * @sproc: Pointer to the scan callback.
 * @priv: Private opaque data passed to the @sproc callback.
 * @depth: The current depth of recursive f_op->poll calls.
 *
 * Returns: The same integer error code returned by the @sproc callback.
 */
static int ep_scan_ready_list(struct eventpoll *ep,
			      int (*sproc)(struct eventpoll *,
					   struct list_head *, void *),
			      void *priv, int depth)
{
	int error, pwake = 0;
	unsigned long flags;
	struct epitem *epi, *nepi;
	LIST_HEAD(txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() and epoll_ctl().
	 */
	mutex_lock_nested(&ep->mtx, depth);

	/*
	 * Steal the ready list, and re-init the original one to the
	 * empty list. Also, set ep->ovflist to NULL so that events
	 * happening while looping w/out locks, are not lost. We cannot
	 * have the poll callback to queue directly on ep->rdllist,
	 * because we want the "sproc" callback to be able to do it
	 * in a lockless way.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	list_splice_init(&ep->rdllist, &txlist);
	ep->ovflist = NULL;
	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Now call the callback function.
	 */
	error = (*sproc)(ep, &txlist, priv);

	spin_lock_irqsave(&ep->lock, flags);
	/*
	 * During the time we spent inside the "sproc" callback, some
	 * other events might have been queued by the poll callback.
	 * We re-insert them inside the main ready-list here.
	 */
	for (nepi = ep->ovflist; (epi = nepi) != NULL;
	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
		/*
		 * We need to check if the item is already in the list.
		 * During the "sproc" callback execution time, items are
		 * queued into ->ovflist but the "txlist" might already
		 * contain them, and the list_splice() below takes care of them.
		 */
		if (!ep_is_linked(&epi->rdllink))
			list_add_tail(&epi->rdllink, &ep->rdllist);
	}
	/*
	 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
	 * releasing the lock, events will be queued in the normal way inside
	 * ep->rdllist.
	 */
	ep->ovflist = EP_UNACTIVE_PTR;

	/*
	 * Quickly re-inject items left on "txlist".
	 */
	list_splice(&txlist, &ep->rdllist);

	if (!list_empty(&ep->rdllist)) {
		/*
		 * Wake up (if active) both the eventpoll wait list and
		 * the ->poll() wait list (delayed after we release the lock).
		 */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	mutex_unlock(&ep->mtx);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return error;
}
/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	unsigned long flags;
	struct file *file = epi->ffd.file;

	/*
	 * Removes poll wait queue hooks. We _have_ to do this without holding
	 * the "ep->lock" otherwise a deadlock might occur. This is because of
	 * the sequence of the lock acquisition. Here we do "ep->lock" then the
	 * wait queue head lock when unregistering the wait queue. The wakeup
	 * callback will run by holding the wait queue head lock and will call
	 * our callback that will try to get "ep->lock".
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_lock);
	if (ep_is_linked(&epi->fllink))
		list_del_init(&epi->fllink);
	spin_unlock(&file->f_lock);

	rb_erase(&epi->rbn, &ep->rbr);

	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	/* At this point it is safe to free the eventpoll item */
	kmem_cache_free(epi_cache, epi);

	atomic_long_dec(&ep->user->epoll_watches);

	return 0;
}
static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting for this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->mtx" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);

	/*
	 * Walks through the whole tree by unregistering poll callbacks.
	 */
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
	}

	/*
	 * Walks through the whole tree by freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * holding "epmutex" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 */
	while ((rbp = rb_first(&ep->rbr)) != NULL) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
	}

	mutex_unlock(&epmutex);
	mutex_destroy(&ep->mtx);
	free_uid(ep->user);
	kfree(ep);
}

static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_free(ep);

	return 0;
}
static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct epitem *epi, *tmp;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);
	list_for_each_entry_safe(epi, tmp, head, rdllink) {
		pt._key = epi->event.events;
		if (epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
		    epi->event.events)
			return POLLIN | POLLRDNORM;
		else {
			/*
			 * Item has been dropped into the ready list by the poll
			 * callback, but it's not actually ready, as far as
			 * the caller-requested events are concerned. We can
			 * remove it here.
			 */
			list_del_init(&epi->rdllink);
		}
	}

	return 0;
}
static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
{
	return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
}

static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	int pollflags;
	struct eventpoll *ep = file->private_data;

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/*
	 * Proceed to find out if wanted events are really available inside
	 * the ready list. This needs to be done under ep_call_nested()
	 * supervision, since the call to f_op->poll() done on listed files
	 * could re-enter here.
	 */
	pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
				   ep_poll_readyevents_proc, ep, ep, current);

	return pollflags != -1 ? pollflags : 0;
}

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
	.release	= ep_eventpoll_release,
	.poll		= ep_eventpoll_poll,
	.llseek		= noop_llseek,
};
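
/*
 * Illustrative sketch, not part of the original file: because
 * eventpoll_fops implements .poll, an epoll fd is itself pollable and can
 * be nested inside another epoll set from userspace (fd names below are
 * illustrative):
 *
 *	int inner = epoll_create1(0);
 *	int outer = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = inner };
 *
 *	// "outer" reports EPOLLIN when "inner" has ready events queued.
 *	epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);
 */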
/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need to have this facility to correctly clean up files that
 * are closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct list_head *lsthead = &file->f_ep_links;
	struct eventpoll *ep;
	struct epitem *epi;

	/*
	 * We don't want to get "file->f_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * So, for example, epoll_ctl() cannot hit here since if we reach this
	 * point, the file counter already went to zero and fget() would fail.
	 * The only hit might come from ep_free() but holding the mutex
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->mtx" after "epmutex" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 *
	 * Besides, ep_remove() acquires the lock, so we can't hold it here.
	 */
	mutex_lock(&epmutex);

	while (!list_empty(lsthead)) {
		epi = list_first_entry(lsthead, struct epitem, fllink);

		ep = epi->ep;
		list_del_init(&epi->fllink);
		mutex_lock_nested(&ep->mtx, 0);
		ep_remove(ep, epi);
		mutex_unlock(&ep->mtx);
	}

	mutex_unlock(&epmutex);
}
static int ep_alloc(struct eventpoll **pep)
{
	int error;
	struct user_struct *user;
	struct eventpoll *ep;

	user = get_current_user();
	error = -ENOMEM;
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (unlikely(!ep))
		goto free_uid;

	spin_lock_init(&ep->lock);
	mutex_init(&ep->mtx);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;
	ep->ovflist = EP_UNACTIVE_PTR;
	ep->user = user;

	*pep = ep;

	return 0;

free_uid:
	free_uid(user);
	return error;
}

/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	for (rbp = ep->rbr.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			epir = epi;
			break;
		}
	}

	return epir;
}
/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;

	if ((unsigned long)key & POLLFREE) {
		ep_pwq_from_wait(wait)->whead = NULL;
		/*
		 * whead = NULL above can race with ep_remove_wait_queue()
		 * which can do another remove_wait_queue() after us, so we
		 * can't use __remove_wait_queue(). whead->lock is held by
		 * the caller.
		 */
		list_del_init(&wait->task_list);
	}

	spin_lock_irqsave(&ep->lock, flags);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD is issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto out_unlock;

	/*
	 * Check the events coming with the callback. At this stage, not
	 * every device reports the events in the "key" parameter of the
	 * callback. We need to be able to handle both cases here, hence the
	 * test for "key" != NULL before the event match test.
	 */
	if (key && !((unsigned long) key & epi->event.events))
		goto out_unlock;

	/*
	 * If we are transferring events to userspace, we can hold no locks
	 * (because we're accessing user memory, and because of linux f_op->poll()
	 * semantics). All the events that happen during that period of time are
	 * chained in ep->ovflist and requeued later on.
	 */
	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
		if (epi->next == EP_UNACTIVE_PTR) {
			epi->next = ep->ovflist;
			ep->ovflist = epi;
		}
		goto out_unlock;
	}

	/* If this file is already in the ready list we exit soon */
	if (!ep_is_linked(&epi->rdllink))
		list_add_tail(&epi->rdllink, &ep->rdllist);

	/*
	 * Wake up (if active) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		wake_up_locked(&ep->wq);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

out_unlock:
	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 1;
}
/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = ep_item_from_epqueue(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
	struct epitem *epic;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color(&epi->rbn, &ep->rbr);
}
#define PATH_ARR_SIZE 5
/*
 * These are the number of paths of length 1 to 5, that we are allowing to
 * emanate from a single file of interest. For example, we allow 1000 paths
 * of length 1, to emanate from each file of interest. This essentially
 * represents the potential wakeup paths, which need to be limited in order
 * to avoid massive uncontrolled wakeup storms. The common use case should be
 * a single ep which is connected to n file sources. In this case each file
 * source has 1 path of length 1. Thus, the numbers below should be more than
 * sufficient. These path limits are enforced during an EPOLL_CTL_ADD
 * operation, since a modify and delete can't add additional paths.
 * Protected by the epmutex.
 */
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];

static int path_count_inc(int nests)
{
	/* Allow an arbitrary number of depth 1 paths */
	if (nests == 0)
		return 0;

	if (++path_count[nests] > path_limits[nests])
		return -1;
	return 0;
}

static void path_count_init(void)
{
	int i;

	for (i = 0; i < PATH_ARR_SIZE; i++)
		path_count[i] = 0;
}
static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
{
	int error = 0;
	struct file *file = priv;
	struct file *child_file;
	struct epitem *epi;

	list_for_each_entry(epi, &file->f_ep_links, fllink) {
		child_file = epi->ep->file;
		if (is_file_epoll(child_file)) {
			if (list_empty(&child_file->f_ep_links)) {
				if (path_count_inc(call_nests)) {
					error = -1;
					break;
				}
			} else {
				error = ep_call_nested(&poll_loop_ncalls,
							EP_MAX_NESTS,
							reverse_path_check_proc,
							child_file, child_file,
							current);
			}
			if (error != 0)
				break;
		} else {
			printk(KERN_ERR "reverse_path_check_proc: "
				"file is not an ep!\n");
		}
	}
	return error;
}
/**
 * reverse_path_check - The tfile_check_list is a list of file *, which have
 *                      links that are proposed to be newly added. We need to
 *                      make sure that those added links don't add too many
 *                      paths such that we will spend all our time waking up
 *                      eventpoll objects.
 *
 * Returns: Returns zero if the proposed links don't create too many paths,
 *          -1 otherwise.
 */
static int reverse_path_check(void)
{
	int length = 0;
	int error = 0;
	struct file *current_file;

	/* let's call this for all tfiles */
	list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
		length++;
		path_count_init();
		error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
					reverse_path_check_proc, current_file,
					current_file, current);
		if (error)
			break;
	}
	return error;
}
/*
 * Must be called with "mtx" held.
 */
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	long user_watches;
	struct epitem *epi;
	struct ep_pqueue epq;

	user_watches = atomic_long_read(&ep->user->epoll_watches);
	if (unlikely(user_watches >= max_user_watches))
		return -ENOSPC;
	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
		return -ENOMEM;

	/* Item initialization follows here ... */
	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;
	ep_set_ffd(&epi->ffd, tfile, fd);
	epi->event = *event;
	epi->nwait = 0;
	epi->next = EP_UNACTIVE_PTR;

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
	epq.pt._key = event->events;

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function. Note that after
	 * this operation completes, the poll callback can start hitting
	 * the new item.
	 */
	revents = tfile->f_op->poll(tfile, &epq.pt);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely an allocation for a wait queue failed due
	 * to high memory pressure.
	 */
	error = -ENOMEM;
	if (epi->nwait < 0)
		goto error_unregister;

	/* Add the current item to the list of active epoll hooks for this file */
	spin_lock(&tfile->f_lock);
	list_add_tail(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_lock);

	/*
	 * Add the current item to the RB tree. All RB tree operations are
	 * protected by "mtx", and ep_insert() is called with "mtx" held.
	 */
	ep_rbtree_insert(ep, epi);

	/* now check if we've created too many backpaths */
	error = -EINVAL;
	if (reverse_path_check())
		goto error_remove_epi;

	/* We have to drop the new item inside our item list to keep track of it */
	spin_lock_irqsave(&ep->lock, flags);

	/* If the file is already "ready" we drop it inside the ready list */
	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	spin_unlock_irqrestore(&ep->lock, flags);

	atomic_long_inc(&ep->user->epoll_watches);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 0;

error_remove_epi:
	spin_lock(&tfile->f_lock);
	if (ep_is_linked(&epi->fllink))
		list_del_init(&epi->fllink);
	spin_unlock(&tfile->f_lock);

	rb_erase(&epi->rbn, &ep->rbr);

error_unregister:
	ep_unregister_pollwait(ep, epi);

	/*
	 * We need to do this because an event could have arrived on some
	 * allocated wait queue. Note that we don't care about the ep->ovflist
	 * list, since that is used/cleaned only inside a section bound by "mtx".
	 * And ep_insert() is called with "mtx" held.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	kmem_cache_free(epi_cache, epi);

	return error;
}
/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status. Must be called with "mtx" held.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
	int pwake = 0;
	unsigned int revents;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);

	/*
	 * Set the new event interest mask before calling f_op->poll();
	 * otherwise we might miss an event that happens between the
	 * f_op->poll() call and the new event set registering.
	 */
	epi->event.events = event->events;
	pt._key = event->events;
	epi->event.data = event->data; /* protected by mtx */

	/*
	 * Get current event bits. We can safely use the file* here because
	 * its usage count has been increased by the caller of this function.
	 */
	revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt);

	/*
	 * If the item is "hot" and it is not registered inside the ready
	 * list, push it inside.
	 */
	if (revents & event->events) {
		spin_lock_irq(&ep->lock);
		if (!ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);

			/* Notify waiting tasks that events are available */
			if (waitqueue_active(&ep->wq))
				wake_up_locked(&ep->wq);
			if (waitqueue_active(&ep->poll_wait))
				pwake++;
		}
		spin_unlock_irq(&ep->lock);
	}

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 0;
}
static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct ep_send_events_data *esed = priv;
	int eventcnt;
	unsigned int revents;
	struct epitem *epi;
	struct epoll_event __user *uevent;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);

	/*
	 * We can loop without lock because we are passed a task private list.
	 * Items cannot vanish during the loop because ep_scan_ready_list() is
	 * holding "mtx" during this call.
	 */
	for (eventcnt = 0, uevent = esed->events;
	     !list_empty(head) && eventcnt < esed->maxevents;) {
		epi = list_first_entry(head, struct epitem, rdllink);

		list_del_init(&epi->rdllink);

		pt._key = epi->event.events;
		revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
			epi->event.events;

		/*
		 * If the event mask intersects the caller-requested one,
		 * deliver the event to userspace. Again, ep_scan_ready_list()
		 * is holding "mtx", so no operations coming from userspace
		 * can change the item.
		 */
		if (revents) {
			if (__put_user(revents, &uevent->events) ||
			    __put_user(epi->event.data, &uevent->data)) {
				list_add(&epi->rdllink, head);
				return eventcnt ? eventcnt : -EFAULT;
			}
			eventcnt++;
			uevent++;
			if (epi->event.events & EPOLLONESHOT)
				epi->event.events &= EP_PRIVATE_BITS;
			else if (!(epi->event.events & EPOLLET)) {
				/*
				 * If this file has been added with Level
				 * Triggered mode, we need to insert it back
				 * inside the ready list, so that the next call
				 * to epoll_wait() will check again the events
				 * availability. At this point, no one can insert
				 * into ep->rdllist besides us. The epoll_ctl()
				 * callers are locked out by
				 * ep_scan_ready_list() holding "mtx" and the
				 * poll callback will queue them in ep->ovflist.
				 */
				list_add_tail(&epi->rdllink, &ep->rdllist);
			}
		}
	}

	return eventcnt;
}
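
/*
 * Illustrative sketch, not part of the original file: the level/edge split
 * handled above, seen from userspace. In the default level-triggered mode
 * the item is re-queued, so an unread descriptor keeps being reported; with
 * EPOLLET it is reported once per readiness transition ("sock" below is an
 * illustrative descriptor):
 *
 *	struct epoll_event ev = { .events = EPOLLIN | EPOLLET, .data.fd = sock };
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev);
 *	// After a wakeup, an EPOLLET consumer must drain sock until read()
 *	// returns EAGAIN, or it may never be woken for that data again.
 */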
static int ep_send_events(struct eventpoll *ep,
			  struct epoll_event __user *events, int maxevents)
{
	struct ep_send_events_data esed;

	esed.maxevents = maxevents;
	esed.events = events;

	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
}

static inline struct timespec ep_set_mstimeout(long ms)
{
	struct timespec now, ts = {
		.tv_sec = ms / MSEC_PER_SEC,
		.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
	};

	ktime_get_ts(&now);
	return timespec_add_safe(now, ts);
}
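
/*
 * Worked example (editorial, not part of the original file): for ms = 1250,
 * tv_sec = 1250 / 1000 = 1 and tv_nsec = 1000000 * (1250 % 1000) =
 * 250000000, i.e. 1.25s, which is then added to the current monotonic time
 * to form the absolute deadline used by the hrtimer sleep in ep_poll().
 */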
/**
 * ep_poll - Retrieves ready events, and delivers them to the caller supplied
 *           event buffer.
 *
 * @ep: Pointer to the eventpoll context.
 * @events: Pointer to the userspace buffer where the ready events should be
 *          stored.
 * @maxevents: Size (in terms of number of events) of the caller event buffer.
 * @timeout: Maximum timeout for the ready events fetch operation, in
 *           milliseconds. If the @timeout is zero, the function will not block,
 *           while if the @timeout is less than zero, the function will block
 *           until at least one event has been retrieved (or an error
 *           occurred).
 *
 * Returns: Returns the number of ready events which have been fetched, or an
 *          error code, in case of error.
 */
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout)
{
	int res = 0, eavail, timed_out = 0;
	unsigned long flags;
	long slack = 0;
	wait_queue_t wait;
	ktime_t expires, *to = NULL;

	if (timeout > 0) {
		struct timespec end_time = ep_set_mstimeout(timeout);

		slack = select_estimate_accuracy(&end_time);
		to = &expires;
		*to = timespec_to_ktime(end_time);
	} else if (timeout == 0) {
		/*
		 * Avoid the unnecessary trip to the wait queue loop, if the
		 * caller specified a non blocking operation.
		 */
		timed_out = 1;
		spin_lock_irqsave(&ep->lock, flags);
		goto check_events;
	}

fetch_events:
	spin_lock_irqsave(&ep->lock, flags);

	if (!ep_events_available(ep)) {
		/*
		 * We don't have any available event to return to the caller.
		 * We need to sleep here, and we will be woken up by
		 * ep_poll_callback() when events become available.
		 */
		init_waitqueue_entry(&wait, current);
		__add_wait_queue_exclusive(&ep->wq, &wait);

		for (;;) {
			/*
			 * We don't want to sleep if the ep_poll_callback() sends us
			 * a wakeup in between. That's why we set the task state
			 * to TASK_INTERRUPTIBLE before doing the checks.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
			if (ep_events_available(ep) || timed_out)
				break;
			if (signal_pending(current)) {
				res = -EINTR;
				break;
			}

			spin_unlock_irqrestore(&ep->lock, flags);
			if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
				timed_out = 1;

			spin_lock_irqsave(&ep->lock, flags);
		}
		__remove_wait_queue(&ep->wq, &wait);

		set_current_state(TASK_RUNNING);
	}
check_events:
	/* Is it worth to try to dig for events ? */
	eavail = ep_events_available(ep);

	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Try to transfer events to user space. In case we get 0 events and
	 * there's still timeout left over, we go trying again in search of
	 * more luck.
	 */
	if (!res && eavail &&
	    !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
		goto fetch_events;

	return res;
}
/**
 * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
 *                      API, to verify that adding an epoll file inside another
 *                      epoll structure, does not violate the constraints, in
 *                      terms of closed loops, or too deep chains (which can
 *                      result in excessive stack usage).
 *
 * @priv: Pointer to the epoll file to be currently checked.
 * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
 *          data structure pointer.
 * @call_nests: Current depth of the @ep_call_nested() call stack.
 *
 * Returns: Returns zero if adding the epoll @file inside current epoll
 *          structure @ep does not violate the constraints, or -1 otherwise.
 */
static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
{
	int error = 0;
	struct file *file = priv;
	struct eventpoll *ep = file->private_data;
	struct eventpoll *ep_tovisit;
	struct rb_node *rbp;
	struct epitem *epi;

	mutex_lock_nested(&ep->mtx, call_nests + 1);
	ep->visited = 1;
	list_add(&ep->visited_list_link, &visited_list);
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);
		if (unlikely(is_file_epoll(epi->ffd.file))) {
			ep_tovisit = epi->ffd.file->private_data;
			if (ep_tovisit->visited)
				continue;
			error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
					ep_loop_check_proc, epi->ffd.file,
					ep_tovisit, current);
			if (error != 0)
				break;
		} else {
			/*
			 * If we've reached a file that is not associated with
			 * an ep, then we need to check if the newly added
			 * links are going to add too many wakeup paths. We do
			 * this by adding it to the tfile_check_list, if it's
			 * not already there, and calling reverse_path_check()
			 * during ep_insert().
			 */
			if (list_empty(&epi->ffd.file->f_tfile_llink))
				list_add(&epi->ffd.file->f_tfile_llink,
					 &tfile_check_list);
		}
	}
	mutex_unlock(&ep->mtx);

	return error;
}
/**
 * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
 *                 inside another epoll file (represented by @ep) does not
 *                 create closed loops or too deep chains.
 *
 * @ep: Pointer to the epoll private data structure.
 * @file: Pointer to the epoll file to be checked.
 *
 * Returns: Returns zero if adding the epoll @file inside current epoll
 *          structure @ep does not violate the constraints, or -1 otherwise.
 */
static int ep_loop_check(struct eventpoll *ep, struct file *file)
{
	int ret;
	struct eventpoll *ep_cur, *ep_next;

	ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
			      ep_loop_check_proc, file, ep, current);
	/* clear visited list */
	list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
				 visited_list_link) {
		ep_cur->visited = 0;
		list_del(&ep_cur->visited_list_link);
	}
	return ret;
}
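
/*
 * Illustrative sketch, not part of the original file: the kind of cycle
 * this check rejects. The second epoll_ctl() below would close the loop
 * e1 -> e2 -> e1, so the add is refused (epoll_ctl(2) documents this case
 * as ELOOP):
 *
 *	int e1 = epoll_create1(0), e2 = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	ev.data.fd = e2;
 *	epoll_ctl(e1, EPOLL_CTL_ADD, e2, &ev);	// ok
 *	ev.data.fd = e1;
 *	epoll_ctl(e2, EPOLL_CTL_ADD, e1, &ev);	// rejected: would close a cycle
 */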
static void clear_tfile_check_list(void)
{
	struct file *file;

	/* first clear the tfile_check_list */
	while (!list_empty(&tfile_check_list)) {
		file = list_first_entry(&tfile_check_list, struct file,
					f_tfile_llink);
		list_del_init(&file->f_tfile_llink);
	}
	INIT_LIST_HEAD(&tfile_check_list);
}

/*
 * Open an eventpoll file descriptor.
 */
SYSCALL_DEFINE1(epoll_create1, int, flags)
{
	int error, fd;
	struct eventpoll *ep = NULL;
	struct file *file;

	/* Check the EPOLL_* constant for consistency. */
	BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);

	if (flags & ~EPOLL_CLOEXEC)
		return -EINVAL;
	/*
	 * Create the internal data structure ("struct eventpoll").
	 */
	error = ep_alloc(&ep);
	if (error < 0)
		return error;
	/*
	 * Creates all the items needed to setup an eventpoll file. That is,
	 * a file structure and a free file descriptor.
	 */
	fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
	if (fd < 0) {
		error = fd;
		goto out_free_ep;
	}
	file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
				  O_RDWR | (flags & O_CLOEXEC));
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto out_free_fd;
	}
	fd_install(fd, file);
	ep->file = file;
	return fd;

out_free_fd:
	put_unused_fd(fd);
out_free_ep:
	ep_free(ep);
	return error;
}
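
/*
 * Example (illustrative userspace usage):
 *
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	if (epfd < 0)
 *		perror("epoll_create1");	errno may be EMFILE, ENFILE
 *						or ENOMEM on resource limits
 *
 * EPOLL_CLOEXEC is the only flag accepted by the check above; any other
 * bit fails with -EINVAL.
 */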

SYSCALL_DEFINE1(epoll_create, int, size)
{
	if (size <= 0)
		return -EINVAL;

	return sys_epoll_create1(0);
}
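
/*
 * As the code above shows, the @size argument is only sanity-checked and
 * then discarded; it survives purely for backward compatibility with the
 * original epoll_create(2) ABI, where it hinted at the expected number of
 * watched file descriptors.
 */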

/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set.
 */
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
		struct epoll_event __user *, event)
{
	int error;
	int did_lock_epmutex = 0;
	struct file *file, *tfile;
	struct eventpoll *ep;
	struct epitem *epi;
	struct epoll_event epds;

	error = -EFAULT;
	if (ep_op_has_event(op) &&
	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
		goto error_return;

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto error_return;

	/* Get the "struct file *" for the target file */
	tfile = fget(fd);
	if (!tfile)
		goto error_fput;

	/* The target file descriptor must support poll */
	error = -EPERM;
	if (!tfile->f_op || !tfile->f_op->poll)
		goto error_tgt_fput;

	/*
	 * We have to check that the file structure underneath the file descriptor
	 * the user passed to us _is_ an eventpoll file. And also we do not permit
	 * adding an epoll file descriptor inside itself.
	 */
	error = -EINVAL;
	if (file == tfile || !is_file_epoll(file))
		goto error_tgt_fput;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	/*
	 * When we insert an epoll file descriptor inside another epoll file
	 * descriptor, there is the chance of creating closed loops, which are
	 * better handled here than in more critical paths. While we are
	 * checking for loops we also determine the list of files reachable
	 * and hang them on the tfile_check_list, so we can check that we
	 * haven't created too many possible wakeup paths.
	 *
	 * We need to hold the epmutex across both ep_insert and ep_remove
	 * because we want to make sure we are looking at a coherent view of
	 * the epoll network.
	 */
	if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
		mutex_lock(&epmutex);
		did_lock_epmutex = 1;
	}
	if (op == EPOLL_CTL_ADD) {
		if (is_file_epoll(tfile)) {
			error = -ELOOP;
			if (ep_loop_check(ep, tfile) != 0)
				goto error_tgt_fput;
		} else
			list_add(&tfile->f_tfile_llink, &tfile_check_list);
	}

	mutex_lock_nested(&ep->mtx, 0);

	/*
	 * Try to lookup the file inside our RB tree. Since we grabbed "mtx"
	 * above, we can be sure to be able to use the item looked up by
	 * ep_find() till we release the mutex.
	 */
	epi = ep_find(ep, tfile, fd);

	error = -EINVAL;
	switch (op) {
	case EPOLL_CTL_ADD:
		if (!epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_insert(ep, &epds, tfile, fd);
		} else
			error = -EEXIST;
		clear_tfile_check_list();
		break;
	case EPOLL_CTL_DEL:
		if (epi)
			error = ep_remove(ep, epi);
		else
			error = -ENOENT;
		break;
	case EPOLL_CTL_MOD:
		if (epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_modify(ep, epi, &epds);
		} else
			error = -ENOENT;
		break;
	}
	mutex_unlock(&ep->mtx);

error_tgt_fput:
	if (did_lock_epmutex)
		mutex_unlock(&epmutex);

	fput(tfile);
error_fput:
	fput(file);
error_return:

	return error;
}
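
/*
 * Example (illustrative userspace usage of the three operations):
 *
 *	struct epoll_event ev = {
 *		.events = EPOLLIN | EPOLLOUT,
 *		.data.fd = sock,
 *	};
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev);	start watching sock
 *	ev.events = EPOLLIN;
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, sock, &ev);	change the event mask
 *	epoll_ctl(epfd, EPOLL_CTL_DEL, sock, NULL);	stop watching sock
 *
 * Note that, as the switch above shows, POLLERR and POLLHUP are always
 * implicitly added to the requested mask on ADD and MOD, and DEL ignores
 * the @event argument entirely (ep_op_has_event() is false for it).
 */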

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_wait(2).
 */
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
		int, maxevents, int, timeout)
{
	int error;
	struct file *file;
	struct eventpoll *ep;

	/* The maximum number of events must be greater than zero */
	if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
		return -EINVAL;

	/* Verify that the area passed by the user is writeable */
	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
		error = -EFAULT;
		goto error_return;
	}

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto error_return;

	/*
	 * We have to check that the file structure underneath the fd
	 * the user passed to us _is_ an eventpoll file.
	 */
	error = -EINVAL;
	if (!is_file_epoll(file))
		goto error_fput;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	/* Time to fish for events ... */
	error = ep_poll(ep, events, maxevents, timeout);

error_fput:
	fput(file);
error_return:

	return error;
}
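
/*
 * Example (illustrative userspace event loop; handle_io() is a hypothetical
 * helper, not part of this file):
 *
 *	struct epoll_event events[64];
 *	int i, n;
 *
 *	n = epoll_wait(epfd, events, 64, -1);	block until events arrive
 *	for (i = 0; i < n; i++)
 *		handle_io(events[i].data.fd, events[i].events);
 *
 * A @timeout of -1 blocks indefinitely, 0 polls and returns immediately,
 * and a positive value is an upper bound in milliseconds; the return value
 * is the number of entries filled into @events.
 */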

#ifdef HAVE_SET_RESTORE_SIGMASK

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_pwait(2).
 */
SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
		int, maxevents, int, timeout, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	int error;
	sigset_t ksigmask, sigsaved;

	/*
	 * If the caller wants a certain signal mask to be set during the wait,
	 * we apply it here.
	 */
	if (sigmask) {
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;
		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	error = sys_epoll_wait(epfd, events, maxevents, timeout);

	/*
	 * If we changed the signal mask, we need to restore the original one.
	 * In case we've got a signal while waiting, we do not restore the
	 * signal mask yet, and we allow do_signal() to deliver the signal on
	 * the way back to userspace, before the signal mask is restored.
	 */
	if (sigmask) {
		if (error == -EINTR) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		} else
			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	}

	return error;
}

#endif /* HAVE_SET_RESTORE_SIGMASK */
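
/*
 * Example (illustrative): the classic reason to prefer epoll_pwait(2) over
 * epoll_wait(2) is to change the signal mask and wait in a single atomic
 * step, closing the race a separate sigprocmask(2) call would leave open.
 * Via the glibc wrapper, which supplies @sigsetsize:
 *
 *	sigset_t mask;
 *
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGINT);	allow SIGINT during the wait only
 *	n = epoll_pwait(epfd, events, 64, -1, &mask);
 *
 * If SIGINT arrives while blocked, epoll_pwait() returns -1 with errno set
 * to EINTR, and the saved_sigmask handling above restores the original
 * mask on the way back to userspace, after the signal is delivered.
 */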

static int __init eventpoll_init(void)
{
	struct sysinfo si;

	si_meminfo(&si);
	/*
	 * Allows top 4% of lowmem to be allocated for epoll watches (per user).
	 */
	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
		EP_ITEM_COST;
	BUG_ON(max_user_watches < 0);
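
	/*
	 * Worked example (illustrative, round numbers assumed): with 1 GiB
	 * of lowmem and an EP_ITEM_COST of 128 bytes, the limit comes out to
	 * roughly (2^30 / 25) / 128 ~= 335,000 watches per user. The real
	 * EP_ITEM_COST is sizeof(struct epitem) + sizeof(struct eppoll_entry),
	 * so the actual figure varies with architecture and configuration.
	 */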

	/*
	 * Initialize the structure used to perform epoll file descriptor
	 * inclusion loops checks.
	 */
	ep_nested_calls_init(&poll_loop_ncalls);

	/* Initialize the structure used to perform safe poll wait head wake ups */
	ep_nested_calls_init(&poll_safewake_ncalls);

	/* Initialize the structure used to perform file's f_op->poll() calls */
	ep_nested_calls_init(&poll_readywalk_ncalls);

	/* Allocates slab cache used to allocate "struct epitem" items */
	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/* Allocates slab cache used to allocate "struct eppoll_entry" */
	pwq_cache = kmem_cache_create("eventpoll_pwq",
			sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL);

	return 0;
}
fs_initcall(eventpoll_init);