/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	aio_wq = create_workqueue("aio");

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}
static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i=0; i<info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		down_write(&ctx->mm->mmap_sem);
		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
		up_write(&ctx->mm->mmap_sem);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
				  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
				  0);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;		/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring, KM_USER0);

	return 0;
}
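
/*
 * Ring sizing, worked through for illustration only.  Assuming a 4096-byte
 * page, a 32-byte struct io_event (four 64-bit fields) and the 32-byte fixed
 * header of struct aio_ring: an io_setup(128, ...) request reaches here with
 * max_reqs = 128, so nr_events = 130 and size = 32 + 130 * 32 = 4192 bytes,
 * which rounds up to nr_pages = 2.  The ring is then grown to fill the
 * mapping: nr_events = (2 * 4096 - 32) / 32 = 255 usable event slots for the
 * 128 that were requested.
 */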

/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic(, km). Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr, km) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event, km) do {				\
	struct io_event *__event = (event);				\
	(void)__event;							\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)
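
/*
 * Indexing, worked through for illustration only (same 4096-byte page and
 * 32-byte io_event assumptions as above): AIO_EVENTS_PER_PAGE is 128,
 * AIO_EVENTS_FIRST_PAGE is 127, so AIO_EVENTS_OFFSET is 1.  Event 126 maps
 * to pos 127, the last slot of ring_pages[0]; event 127 maps to pos 128,
 * slot 0 of ring_pages[1].  The offset simply skips the struct aio_ring
 * header occupying the start of the first page.
 */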

static void ctx_rcu_free(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
	unsigned nr_events = ctx->max_reqs;

	kmem_cache_free(kioctx_cachep, ctx);

	if (nr_events) {
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - nr_events > aio_nr);
		aio_nr -= nr_events;
		spin_unlock(&aio_nr_lock);
	}
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
	BUG_ON(ctx->reqs_active);

	cancel_delayed_work(&ctx->wq);
	cancel_work_sync(&ctx->wq.work);
	aio_free_ring(ctx);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
	pr_debug("__put_ioctx: freeing %p\n", ctx);
	call_rcu(&ctx->rcu_head, ctx_rcu_free);
}

#define get_ioctx(kioctx) do {						\
	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
	atomic_inc(&(kioctx)->users);					\
} while (0)
#define put_ioctx(kioctx) do {						\
	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
	if (unlikely(atomic_dec_and_test(&(kioctx)->users))) 		\
		__put_ioctx(kioctx);					\
} while (0)

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;
	int did_sync = 0;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if ((unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 1);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	do {
		spin_lock_bh(&aio_nr_lock);
		if (aio_nr + nr_events > aio_max_nr ||
		    aio_nr + nr_events < aio_nr)
			ctx->max_reqs = 0;
		else
			aio_nr += ctx->max_reqs;
		spin_unlock_bh(&aio_nr_lock);
		if (ctx->max_reqs || did_sync)
			break;

		/* wait for rcu callbacks to have completed before giving up */
		synchronize_rcu();
		did_sync = 1;
		ctx->max_reqs = nr_events;
	} while (1);

	if (ctx->max_reqs == 0)
		goto out_cleanup;

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	__put_ioctx(ctx);
	return ERR_PTR(-EAGAIN);

out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	ctx = ERR_PTR(-ENOMEM);

	dprintk("aio: error allocating ioctx %p\n", ctx);
	return ctx;
}

/* aio_cancel_all
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void aio_cancel_all(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct io_event res;

	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);
		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
		if (cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	spin_lock_irq(&ctx->ctx_lock);
	if (!ctx->reqs_active)
		goto out;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		spin_unlock_irq(&ctx->ctx_lock);
		io_schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&ctx->ctx_lock);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);

out:
	spin_unlock_irq(&ctx->ctx_lock);
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}
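
/*
 * Typical sync-kiocb usage, sketched for illustration.  This mirrors what
 * helpers such as do_sync_read() in fs/read_write.c do; exact details vary
 * between kernel versions:
 *
 *	struct kiocb kiocb;
 *	ssize_t ret;
 *
 *	init_sync_kiocb(&kiocb, filp);
 *	kiocb.ki_pos = *ppos;
 *	kiocb.ki_left = len;
 *	ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
 *	if (ret == -EIOCBQUEUED)
 *		ret = wait_on_sync_kiocb(&kiocb);
 *
 * aio_complete() short-circuits for sync kiocbs: it stores the result in
 * ki_user_data and wakes the task recorded in ki_obj.tsk, which is the value
 * wait_on_sync_kiocb() returns above.
 */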

/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;

	while (!hlist_empty(&mm->ioctx_list)) {
		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
		hlist_del_rcu(&ctx->list);

		aio_cancel_all(ctx);

		wait_for_all_aios(ctx);
		/*
		 * Ensure we don't leave the ctx on the aio_wq
		 */
		cancel_work_sync(&ctx->wq.work);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		put_ioctx(ctx);
	}
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 * of the kioctx so that the kioctx stays around until all requests are
 * complete.  Returns NULL if no requests are free.
 *
 * Returns with kiocb->users set to 2.  The io submit code path holds
 * an extra reference while submitting the i/o.
 * This prevents races between the aio code path referencing the
 * req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;
	struct aio_ring *ring;
	int okay = 0;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	req->ki_iovec = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);
	req->ki_eventfd = ERR_PTR(-EINVAL);

	/* Check if the completion queue has enough free space to
	 * accept an event from this io.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
		list_add(&req->ki_list, &ctx->active_reqs);
		ctx->reqs_active++;
		okay = 1;
	}
	kunmap_atomic(ring, KM_USER0);
	spin_unlock_irq(&ctx->ctx_lock);

	if (!okay) {
		kmem_cache_free(kiocb_cachep, req);
		req = NULL;
	}

	return req;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;
	/* Handle a potential starvation case -- should be exceedingly rare as
	 * requests will be stuck on fput_head only if the aio_fput_routine is
	 * delayed and the requests were the last user of the struct file.
	 */
	req = __aio_get_req(ctx);
	if (unlikely(NULL == req)) {
		aio_fput_routine(NULL);
		req = __aio_get_req(ctx);
	}
	return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	assert_spin_locked(&ctx->ctx_lock);

	if (!IS_ERR(req->ki_eventfd))
		fput(req->ki_eventfd);
	if (req->ki_dtor)
		req->ki_dtor(req);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up(&ctx->wait);
}

static void aio_fput_routine(struct work_struct *data)
{
	spin_lock_irq(&fput_lock);
	while (likely(!list_empty(&fput_head))) {
		struct kiocb *req = list_kiocb(fput_head.next);
		struct kioctx *ctx = req->ki_ctx;

		list_del(&req->ki_list);
		spin_unlock_irq(&fput_lock);

		/* Complete the fput */
		__fput(req->ki_filp);

		/* Link the iocb into the context's free list */
		spin_lock_irq(&ctx->ctx_lock);
		really_put_req(ctx, req);
		spin_unlock_irq(&ctx->ctx_lock);

		put_ioctx(ctx);
		spin_lock_irq(&fput_lock);
	}
	spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 *	Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
		req, atomic_long_read(&req->ki_filp->f_count));

	assert_spin_locked(&ctx->ctx_lock);

	req->ki_users--;
	BUG_ON(req->ki_users < 0);
	if (likely(req->ki_users))
		return 0;
	list_del(&req->ki_list);		/* remove from active_reqs */
	req->ki_cancel = NULL;
	req->ki_retry = NULL;

	/* Must be done under the lock to serialise against cancellation.
	 * Call this aio_fput as it duplicates fput via the fput_work.
	 */
	if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
		get_ioctx(ctx);
		spin_lock(&fput_lock);
		list_add(&req->ki_list, &fput_head);
		spin_unlock(&fput_lock);
		queue_work(aio_wq, &fput_work);
	} else
		really_put_req(ctx, req);
	return 1;
}

/* aio_put_req
 *	Returns true if this put was the last user of the kiocb,
 *	false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
	struct kioctx *ctx = req->ki_ctx;
	int ret;

	spin_lock_irq(&ctx->ctx_lock);
	ret = __aio_put_req(ctx, req);
	spin_unlock_irq(&ctx->ctx_lock);
	return ret;
}

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx = NULL;
	struct hlist_node *n;

	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
		if (ctx->user_id == ctx_id && !ctx->dead) {
			get_ioctx(ctx);
			break;
		}
	}

	rcu_read_unlock();
	return ctx;
}

/*
 * use_mm
 *	Makes the calling kernel thread take on the specified
 *	mm context.
 *	Called by the retry thread to execute retries within the
 *	iocb issuer's mm context, so that copy_from/to_user
 *	operations work seamlessly for aio.
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
static void use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	task_lock(tsk);
	active_mm = tsk->active_mm;
	atomic_inc(&mm->mm_count);
	tsk->mm = mm;
	tsk->active_mm = mm;
	switch_mm(active_mm, mm, tsk);
	task_unlock(tsk);

	mmdrop(active_mm);
}

/*
 * unuse_mm
 *	Reverses the effect of use_mm, i.e. releases the
 *	specified mm context which was earlier taken on
 *	by the calling kernel thread
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
static void unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	task_unlock(tsk);
}

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;

	assert_spin_locked(&ctx->ctx_lock);

	if (list_empty(&iocb->ki_run_list)) {
		list_add_tail(&iocb->ki_run_list,
			&ctx->run_list);
		return 1;
	}
	return 0;
}

/* aio_run_iocb
 *	This is the core aio execution routine. It is
 *	invoked both for initial i/o submission and
 *	subsequent retries via the aio_kick_handler.
 *	Expects to be invoked with iocb->ki_ctx->lock
 *	already held. The lock is released and reacquired
 *	as needed during processing.
 *
 * Calls the iocb retry method (already setup for the
 * iocb on initial submission) for operation specific
 * handling, but takes care of most of common retry
 * execution details for a given iocb. The retry method
 * needs to be non-blocking as far as possible, to avoid
 * holding up other iocbs waiting to be serviced by the
 * retry kernel thread.
 *
 * The trickier parts in this code have to do with
 * ensuring that only one retry instance is in progress
 * for a given iocb at any time. Providing that guarantee
 * simplifies the coding of individual aio operations as
 * it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	ssize_t (*retry)(struct kiocb *);
	ssize_t ret;

	if (!(retry = iocb->ki_retry)) {
		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
		return 0;
	}

	/*
	 * We don't want the next retry iteration for this
	 * operation to start until this one has returned and
	 * updated the iocb state. However, wait_queue functions
	 * can trigger a kick_iocb from interrupt context in the
	 * meantime, indicating that data is available for the next
	 * iteration. We want to remember that and enable the
	 * next retry iteration _after_ we are through with
	 * this one.
	 *
	 * So, in order to be able to register a "kick", but
	 * prevent it from being queued now, we clear the kick
	 * flag, but make the kick code *think* that the iocb is
	 * still on the run list until we are actually done.
	 * When we are done with this iteration, we check if
	 * the iocb was kicked in the meantime and if so, queue
	 * it up afresh.
	 */

	kiocbClearKicked(iocb);

	/*
	 * This is so that aio_complete knows it doesn't need to
	 * pull the iocb off the run list (We can't just call
	 * INIT_LIST_HEAD because we don't want a kick_iocb to
	 * queue this on the run list yet)
	 */
	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	/* Quit retrying if the i/o has been cancelled */
	if (kiocbIsCancelled(iocb)) {
		ret = -EINTR;
		aio_complete(iocb, ret, 0);
		/* must not access the iocb after this */
		goto out;
	}

	/*
	 * Now we are all set to call the retry method in async
	 * context.
	 */
	ret = retry(iocb);

	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
		BUG_ON(!list_empty(&iocb->ki_wait.task_list));
		aio_complete(iocb, ret, 0);
	}
out:
	spin_lock_irq(&ctx->ctx_lock);

	if (-EIOCBRETRY == ret) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go,
		 * this is where we let go so that a subsequent
		 * "kick" can start the next iteration
		 */

		/* will make __queue_kicked_iocb succeed from here on */
		INIT_LIST_HEAD(&iocb->ki_run_list);
		/* we must queue the next iteration ourselves, if it
		 * has already been kicked */
		if (kiocbIsKicked(iocb)) {
			__queue_kicked_iocb(iocb);

			/*
			 * __queue_kicked_iocb will always return 1 here, because
			 * iocb->ki_run_list is empty at this point so it should
			 * be safe to unconditionally queue the context into the
			 * work queue.
			 */
			aio_queue_work(ctx);
		}
	}
	return ret;
}
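
/*
 * Return-value contract for ki_retry methods, sketched for illustration;
 * the skeleton and helper below are hypothetical, not code in this file:
 *
 *	static ssize_t my_retry(struct kiocb *iocb)
 *	{
 *		ssize_t ret = try_to_make_progress(iocb);    hypothetical helper
 *
 *		if (more work remains and a wakeup will kick us)
 *			return -EIOCBRETRY;    aio_run_iocb re-queues on kick
 *		if (completion will be signalled asynchronously later)
 *			return -EIOCBQUEUED;   the lower layer calls aio_complete
 *		return ret;                    aio_run_iocb calls aio_complete now
 *	}
 *
 * Any value other than -EIOCBRETRY or -EIOCBQUEUED makes aio_run_iocb()
 * complete the iocb immediately with that value.
 */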

/*
 * __aio_run_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
	struct kiocb *iocb;
	struct list_head run_list;

	assert_spin_locked(&ctx->ctx_lock);

	list_replace_init(&ctx->run_list, &run_list);
	while (!list_empty(&run_list)) {
		iocb = list_entry(run_list.next, struct kiocb,
			ki_run_list);
		list_del(&iocb->ki_run_list);
		/*
		 * Hold an extra reference while retrying i/o.
		 */
		iocb->ki_users++;	/* grab extra reference */
		aio_run_iocb(iocb);
		__aio_put_req(ctx, iocb);
	}
	if (!list_empty(&ctx->run_list))
		return 1;
	return 0;
}

static void aio_queue_work(struct kioctx * ctx)
{
	unsigned long timeout;
	/*
	 * if someone is waiting, get the work started right
	 * away, otherwise, use a longer delay
	 */
	smp_mb();
	if (waitqueue_active(&ctx->wait))
		timeout = 1;
	else
		timeout = HZ/10;
	queue_delayed_work(aio_wq, &ctx->wq, timeout);
}

/*
 * aio_run_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static inline void aio_run_iocbs(struct kioctx *ctx)
{
	int requeue;

	spin_lock_irq(&ctx->ctx_lock);

	requeue = __aio_run_iocbs(ctx);
	spin_unlock_irq(&ctx->ctx_lock);
	if (requeue)
		aio_queue_work(ctx);
}

/*
 * just like aio_run_iocbs, but keeps running them until
 * the list stays empty
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->ctx_lock);
	while (__aio_run_iocbs(ctx))
		;
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 *	Work queue handler triggered to process pending
 *	retries on an ioctx. Takes on the aio issuer's
 *	mm context before running the iocbs, so that
 *	copy_xxx_user operates on the issuer's address
 *	space.
 * Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
	mm_segment_t oldfs = get_fs();
	struct mm_struct *mm;
	int requeue;

	set_fs(USER_DS);
	use_mm(ctx->mm);
	spin_lock_irq(&ctx->ctx_lock);
	requeue = __aio_run_iocbs(ctx);
	mm = ctx->mm;
	spin_unlock_irq(&ctx->ctx_lock);
	unuse_mm(mm);
	set_fs(oldfs);
	/*
	 * we're in a worker thread already, don't use queue_delayed_work,
	 */
	if (requeue)
		queue_delayed_work(aio_wq, &ctx->wq, 0);
}

/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	unsigned long flags;
	int run = 0;

	/* We're supposed to be the only path putting the iocb back on the run
	 * list.  If we find that the iocb is *back* on a wait queue already
	 * then retry has happened before we could queue the iocb.  This also
	 * means that the retry could have completed and freed our iocb, no
	 * good. */
	BUG_ON((!list_empty(&iocb->ki_wait.task_list)));

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	/* set this inside the lock so that we can't race with aio_run_iocb()
	 * testing it and putting the iocb on the run list under the lock */
	if (!kiocbTryKick(iocb))
		run = __queue_kicked_iocb(iocb);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (run)
		aio_queue_work(ctx);
}

/*
 * kick_iocb:
 *	Called typically from a wait queue callback context
 *	(aio_wake_function) to trigger a retry of the iocb.
 *	The retry is usually executed by aio workqueue
 *	threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
	/* sync iocbs are easy: they can only ever be executing from a
	 * single context. */
	if (is_sync_kiocb(iocb)) {
		kiocbSetKicked(iocb);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 *	Returns true if this is the last user of the request.  The
 *	only other user of the request can be the cancellation code.
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx *ctx = iocb->ki_ctx;
	struct aio_ring_info *info;
	struct aio_ring	*ring;
	struct io_event	*event;
	unsigned long flags;
	unsigned long tail;
	int ret;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(iocb->ki_users != 1);
		iocb->ki_user_data = res;
		iocb->ki_users = 0;
		wake_up_process(iocb->ki_obj.tsk);
		return 1;
	}

	info = &ctx->ring_info;

	/* add a completion event to the ring buffer.
	 * must be done holding ctx->ctx_lock to prevent
	 * other code from messing with the tail
	 * pointer since we might be called from irq
	 * context.
	 */
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
		list_del_init(&iocb->ki_run_list);

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);

	tail = info->tail;
	event = aio_ring_event(info, tail, KM_IRQ0);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event, KM_IRQ0);
	kunmap_atomic(ring, KM_IRQ1);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (!IS_ERR(iocb->ki_eventfd))
		eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
	/* everything turned out well, dispose of the aiocb. */
	ret = __aio_put_req(ctx, iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	return ret;
}
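
/*
 * How a completion path typically hands a result back, sketched for
 * illustration; the names below are hypothetical, not code in this file.
 * An async submission returns -EIOCBQUEUED from its ki_retry method, and
 * later, for example from an end_io callback, the lower layer reports the
 * byte count:
 *
 *	static void my_endio(struct my_request *rq)      hypothetical
 *	{
 *		struct kiocb *iocb = rq->private_iocb;
 *		aio_complete(iocb, rq->bytes_done, 0);    safe from irq context
 *	}
 *
 * For a sync kiocb the same call simply stores the result and wakes the
 * submitting task, as handled at the top of aio_complete() above.
 */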

/* aio_read_evt
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched (0 or 1 ;-)
 *	FIXME: make this use cmpxchg.
 *	TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		 (unsigned long)ring->head, (unsigned long)ring->tail,
		 (unsigned long)ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp, KM_USER1);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring, KM_USER0);
	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
		 (unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
}

struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long			start_jiffies = jiffies;
	struct task_struct	*tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int			ret;
	int			i = 0;
	struct io_event		ent;
	struct aio_timeout	to;
	int			retry = 0;

	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!
	 */
	memset(&ent, 0, sizeof(ent));
retry:
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		dprintk("read event: %Lx %Lx %Lx %Lx\n",
			ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	/* racey check, but it gets redone */
	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
		retry = 1;
		aio_run_all_iocbs(ctx);
		goto retry;
	}

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			if (unlikely(ctx->dead)) {
				ret = -EINVAL;
				break;
			}
			if (to.timed_out)	/* Only check after read evt */
				break;
			/* Try to only show up in io wait if there are ops
			 * in flight */
			if (ctx->reqs_active)
				io_schedule();
			else
				schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}
			/*ret = aio_read_evt(ctx, &ent);*/
		} while (1);

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	destroy_timer_on_stack(&to.timer);
	return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
	struct mm_struct *mm = current->mm;
	int was_dead;

	/* delete the entry from the list if someone else hasn't already */
	spin_lock(&mm->ioctx_lock);
	was_dead = ioctx->dead;
	ioctx->dead = 1;
	hlist_del_rcu(&ioctx->list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio_release(%p)\n", ioctx);
	if (likely(!was_dead))
		put_ioctx(ioctx);	/* twice for the list */

	aio_cancel_all(ioctx);
	wait_for_all_aios(ioctx);

	/*
	 * Wake up any waiters.  The setting of ctx->dead must be seen
	 * by other CPUs at this point.  Right now, we rely on the
	 * locking done by the above calls to ensure this consistency.
	 */
	wake_up(&ioctx->wait);
	put_ioctx(ioctx);	/* once for the lookup */
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized, or if
 *	the specified nr_events exceeds internal limits.  May fail with
 *	-EAGAIN if the specified nr_events exceeds the user's limit of
 *	available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (!ret)
			return 0;

		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
		io_destroy(ioctx);
	}

out:
	return ret;
}
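
/*
 * Userspace view, sketched for illustration (raw syscall form; the libaio
 * io_setup() wrapper does the same thing under the hood):
 *
 *	aio_context_t ctx = 0;               must be zero-initialized
 *	int ret = syscall(__NR_io_setup, 128, &ctx);
 *	if (ret < 0)
 *		handle EINVAL, EAGAIN, ENOMEM or EFAULT, as documented above;
 *	on success ctx now holds the mmap_base cookie set up by ioctx_alloc().
 */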

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EFAULT if the context pointed to
 *	is invalid.
 */
asmlinkage long sys_io_destroy(aio_context_t ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		io_destroy(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
			 unsigned long, loff_t);
	ssize_t ret = 0;
	unsigned short opcode;

	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
		(iocb->ki_opcode == IOCB_CMD_PREAD)) {
		rw_op = file->f_op->aio_read;
		opcode = IOCB_CMD_PREADV;
	} else {
		rw_op = file->f_op->aio_write;
		opcode = IOCB_CMD_PWRITEV;
	}

	/* This matches the pread()/pwrite() logic */
	if (iocb->ki_pos < 0)
		return -EINVAL;

	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as it's a
	 * regular file. */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (opcode == IOCB_CMD_PWRITEV ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

	/* This means we must have transferred all that we could */
	/* No need to retry anymore */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	/* If we managed to write some out we return that, rather than
	 * the eventual error. */
	if (opcode == IOCB_CMD_PWRITEV
	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
	    && iocb->ki_nbytes - iocb->ki_left)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 1);
	return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 0);
	return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
{
	ssize_t ret;

	ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
				    kiocb->ki_nbytes, 1,
				    &kiocb->ki_inline_vec, &kiocb->ki_iovec);
	if (ret < 0)
		goto out;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;
	kiocb->ki_cur_seg = 0;
	/* ki_nbytes/left now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	kiocb->ki_left = ret;

	ret = 0;
out:
	return ret;
}

static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
{
	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = kiocb->ki_left;
	kiocb->ki_nr_segs = 1;
	kiocb->ki_cur_seg = 0;
	return 0;
}

/*
 * aio_setup_iocb:
 *	Performs the initial checks and aio retry method
 *	setup for the kiocb at the time of io submission.
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		dprintk("EINVAL: io_submit: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}

/*
 * aio_wake_function:
 *	wait queue callback function for aio notification,
 *	Simply triggers a retry of the operation via kick_iocb.
 *
 *	This callback is specified in the wait queue entry in
 *	a kiocb.
 *
 * Note:
 * This routine is executed with the wait queue lock held.
 * Since kick_iocb acquires iocb->ctx->ctx_lock, it nests
 * the ioctx lock inside the wait queue lock. This is safe
 * because this callback isn't used for wait queues which
 * are nested inside ioctx lock (i.e. ctx->wait)
 */
static int aio_wake_function(wait_queue_t *wait, unsigned mode,
			     int sync, void *key)
{
	struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);

	list_del_init(&wait->task_list);
	kick_iocb(iocb);
	return 1;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb)
{
	struct kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: io_submit: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	file = fget(iocb->aio_fildes);
	if (unlikely(!file))
		return -EBADF;

	req = aio_get_req(ctx);		/* returns with 2 references to req */
	if (unlikely(!req)) {
		fput(file);
		return -EAGAIN;
	}
	req->ki_filp = file;
	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			goto out_put_req;
		}
	}

	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		dprintk("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;
	init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
	INIT_LIST_HEAD(&req->ki_wait.task_list);

	ret = aio_setup_iocb(req);

	if (ret)
		goto out_put_req;

	spin_lock_irq(&ctx->ctx_lock);
	aio_run_iocb(req);
	if (!list_empty(&ctx->run_list)) {
		/* drain the run list */
		while (__aio_run_iocbs(ctx))
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);
	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
			      struct iocb __user * __user *iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: io_submit: invalid context id\n");
		return -EINVAL;
	}

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i=0; i<nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp);
		if (ret)
			break;
	}

	put_ioctx(ctx);
	return i ? i : ret;
}
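
/*
 * Userspace view of a submission, sketched for illustration (raw syscall
 * form; field names follow struct iocb in <linux/aio_abi.h>):
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes     = fd;
 *	cb.aio_buf        = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes     = 4096;
 *	cb.aio_offset     = 0;
 *	cb.aio_data       = 0x1234;        returned verbatim in io_event.data
 *
 *	int n = syscall(__NR_io_submit, ctx, 1, cbs);
 *
 * n is the number of iocbs queued (1 here on success); the per-iocb checks
 * in io_submit_one() above (reserved fields, overflow, fget, aio_setup_iocb)
 * decide the error returned when the very first iocb fails.
 */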

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
			      struct io_event __user *result)
{
	int (*cancel)(struct kiocb *iocb, struct io_event *res);
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	ret = -EAGAIN;
	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb && kiocb->ki_cancel) {
		cancel = kiocb->ki_cancel;
		kiocb->ki_users++;
		kiocbSetCancelled(kiocb);
	} else
		cancel = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	if (NULL != cancel) {
		struct io_event tmp;
		pr_debug("calling cancel\n");
		memset(&tmp, 0, sizeof(tmp));
		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
		tmp.data = kiocb->ki_user_data;
		ret = cancel(kiocb, &tmp);
		if (!ret) {
			/* Cancellation succeeded -- copy the result
			 * into the user's buffer.
			 */
			if (copy_to_user(result, &tmp, sizeof(tmp)))
				ret = -EFAULT;
		}
	} else
		ret = -EINVAL;

	put_ioctx(ctx);

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id.  May
 *	fail with -EINVAL if ctx_id is invalid, if min_nr is out of range,
 *	if nr is out of range, if when is out of range.  May fail with
 *	-EFAULT if any of the memory specified to is invalid.  May return
 *	0 or < min_nr if no events are available and the timeout specified
 *	by when has elapsed, where when == NULL specifies an infinite
 *	timeout.  Note that the timeout pointed to by when is relative and
 *	will be updated if not NULL and the operation blocks.  Will fail
 *	with -ENOSYS if not implemented.
 */
asmlinkage long sys_io_getevents(aio_context_t ctx_id,
				 long min_nr,
				 long nr,
				 struct io_event __user *events,
				 struct timespec __user *timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}

	asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
	return ret;
}
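
/*
 * Userspace view of reaping completions, sketched for illustration
 * (raw syscall form, continuing the io_setup/io_submit sketches above):
 *
 *	struct io_event events[8];
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	int n = syscall(__NR_io_getevents, ctx, 1, 8, events, &ts);
 *
 * n is the number of events reaped; events[i].obj is the user iocb pointer,
 * events[i].data echoes aio_data, and events[i].res carries the byte count
 * or negative errno handed to aio_complete().
 */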

__initcall(aio_setup);

EXPORT_SYMBOL(aio_complete);
EXPORT_SYMBOL(aio_put_req);
EXPORT_SYMBOL(wait_on_sync_kiocb);