aio.c

  1. /*
  2. * An async IO implementation for Linux
  3. * Written by Benjamin LaHaise <bcrl@kvack.org>
  4. *
  5. * Implements an efficient asynchronous io interface.
  6. *
  7. * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
  8. *
  9. * See ../COPYING for licensing terms.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/init.h>
  13. #include <linux/errno.h>
  14. #include <linux/time.h>
  15. #include <linux/aio_abi.h>
  16. #include <linux/module.h>
  17. #include <linux/syscalls.h>
  18. #include <linux/uio.h>
  19. #define DEBUG 0
  20. #include <linux/sched.h>
  21. #include <linux/fs.h>
  22. #include <linux/file.h>
  23. #include <linux/mm.h>
  24. #include <linux/mman.h>
  25. #include <linux/slab.h>
  26. #include <linux/timer.h>
  27. #include <linux/aio.h>
  28. #include <linux/highmem.h>
  29. #include <linux/workqueue.h>
  30. #include <linux/security.h>
  31. #include <linux/eventfd.h>
  32. #include <asm/kmap_types.h>
  33. #include <asm/uaccess.h>
  34. #include <asm/mmu_context.h>
  35. #if DEBUG > 1
  36. #define dprintk printk
  37. #else
  38. #define dprintk(x...) do { ; } while (0)
  39. #endif
  40. /*------ sysctl variables----*/
  41. static DEFINE_SPINLOCK(aio_nr_lock);
  42. unsigned long aio_nr; /* current system wide number of aio requests */
  43. unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
  44. /*----end sysctl variables---*/
  45. static struct kmem_cache *kiocb_cachep;
  46. static struct kmem_cache *kioctx_cachep;
  47. static struct workqueue_struct *aio_wq;
  48. /* Used for rare fput completion. */
  49. static void aio_fput_routine(struct work_struct *);
  50. static DECLARE_WORK(fput_work, aio_fput_routine);
  51. static DEFINE_SPINLOCK(fput_lock);
  52. static LIST_HEAD(fput_head);
  53. static void aio_kick_handler(struct work_struct *);
  54. static void aio_queue_work(struct kioctx *);
  55. /* aio_setup
  56. * Creates the slab caches used by the aio routines, panic on
  57. * failure as this is done early during the boot sequence.
  58. */
  59. static int __init aio_setup(void)
  60. {
  61. kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
  62. kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
  63. aio_wq = create_workqueue("aio");
  64. pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
  65. return 0;
  66. }
  67. static void aio_free_ring(struct kioctx *ctx)
  68. {
  69. struct aio_ring_info *info = &ctx->ring_info;
  70. long i;
  71. for (i=0; i<info->nr_pages; i++)
  72. put_page(info->ring_pages[i]);
  73. if (info->mmap_size) {
  74. down_write(&ctx->mm->mmap_sem);
  75. do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
  76. up_write(&ctx->mm->mmap_sem);
  77. }
  78. if (info->ring_pages && info->ring_pages != info->internal_pages)
  79. kfree(info->ring_pages);
  80. info->ring_pages = NULL;
  81. info->nr = 0;
  82. }
  83. static int aio_setup_ring(struct kioctx *ctx)
  84. {
  85. struct aio_ring *ring;
  86. struct aio_ring_info *info = &ctx->ring_info;
  87. unsigned nr_events = ctx->max_reqs;
  88. unsigned long size;
  89. int nr_pages;
  90. /* Compensate for the ring buffer's head/tail overlap entry */
  91. nr_events += 2; /* 1 is required, 2 for good luck */
  92. size = sizeof(struct aio_ring);
  93. size += sizeof(struct io_event) * nr_events;
  94. nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
  95. if (nr_pages < 0)
  96. return -EINVAL;
  97. nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
  98. info->nr = 0;
  99. info->ring_pages = info->internal_pages;
  100. if (nr_pages > AIO_RING_PAGES) {
  101. info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
  102. if (!info->ring_pages)
  103. return -ENOMEM;
  104. }
  105. info->mmap_size = nr_pages * PAGE_SIZE;
  106. dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
  107. down_write(&ctx->mm->mmap_sem);
  108. info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
  109. PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
  110. 0);
  111. if (IS_ERR((void *)info->mmap_base)) {
  112. up_write(&ctx->mm->mmap_sem);
  113. info->mmap_size = 0;
  114. aio_free_ring(ctx);
  115. return -EAGAIN;
  116. }
  117. dprintk("mmap address: 0x%08lx\n", info->mmap_base);
  118. info->nr_pages = get_user_pages(current, ctx->mm,
  119. info->mmap_base, nr_pages,
  120. 1, 0, info->ring_pages, NULL);
  121. up_write(&ctx->mm->mmap_sem);
  122. if (unlikely(info->nr_pages != nr_pages)) {
  123. aio_free_ring(ctx);
  124. return -EAGAIN;
  125. }
  126. ctx->user_id = info->mmap_base;
  127. info->nr = nr_events; /* trusted copy */
  128. ring = kmap_atomic(info->ring_pages[0], KM_USER0);
  129. ring->nr = nr_events; /* user copy */
  130. ring->id = ctx->user_id;
  131. ring->head = ring->tail = 0;
  132. ring->magic = AIO_RING_MAGIC;
  133. ring->compat_features = AIO_RING_COMPAT_FEATURES;
  134. ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
  135. ring->header_length = sizeof(struct aio_ring);
  136. kunmap_atomic(ring, KM_USER0);
  137. return 0;
  138. }
  139. /* aio_ring_event: returns a pointer to the event at the given index from
  140. * kmap_atomic(, km). Release the pointer with put_aio_ring_event();
  141. */
  142. #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
  143. #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
  144. #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
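/*
 * Ring layout: page 0 begins with the struct aio_ring header and holds
 * AIO_EVENTS_FIRST_PAGE events; every later page holds
 * AIO_EVENTS_PER_PAGE events.  Adding AIO_EVENTS_OFFSET to an event
 * index lets aio_ring_event() below locate both the page and the slot
 * within it with a single divide/modulo by AIO_EVENTS_PER_PAGE.
 */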
  145. #define aio_ring_event(info, nr, km) ({ \
  146. unsigned pos = (nr) + AIO_EVENTS_OFFSET; \
  147. struct io_event *__event; \
  148. __event = kmap_atomic( \
  149. (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
  150. __event += pos % AIO_EVENTS_PER_PAGE; \
  151. __event; \
  152. })
  153. #define put_aio_ring_event(event, km) do { \
  154. struct io_event *__event = (event); \
  155. (void)__event; \
  156. kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
  157. } while(0)
  158. /* __put_ioctx
  159. * Called when the last user of an aio context has gone away,
  160. * and the struct needs to be freed.
  161. */
  162. static void __put_ioctx(struct kioctx *ctx)
  163. {
  164. unsigned nr_events = ctx->max_reqs;
  165. BUG_ON(ctx->reqs_active);
  166. cancel_delayed_work(&ctx->wq);
  167. cancel_work_sync(&ctx->wq.work);
  168. aio_free_ring(ctx);
  169. mmdrop(ctx->mm);
  170. ctx->mm = NULL;
  171. pr_debug("__put_ioctx: freeing %p\n", ctx);
  172. kmem_cache_free(kioctx_cachep, ctx);
  173. if (nr_events) {
  174. spin_lock(&aio_nr_lock);
  175. BUG_ON(aio_nr - nr_events > aio_nr);
  176. aio_nr -= nr_events;
  177. spin_unlock(&aio_nr_lock);
  178. }
  179. }
  180. #define get_ioctx(kioctx) do { \
  181. BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
  182. atomic_inc(&(kioctx)->users); \
  183. } while (0)
  184. #define put_ioctx(kioctx) do { \
  185. BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
  186. if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \
  187. __put_ioctx(kioctx); \
  188. } while (0)
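/*
 * Reference counting: ioctx_alloc() hands back a context with a user
 * count of 1, lookup_ioctx() and the deferred fput path take extra
 * references via get_ioctx(), and the final put_ioctx() releases the
 * context through __put_ioctx() above.
 */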
  189. /* ioctx_alloc
  190. * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
  191. */
  192. static struct kioctx *ioctx_alloc(unsigned nr_events)
  193. {
  194. struct mm_struct *mm;
  195. struct kioctx *ctx;
  196. /* Prevent overflows */
  197. if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
  198. (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
  199. pr_debug("ENOMEM: nr_events too high\n");
  200. return ERR_PTR(-EINVAL);
  201. }
  202. if ((unsigned long)nr_events > aio_max_nr)
  203. return ERR_PTR(-EAGAIN);
  204. ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
  205. if (!ctx)
  206. return ERR_PTR(-ENOMEM);
  207. ctx->max_reqs = nr_events;
  208. mm = ctx->mm = current->mm;
  209. atomic_inc(&mm->mm_count);
  210. atomic_set(&ctx->users, 1);
  211. spin_lock_init(&ctx->ctx_lock);
  212. spin_lock_init(&ctx->ring_info.ring_lock);
  213. init_waitqueue_head(&ctx->wait);
  214. INIT_LIST_HEAD(&ctx->active_reqs);
  215. INIT_LIST_HEAD(&ctx->run_list);
  216. INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
  217. if (aio_setup_ring(ctx) < 0)
  218. goto out_freectx;
  219. /* limit the number of system wide aios */
  220. spin_lock(&aio_nr_lock);
  221. if (aio_nr + ctx->max_reqs > aio_max_nr ||
  222. aio_nr + ctx->max_reqs < aio_nr)
  223. ctx->max_reqs = 0;
  224. else
  225. aio_nr += ctx->max_reqs;
  226. spin_unlock(&aio_nr_lock);
  227. if (ctx->max_reqs == 0)
  228. goto out_cleanup;
  229. /* now link into global list. kludge. FIXME */
  230. write_lock(&mm->ioctx_list_lock);
  231. ctx->next = mm->ioctx_list;
  232. mm->ioctx_list = ctx;
  233. write_unlock(&mm->ioctx_list_lock);
  234. dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
  235. ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
  236. return ctx;
  237. out_cleanup:
  238. __put_ioctx(ctx);
  239. return ERR_PTR(-EAGAIN);
  240. out_freectx:
  241. mmdrop(mm);
  242. kmem_cache_free(kioctx_cachep, ctx);
  243. ctx = ERR_PTR(-ENOMEM);
  244. dprintk("aio: error allocating ioctx %p\n", ctx);
  245. return ctx;
  246. }
  247. /* aio_cancel_all
  248. * Cancels all outstanding aio requests on an aio context. Used
  249. * when the processes owning a context have all exited to encourage
  250. * the rapid destruction of the kioctx.
  251. */
  252. static void aio_cancel_all(struct kioctx *ctx)
  253. {
  254. int (*cancel)(struct kiocb *, struct io_event *);
  255. struct io_event res;
  256. spin_lock_irq(&ctx->ctx_lock);
  257. ctx->dead = 1;
  258. while (!list_empty(&ctx->active_reqs)) {
  259. struct list_head *pos = ctx->active_reqs.next;
  260. struct kiocb *iocb = list_kiocb(pos);
  261. list_del_init(&iocb->ki_list);
  262. cancel = iocb->ki_cancel;
  263. kiocbSetCancelled(iocb);
  264. if (cancel) {
  265. iocb->ki_users++;
  266. spin_unlock_irq(&ctx->ctx_lock);
  267. cancel(iocb, &res);
  268. spin_lock_irq(&ctx->ctx_lock);
  269. }
  270. }
  271. spin_unlock_irq(&ctx->ctx_lock);
  272. }
  273. static void wait_for_all_aios(struct kioctx *ctx)
  274. {
  275. struct task_struct *tsk = current;
  276. DECLARE_WAITQUEUE(wait, tsk);
  277. spin_lock_irq(&ctx->ctx_lock);
  278. if (!ctx->reqs_active)
  279. goto out;
  280. add_wait_queue(&ctx->wait, &wait);
  281. set_task_state(tsk, TASK_UNINTERRUPTIBLE);
  282. while (ctx->reqs_active) {
  283. spin_unlock_irq(&ctx->ctx_lock);
  284. io_schedule();
  285. set_task_state(tsk, TASK_UNINTERRUPTIBLE);
  286. spin_lock_irq(&ctx->ctx_lock);
  287. }
  288. __set_task_state(tsk, TASK_RUNNING);
  289. remove_wait_queue(&ctx->wait, &wait);
  290. out:
  291. spin_unlock_irq(&ctx->ctx_lock);
  292. }
  293. /* wait_on_sync_kiocb:
  294. * Waits on the given sync kiocb to complete.
  295. */
  296. ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
  297. {
  298. while (iocb->ki_users) {
  299. set_current_state(TASK_UNINTERRUPTIBLE);
  300. if (!iocb->ki_users)
  301. break;
  302. io_schedule();
  303. }
  304. __set_current_state(TASK_RUNNING);
  305. return iocb->ki_user_data;
  306. }
  307. /* exit_aio: called when the last user of mm goes away. At this point,
  308. * there is no way for any new requests to be submitted or any of the
  309. * io_* syscalls to be called on the context. However, there may be
  310. * outstanding requests which hold references to the context; as they
  311. * go away, they will call put_ioctx and release any pinned memory
  312. * associated with the request (held via struct page * references).
  313. */
  314. void exit_aio(struct mm_struct *mm)
  315. {
  316. struct kioctx *ctx = mm->ioctx_list;
  317. mm->ioctx_list = NULL;
  318. while (ctx) {
  319. struct kioctx *next = ctx->next;
  320. ctx->next = NULL;
  321. aio_cancel_all(ctx);
  322. wait_for_all_aios(ctx);
  323. /*
  324. * Ensure we don't leave the ctx on the aio_wq
  325. */
  326. cancel_work_sync(&ctx->wq.work);
  327. if (1 != atomic_read(&ctx->users))
  328. printk(KERN_DEBUG
  329. "exit_aio:ioctx still alive: %d %d %d\n",
  330. atomic_read(&ctx->users), ctx->dead,
  331. ctx->reqs_active);
  332. put_ioctx(ctx);
  333. ctx = next;
  334. }
  335. }
  336. /* aio_get_req
  337. * Allocate a slot for an aio request. Increments the users count
  338. * of the kioctx so that the kioctx stays around until all requests are
  339. * complete. Returns NULL if no requests are free.
  340. *
  341. * Returns with kiocb->users set to 2. The io submit code path holds
  342. * an extra reference while submitting the i/o.
  343. * This prevents races between the aio code path referencing the
  344. * req (after submitting it) and aio_complete() freeing the req.
  345. */
  346. static struct kiocb *__aio_get_req(struct kioctx *ctx)
  347. {
  348. struct kiocb *req = NULL;
  349. struct aio_ring *ring;
  350. int okay = 0;
  351. req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
  352. if (unlikely(!req))
  353. return NULL;
  354. req->ki_flags = 0;
  355. req->ki_users = 2;
  356. req->ki_key = 0;
  357. req->ki_ctx = ctx;
  358. req->ki_cancel = NULL;
  359. req->ki_retry = NULL;
  360. req->ki_dtor = NULL;
  361. req->private = NULL;
  362. req->ki_iovec = NULL;
  363. INIT_LIST_HEAD(&req->ki_run_list);
  364. req->ki_eventfd = ERR_PTR(-EINVAL);
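	/* no eventfd attached yet; io_submit_one() installs one only when the
	 * submitter passed IOCB_FLAG_RESFD */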
  365. /* Check if the completion queue has enough free space to
  366. * accept an event from this io.
  367. */
  368. spin_lock_irq(&ctx->ctx_lock);
  369. ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
  370. if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
  371. list_add(&req->ki_list, &ctx->active_reqs);
  372. ctx->reqs_active++;
  373. okay = 1;
  374. }
  375. kunmap_atomic(ring, KM_USER0);
  376. spin_unlock_irq(&ctx->ctx_lock);
  377. if (!okay) {
  378. kmem_cache_free(kiocb_cachep, req);
  379. req = NULL;
  380. }
  381. return req;
  382. }
  383. static inline struct kiocb *aio_get_req(struct kioctx *ctx)
  384. {
  385. struct kiocb *req;
  386. /* Handle a potential starvation case -- should be exceedingly rare as
  387. * requests will be stuck on fput_head only if the aio_fput_routine is
  388. * delayed and the requests were the last user of the struct file.
  389. */
  390. req = __aio_get_req(ctx);
  391. if (unlikely(NULL == req)) {
  392. aio_fput_routine(NULL);
  393. req = __aio_get_req(ctx);
  394. }
  395. return req;
  396. }
  397. static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
  398. {
  399. assert_spin_locked(&ctx->ctx_lock);
  400. if (!IS_ERR(req->ki_eventfd))
  401. fput(req->ki_eventfd);
  402. if (req->ki_dtor)
  403. req->ki_dtor(req);
  404. if (req->ki_iovec != &req->ki_inline_vec)
  405. kfree(req->ki_iovec);
  406. kmem_cache_free(kiocb_cachep, req);
  407. ctx->reqs_active--;
  408. if (unlikely(!ctx->reqs_active && ctx->dead))
  409. wake_up(&ctx->wait);
  410. }
  411. static void aio_fput_routine(struct work_struct *data)
  412. {
  413. spin_lock_irq(&fput_lock);
  414. while (likely(!list_empty(&fput_head))) {
  415. struct kiocb *req = list_kiocb(fput_head.next);
  416. struct kioctx *ctx = req->ki_ctx;
  417. list_del(&req->ki_list);
  418. spin_unlock_irq(&fput_lock);
  419. /* Complete the fput */
  420. __fput(req->ki_filp);
  421. /* Link the iocb into the context's free list */
  422. spin_lock_irq(&ctx->ctx_lock);
  423. really_put_req(ctx, req);
  424. spin_unlock_irq(&ctx->ctx_lock);
  425. put_ioctx(ctx);
  426. spin_lock_irq(&fput_lock);
  427. }
  428. spin_unlock_irq(&fput_lock);
  429. }
  430. /* __aio_put_req
  431. * Returns true if this put was the last user of the request.
  432. */
  433. static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
  434. {
  435. dprintk(KERN_DEBUG "aio_put(%p): f_count=%d\n",
  436. req, atomic_read(&req->ki_filp->f_count));
  437. assert_spin_locked(&ctx->ctx_lock);
  438. req->ki_users --;
  439. BUG_ON(req->ki_users < 0);
  440. if (likely(req->ki_users))
  441. return 0;
  442. list_del(&req->ki_list); /* remove from active_reqs */
  443. req->ki_cancel = NULL;
  444. req->ki_retry = NULL;
  445. /* Must be done under the lock to serialise against cancellation.
  446. * Call this aio_fput as it duplicates fput via the fput_work.
  447. */
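	/*
	 * Dropping the last file reference would run __fput(), which may
	 * sleep; that is not possible here because ctx_lock is held and we
	 * may be in interrupt context (via aio_complete).  So park the iocb
	 * on fput_head and let aio_fput_routine() finish the fput from the
	 * aio workqueue.
	 */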
  448. if (unlikely(atomic_dec_and_test(&req->ki_filp->f_count))) {
  449. get_ioctx(ctx);
  450. spin_lock(&fput_lock);
  451. list_add(&req->ki_list, &fput_head);
  452. spin_unlock(&fput_lock);
  453. queue_work(aio_wq, &fput_work);
  454. } else
  455. really_put_req(ctx, req);
  456. return 1;
  457. }
  458. /* aio_put_req
  459. * Returns true if this put was the last user of the kiocb,
  460. * false if the request is still in use.
  461. */
  462. int aio_put_req(struct kiocb *req)
  463. {
  464. struct kioctx *ctx = req->ki_ctx;
  465. int ret;
  466. spin_lock_irq(&ctx->ctx_lock);
  467. ret = __aio_put_req(ctx, req);
  468. spin_unlock_irq(&ctx->ctx_lock);
  469. return ret;
  470. }
  471. /* Lookup an ioctx id. ioctx_list is lockless for reads.
  472. * FIXME: this is O(n) and is only suitable for development.
  473. */
  474. static struct kioctx *lookup_ioctx(unsigned long ctx_id)
  475. {
  476. struct kioctx *ioctx;
  477. struct mm_struct *mm;
  478. mm = current->mm;
  479. read_lock(&mm->ioctx_list_lock);
  480. for (ioctx = mm->ioctx_list; ioctx; ioctx = ioctx->next)
  481. if (likely(ioctx->user_id == ctx_id && !ioctx->dead)) {
  482. get_ioctx(ioctx);
  483. break;
  484. }
  485. read_unlock(&mm->ioctx_list_lock);
  486. return ioctx;
  487. }
  488. /*
  489. * use_mm
  490. * Makes the calling kernel thread take on the specified
  491. * mm context.
  492. * Called by the retry thread to execute retries within the
  493. * iocb issuer's mm context, so that copy_from/to_user
  494. * operations work seamlessly for aio.
  495. * (Note: this routine is intended to be called only
  496. * from a kernel thread context)
  497. */
  498. static void use_mm(struct mm_struct *mm)
  499. {
  500. struct mm_struct *active_mm;
  501. struct task_struct *tsk = current;
  502. task_lock(tsk);
  503. tsk->flags |= PF_BORROWED_MM;
  504. active_mm = tsk->active_mm;
  505. atomic_inc(&mm->mm_count);
  506. tsk->mm = mm;
  507. tsk->active_mm = mm;
  508. /*
  509. * Note that on UML this *requires* PF_BORROWED_MM to be set, otherwise
  510. * it won't work. Update it accordingly if you change it here
  511. */
  512. switch_mm(active_mm, mm, tsk);
  513. task_unlock(tsk);
  514. mmdrop(active_mm);
  515. }
  516. /*
  517. * unuse_mm
  518. * Reverses the effect of use_mm, i.e. releases the
  519. * specified mm context which was earlier taken on
  520. * by the calling kernel thread
  521. * (Note: this routine is intended to be called only
  522. * from a kernel thread context)
  523. */
  524. static void unuse_mm(struct mm_struct *mm)
  525. {
  526. struct task_struct *tsk = current;
  527. task_lock(tsk);
  528. tsk->flags &= ~PF_BORROWED_MM;
  529. tsk->mm = NULL;
  530. /* active_mm is still 'mm' */
  531. enter_lazy_tlb(mm, tsk);
  532. task_unlock(tsk);
  533. }
  534. /*
  535. * Queue up a kiocb to be retried. Assumes that the kiocb
  536. * has already been marked as kicked, and places it on
  537. * the retry run list for the corresponding ioctx, if it
  538. * isn't already queued. Returns 1 if it actually queued
  539. * the kiocb (to tell the caller to activate the work
  540. * queue to process it), or 0, if it found that it was
  541. * already queued.
  542. */
  543. static inline int __queue_kicked_iocb(struct kiocb *iocb)
  544. {
  545. struct kioctx *ctx = iocb->ki_ctx;
  546. assert_spin_locked(&ctx->ctx_lock);
  547. if (list_empty(&iocb->ki_run_list)) {
  548. list_add_tail(&iocb->ki_run_list,
  549. &ctx->run_list);
  550. return 1;
  551. }
  552. return 0;
  553. }
  554. /* aio_run_iocb
  555. * This is the core aio execution routine. It is
  556. * invoked both for initial i/o submission and
  557. * subsequent retries via the aio_kick_handler.
  558. * Expects to be invoked with iocb->ki_ctx->lock
  559. * already held. The lock is released and reacquired
  560. * as needed during processing.
  561. *
  562. * Calls the iocb retry method (already setup for the
  563. * iocb on initial submission) for operation specific
  564. * handling, but takes care of most of common retry
  565. * execution details for a given iocb. The retry method
  566. * needs to be non-blocking as far as possible, to avoid
  567. * holding up other iocbs waiting to be serviced by the
  568. * retry kernel thread.
  569. *
  570. * The trickier parts in this code have to do with
  571. * ensuring that only one retry instance is in progress
  572. * for a given iocb at any time. Providing that guarantee
  573. * simplifies the coding of individual aio operations as
  574. * it avoids various potential races.
  575. */
  576. static ssize_t aio_run_iocb(struct kiocb *iocb)
  577. {
  578. struct kioctx *ctx = iocb->ki_ctx;
  579. ssize_t (*retry)(struct kiocb *);
  580. ssize_t ret;
  581. if (!(retry = iocb->ki_retry)) {
  582. printk("aio_run_iocb: iocb->ki_retry = NULL\n");
  583. return 0;
  584. }
  585. /*
  586. * We don't want the next retry iteration for this
  587. * operation to start until this one has returned and
  588. * updated the iocb state. However, wait_queue functions
  589. * can trigger a kick_iocb from interrupt context in the
  590. * meantime, indicating that data is available for the next
  591. * iteration. We want to remember that and enable the
  592. * next retry iteration _after_ we are through with
  593. * this one.
  594. *
  595. * So, in order to be able to register a "kick", but
  596. * prevent it from being queued now, we clear the kick
  597. * flag, but make the kick code *think* that the iocb is
  598. * still on the run list until we are actually done.
  599. * When we are done with this iteration, we check if
  600. * the iocb was kicked in the meantime and if so, queue
  601. * it up afresh.
  602. */
  603. kiocbClearKicked(iocb);
  604. /*
  605. * This is so that aio_complete knows it doesn't need to
  606. * pull the iocb off the run list (We can't just call
  607. * INIT_LIST_HEAD because we don't want a kick_iocb to
  608. * queue this on the run list yet)
  609. */
  610. iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
  611. spin_unlock_irq(&ctx->ctx_lock);
  612. /* Quit retrying if the i/o has been cancelled */
  613. if (kiocbIsCancelled(iocb)) {
  614. ret = -EINTR;
  615. aio_complete(iocb, ret, 0);
  616. /* must not access the iocb after this */
  617. goto out;
  618. }
  619. /*
  620. * Now we are all set to call the retry method in async
  621. * context.
  622. */
  623. ret = retry(iocb);
  624. if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
  625. BUG_ON(!list_empty(&iocb->ki_wait.task_list));
  626. aio_complete(iocb, ret, 0);
  627. }
  628. out:
  629. spin_lock_irq(&ctx->ctx_lock);
  630. if (-EIOCBRETRY == ret) {
  631. /*
  632. * OK, now that we are done with this iteration
  633. * and know that there is more left to go,
  634. * this is where we let go so that a subsequent
  635. * "kick" can start the next iteration
  636. */
  637. /* will make __queue_kicked_iocb succeed from here on */
  638. INIT_LIST_HEAD(&iocb->ki_run_list);
  639. /* we must queue the next iteration ourselves, if it
  640. * has already been kicked */
  641. if (kiocbIsKicked(iocb)) {
  642. __queue_kicked_iocb(iocb);
  643. /*
  644. * __queue_kicked_iocb will always return 1 here, because
  645. * iocb->ki_run_list is empty at this point so it should
  646. * be safe to unconditionally queue the context into the
  647. * work queue.
  648. */
  649. aio_queue_work(ctx);
  650. }
  651. }
  652. return ret;
  653. }
  654. /*
  655. * __aio_run_iocbs:
  656. * Process all pending retries queued on the ioctx
  657. * run list.
  658. * Assumes it is operating within the aio issuer's mm
  659. * context.
  660. */
  661. static int __aio_run_iocbs(struct kioctx *ctx)
  662. {
  663. struct kiocb *iocb;
  664. struct list_head run_list;
  665. assert_spin_locked(&ctx->ctx_lock);
  666. list_replace_init(&ctx->run_list, &run_list);
  667. while (!list_empty(&run_list)) {
  668. iocb = list_entry(run_list.next, struct kiocb,
  669. ki_run_list);
  670. list_del(&iocb->ki_run_list);
  671. /*
  672. * Hold an extra reference while retrying i/o.
  673. */
  674. iocb->ki_users++; /* grab extra reference */
  675. aio_run_iocb(iocb);
  676. __aio_put_req(ctx, iocb);
  677. }
  678. if (!list_empty(&ctx->run_list))
  679. return 1;
  680. return 0;
  681. }
  682. static void aio_queue_work(struct kioctx * ctx)
  683. {
  684. unsigned long timeout;
  685. /*
  686. * if someone is waiting, get the work started right
  687. * away, otherwise, use a longer delay
  688. */
  689. smp_mb();
  690. if (waitqueue_active(&ctx->wait))
  691. timeout = 1;
  692. else
  693. timeout = HZ/10;
  694. queue_delayed_work(aio_wq, &ctx->wq, timeout);
  695. }
  696. /*
  697. * aio_run_iocbs:
  698. * Process all pending retries queued on the ioctx
  699. * run list.
  700. * Assumes it is operating within the aio issuer's mm
  701. * context.
  702. */
  703. static inline void aio_run_iocbs(struct kioctx *ctx)
  704. {
  705. int requeue;
  706. spin_lock_irq(&ctx->ctx_lock);
  707. requeue = __aio_run_iocbs(ctx);
  708. spin_unlock_irq(&ctx->ctx_lock);
  709. if (requeue)
  710. aio_queue_work(ctx);
  711. }
  712. /*
  713. * just like aio_run_iocbs, but keeps running them until
  714. * the list stays empty
  715. */
  716. static inline void aio_run_all_iocbs(struct kioctx *ctx)
  717. {
  718. spin_lock_irq(&ctx->ctx_lock);
  719. while (__aio_run_iocbs(ctx))
  720. ;
  721. spin_unlock_irq(&ctx->ctx_lock);
  722. }
  723. /*
  724. * aio_kick_handler:
  725. * Work queue handler triggered to process pending
  726. * retries on an ioctx. Takes on the aio issuer's
  727. * mm context before running the iocbs, so that
  728. * copy_xxx_user operates on the issuer's address
  729. * space.
  730. * Run on aiod's context.
  731. */
  732. static void aio_kick_handler(struct work_struct *work)
  733. {
  734. struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
  735. mm_segment_t oldfs = get_fs();
  736. struct mm_struct *mm;
  737. int requeue;
  738. set_fs(USER_DS);
  739. use_mm(ctx->mm);
  740. spin_lock_irq(&ctx->ctx_lock);
  741. requeue =__aio_run_iocbs(ctx);
  742. mm = ctx->mm;
  743. spin_unlock_irq(&ctx->ctx_lock);
  744. unuse_mm(mm);
  745. set_fs(oldfs);
  746. /*
  747. * we're in a worker thread already, don't use queue_delayed_work,
  748. */
  749. if (requeue)
  750. queue_delayed_work(aio_wq, &ctx->wq, 0);
  751. }
  752. /*
  753. * Called by kick_iocb to queue the kiocb for retry
  754. * and if required activate the aio work queue to process
  755. * it
  756. */
  757. static void try_queue_kicked_iocb(struct kiocb *iocb)
  758. {
  759. struct kioctx *ctx = iocb->ki_ctx;
  760. unsigned long flags;
  761. int run = 0;
  762. /* We're supposed to be the only path putting the iocb back on the run
  763. * list. If we find that the iocb is *back* on a wait queue already
  764. * then a retry has happened before we could queue the iocb. This also
  765. * means that the retry could have completed and freed our iocb, no
  766. * good. */
  767. BUG_ON((!list_empty(&iocb->ki_wait.task_list)));
  768. spin_lock_irqsave(&ctx->ctx_lock, flags);
  769. /* set this inside the lock so that we can't race with aio_run_iocb()
  770. * testing it and putting the iocb on the run list under the lock */
  771. if (!kiocbTryKick(iocb))
  772. run = __queue_kicked_iocb(iocb);
  773. spin_unlock_irqrestore(&ctx->ctx_lock, flags);
  774. if (run)
  775. aio_queue_work(ctx);
  776. }
  777. /*
  778. * kick_iocb:
  779. * Called typically from a wait queue callback context
  780. * (aio_wake_function) to trigger a retry of the iocb.
  781. * The retry is usually executed by aio workqueue
  782. * threads (See aio_kick_handler).
  783. */
  784. void kick_iocb(struct kiocb *iocb)
  785. {
  786. /* sync iocbs are easy: they can only ever be executing from a
  787. * single context. */
  788. if (is_sync_kiocb(iocb)) {
  789. kiocbSetKicked(iocb);
  790. wake_up_process(iocb->ki_obj.tsk);
  791. return;
  792. }
  793. try_queue_kicked_iocb(iocb);
  794. }
  795. EXPORT_SYMBOL(kick_iocb);
  796. /* aio_complete
  797. * Called when the io request on the given iocb is complete.
  798. * Returns true if this is the last user of the request. The
  799. * only other user of the request can be the cancellation code.
  800. */
  801. int aio_complete(struct kiocb *iocb, long res, long res2)
  802. {
  803. struct kioctx *ctx = iocb->ki_ctx;
  804. struct aio_ring_info *info;
  805. struct aio_ring *ring;
  806. struct io_event *event;
  807. unsigned long flags;
  808. unsigned long tail;
  809. int ret;
  810. /*
  811. * Special case handling for sync iocbs:
  812. * - events go directly into the iocb for fast handling
  813. * - the sync task with the iocb in its stack holds the single iocb
  814. * ref, no other paths have a way to get another ref
  815. * - the sync task helpfully left a reference to itself in the iocb
  816. */
  817. if (is_sync_kiocb(iocb)) {
  818. BUG_ON(iocb->ki_users != 1);
  819. iocb->ki_user_data = res;
  820. iocb->ki_users = 0;
  821. wake_up_process(iocb->ki_obj.tsk);
  822. return 1;
  823. }
  824. info = &ctx->ring_info;
  825. /* add a completion event to the ring buffer.
  826. * must be done holding ctx->ctx_lock to prevent
  827. * other code from messing with the tail
  828. * pointer since we might be called from irq
  829. * context.
  830. */
  831. spin_lock_irqsave(&ctx->ctx_lock, flags);
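	/*
	 * ki_run_list.prev is NULL while aio_run_iocb() owns this iocb (it
	 * clears the pointers before dropping ctx_lock), so only unlink the
	 * iocb if it is genuinely still on the run list.
	 */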
  832. if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
  833. list_del_init(&iocb->ki_run_list);
  834. /*
  835. * cancelled requests don't get events, userland was given one
  836. * when the event got cancelled.
  837. */
  838. if (kiocbIsCancelled(iocb))
  839. goto put_rq;
  840. ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
  841. tail = info->tail;
  842. event = aio_ring_event(info, tail, KM_IRQ0);
  843. if (++tail >= info->nr)
  844. tail = 0;
  845. event->obj = (u64)(unsigned long)iocb->ki_obj.user;
  846. event->data = iocb->ki_user_data;
  847. event->res = res;
  848. event->res2 = res2;
  849. dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
  850. ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
  851. res, res2);
  852. /* after flagging the request as done, we
  853. * must never even look at it again
  854. */
  855. smp_wmb(); /* make event visible before updating tail */
  856. info->tail = tail;
  857. ring->tail = tail;
  858. put_aio_ring_event(event, KM_IRQ0);
  859. kunmap_atomic(ring, KM_IRQ1);
  860. pr_debug("added to ring %p at [%lu]\n", iocb, tail);
  861. /*
  862. * Check if the user asked us to deliver the result through an
  863. * eventfd. The eventfd_signal() function is safe to be called
  864. * from IRQ context.
  865. */
  866. if (!IS_ERR(iocb->ki_eventfd))
  867. eventfd_signal(iocb->ki_eventfd, 1);
  868. put_rq:
  869. /* everything turned out well, dispose of the aiocb. */
  870. ret = __aio_put_req(ctx, iocb);
  871. /*
  872. * We have to order our ring_info tail store above and test
  873. * of the wait list below outside the wait lock. This is
  874. * like in wake_up_bit() where clearing a bit has to be
  875. * ordered with the unlocked test.
  876. */
  877. smp_mb();
  878. if (waitqueue_active(&ctx->wait))
  879. wake_up(&ctx->wait);
  880. spin_unlock_irqrestore(&ctx->ctx_lock, flags);
  881. return ret;
  882. }
  883. /* aio_read_evt
  884. * Pull an event off of the ioctx's event ring. Returns the number of
  885. * events fetched (0 or 1 ;-)
  886. * FIXME: make this use cmpxchg.
  887. * TODO: make the ringbuffer user mmap()able (requires FIXME).
  888. */
  889. static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
  890. {
  891. struct aio_ring_info *info = &ioctx->ring_info;
  892. struct aio_ring *ring;
  893. unsigned long head;
  894. int ret = 0;
  895. ring = kmap_atomic(info->ring_pages[0], KM_USER0);
  896. dprintk("in aio_read_evt h%lu t%lu m%lu\n",
  897. (unsigned long)ring->head, (unsigned long)ring->tail,
  898. (unsigned long)ring->nr);
  899. if (ring->head == ring->tail)
  900. goto out;
  901. spin_lock(&info->ring_lock);
  902. head = ring->head % info->nr;
  903. if (head != ring->tail) {
  904. struct io_event *evp = aio_ring_event(info, head, KM_USER1);
  905. *ent = *evp;
  906. head = (head + 1) % info->nr;
  907. smp_mb(); /* finish reading the event before updating the head */
  908. ring->head = head;
  909. ret = 1;
  910. put_aio_ring_event(evp, KM_USER1);
  911. }
  912. spin_unlock(&info->ring_lock);
  913. out:
  914. kunmap_atomic(ring, KM_USER0);
  915. dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
  916. (unsigned long)ring->head, (unsigned long)ring->tail);
  917. return ret;
  918. }
  919. struct aio_timeout {
  920. struct timer_list timer;
  921. int timed_out;
  922. struct task_struct *p;
  923. };
  924. static void timeout_func(unsigned long data)
  925. {
  926. struct aio_timeout *to = (struct aio_timeout *)data;
  927. to->timed_out = 1;
  928. wake_up_process(to->p);
  929. }
  930. static inline void init_timeout(struct aio_timeout *to)
  931. {
  932. init_timer(&to->timer);
  933. to->timer.data = (unsigned long)to;
  934. to->timer.function = timeout_func;
  935. to->timed_out = 0;
  936. to->p = current;
  937. }
  938. static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
  939. const struct timespec *ts)
  940. {
  941. to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
  942. if (time_after(to->timer.expires, jiffies))
  943. add_timer(&to->timer);
  944. else
  945. to->timed_out = 1;
  946. }
  947. static inline void clear_timeout(struct aio_timeout *to)
  948. {
  949. del_singleshot_timer_sync(&to->timer);
  950. }
  951. static int read_events(struct kioctx *ctx,
  952. long min_nr, long nr,
  953. struct io_event __user *event,
  954. struct timespec __user *timeout)
  955. {
  956. long start_jiffies = jiffies;
  957. struct task_struct *tsk = current;
  958. DECLARE_WAITQUEUE(wait, tsk);
  959. int ret;
  960. int i = 0;
  961. struct io_event ent;
  962. struct aio_timeout to;
  963. int retry = 0;
  964. /* needed to zero any padding within an entry (there shouldn't be
  965. * any, but C is fun!)
  966. */
  967. memset(&ent, 0, sizeof(ent));
  968. retry:
  969. ret = 0;
  970. while (likely(i < nr)) {
  971. ret = aio_read_evt(ctx, &ent);
  972. if (unlikely(ret <= 0))
  973. break;
  974. dprintk("read event: %Lx %Lx %Lx %Lx\n",
  975. ent.data, ent.obj, ent.res, ent.res2);
  976. /* Could we split the check in two? */
  977. ret = -EFAULT;
  978. if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
  979. dprintk("aio: lost an event due to EFAULT.\n");
  980. break;
  981. }
  982. ret = 0;
  983. /* Good, event copied to userland, update counts. */
  984. event ++;
  985. i ++;
  986. }
  987. if (min_nr <= i)
  988. return i;
  989. if (ret)
  990. return ret;
  991. /* End fast path */
  992. /* racey check, but it gets redone */
  993. if (!retry && unlikely(!list_empty(&ctx->run_list))) {
  994. retry = 1;
  995. aio_run_all_iocbs(ctx);
  996. goto retry;
  997. }
  998. init_timeout(&to);
  999. if (timeout) {
  1000. struct timespec ts;
  1001. ret = -EFAULT;
  1002. if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
  1003. goto out;
  1004. set_timeout(start_jiffies, &to, &ts);
  1005. }
  1006. while (likely(i < nr)) {
  1007. add_wait_queue_exclusive(&ctx->wait, &wait);
  1008. do {
  1009. set_task_state(tsk, TASK_INTERRUPTIBLE);
  1010. ret = aio_read_evt(ctx, &ent);
  1011. if (ret)
  1012. break;
  1013. if (min_nr <= i)
  1014. break;
  1015. if (unlikely(ctx->dead)) {
  1016. ret = -EINVAL;
  1017. break;
  1018. }
  1019. if (to.timed_out) /* Only check after read evt */
  1020. break;
  1021. /* Try to only show up in io wait if there are ops
  1022. * in flight */
  1023. if (ctx->reqs_active)
  1024. io_schedule();
  1025. else
  1026. schedule();
  1027. if (signal_pending(tsk)) {
  1028. ret = -EINTR;
  1029. break;
  1030. }
  1031. /*ret = aio_read_evt(ctx, &ent);*/
  1032. } while (1) ;
  1033. set_task_state(tsk, TASK_RUNNING);
  1034. remove_wait_queue(&ctx->wait, &wait);
  1035. if (unlikely(ret <= 0))
  1036. break;
  1037. ret = -EFAULT;
  1038. if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
  1039. dprintk("aio: lost an event due to EFAULT.\n");
  1040. break;
  1041. }
  1042. /* Good, event copied to userland, update counts. */
  1043. event ++;
  1044. i ++;
  1045. }
  1046. if (timeout)
  1047. clear_timeout(&to);
  1048. out:
  1049. return i ? i : ret;
  1050. }
  1051. /* Take an ioctx and remove it from the list of ioctx's. Protects
  1052. * against races with itself via ->dead.
  1053. */
  1054. static void io_destroy(struct kioctx *ioctx)
  1055. {
  1056. struct mm_struct *mm = current->mm;
  1057. struct kioctx **tmp;
  1058. int was_dead;
  1059. /* delete the entry from the list if someone else hasn't already */
  1060. write_lock(&mm->ioctx_list_lock);
  1061. was_dead = ioctx->dead;
  1062. ioctx->dead = 1;
  1063. for (tmp = &mm->ioctx_list; *tmp && *tmp != ioctx;
  1064. tmp = &(*tmp)->next)
  1065. ;
  1066. if (*tmp)
  1067. *tmp = ioctx->next;
  1068. write_unlock(&mm->ioctx_list_lock);
  1069. dprintk("aio_release(%p)\n", ioctx);
  1070. if (likely(!was_dead))
  1071. put_ioctx(ioctx); /* twice for the list */
  1072. aio_cancel_all(ioctx);
  1073. wait_for_all_aios(ioctx);
  1074. /*
  1075. * Wake up any waiters. The setting of ctx->dead must be seen
  1076. * by other CPUs at this point. Right now, we rely on the
  1077. * locking done by the above calls to ensure this consistency.
  1078. */
  1079. wake_up(&ioctx->wait);
  1080. put_ioctx(ioctx); /* once for the lookup */
  1081. }
  1082. /* sys_io_setup:
  1083. * Create an aio_context capable of receiving at least nr_events.
  1084. * ctxp must not point to an aio_context that already exists, and
  1085. * must be initialized to 0 prior to the call. On successful
  1086. * creation of the aio_context, *ctxp is filled in with the resulting
  1087. * handle. May fail with -EINVAL if *ctxp is not initialized,
  1088. * if the specified nr_events exceeds internal limits. May fail
  1089. * with -EAGAIN if the specified nr_events exceeds the user's limit
  1090. * of available events. May fail with -ENOMEM if insufficient kernel
  1091. * resources are available. May fail with -EFAULT if an invalid
  1092. * pointer is passed for ctxp. Will fail with -ENOSYS if not
  1093. * implemented.
  1094. */
  1095. asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
  1096. {
  1097. struct kioctx *ioctx = NULL;
  1098. unsigned long ctx;
  1099. long ret;
  1100. ret = get_user(ctx, ctxp);
  1101. if (unlikely(ret))
  1102. goto out;
  1103. ret = -EINVAL;
  1104. if (unlikely(ctx || nr_events == 0)) {
  1105. pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
  1106. ctx, nr_events);
  1107. goto out;
  1108. }
  1109. ioctx = ioctx_alloc(nr_events);
  1110. ret = PTR_ERR(ioctx);
  1111. if (!IS_ERR(ioctx)) {
  1112. ret = put_user(ioctx->user_id, ctxp);
  1113. if (!ret)
  1114. return 0;
  1115. get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
  1116. io_destroy(ioctx);
  1117. }
  1118. out:
  1119. return ret;
  1120. }
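/*
 * Illustrative userspace sketch (not part of this file): libc does not
 * wrap these syscalls, so a program would typically use syscall(2) or
 * libaio.  Error handling omitted.
 *
 *	aio_context_t ctx = 0;
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 */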
  1121. /* sys_io_destroy:
  1122. * Destroy the aio_context specified. May cancel any outstanding
  1123. * AIOs and block on completion. Will fail with -ENOSYS if not
  1124. * implemented. May fail with -EFAULT if the context pointed to
  1125. * is invalid.
  1126. */
  1127. asmlinkage long sys_io_destroy(aio_context_t ctx)
  1128. {
  1129. struct kioctx *ioctx = lookup_ioctx(ctx);
  1130. if (likely(NULL != ioctx)) {
  1131. io_destroy(ioctx);
  1132. return 0;
  1133. }
  1134. pr_debug("EINVAL: io_destroy: invalid context id\n");
  1135. return -EINVAL;
  1136. }
  1137. static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
  1138. {
  1139. struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];
  1140. BUG_ON(ret <= 0);
  1141. while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
  1142. ssize_t this = min((ssize_t)iov->iov_len, ret);
  1143. iov->iov_base += this;
  1144. iov->iov_len -= this;
  1145. iocb->ki_left -= this;
  1146. ret -= this;
  1147. if (iov->iov_len == 0) {
  1148. iocb->ki_cur_seg++;
  1149. iov++;
  1150. }
  1151. }
  1152. /* the caller should not have done more io than what fit in
  1153. * the remaining iovecs */
  1154. BUG_ON(ret > 0 && iocb->ki_left == 0);
  1155. }
  1156. static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
  1157. {
  1158. struct file *file = iocb->ki_filp;
  1159. struct address_space *mapping = file->f_mapping;
  1160. struct inode *inode = mapping->host;
  1161. ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
  1162. unsigned long, loff_t);
  1163. ssize_t ret = 0;
  1164. unsigned short opcode;
  1165. if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
  1166. (iocb->ki_opcode == IOCB_CMD_PREAD)) {
  1167. rw_op = file->f_op->aio_read;
  1168. opcode = IOCB_CMD_PREADV;
  1169. } else {
  1170. rw_op = file->f_op->aio_write;
  1171. opcode = IOCB_CMD_PWRITEV;
  1172. }
  1173. /* This matches the pread()/pwrite() logic */
  1174. if (iocb->ki_pos < 0)
  1175. return -EINVAL;
  1176. do {
  1177. ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
  1178. iocb->ki_nr_segs - iocb->ki_cur_seg,
  1179. iocb->ki_pos);
  1180. if (ret > 0)
  1181. aio_advance_iovec(iocb, ret);
  1182. /* retry all partial writes. retry partial reads as long as it's a
  1183. * regular file. */
  1184. } while (ret > 0 && iocb->ki_left > 0 &&
  1185. (opcode == IOCB_CMD_PWRITEV ||
  1186. (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));
  1187. /* This means we must have transferred all that we could */
  1188. /* No need to retry anymore */
  1189. if ((ret == 0) || (iocb->ki_left == 0))
  1190. ret = iocb->ki_nbytes - iocb->ki_left;
  1191. /* If we managed to write some out we return that, rather than
  1192. * the eventual error. */
  1193. if (opcode == IOCB_CMD_PWRITEV
  1194. && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
  1195. && iocb->ki_nbytes - iocb->ki_left)
  1196. ret = iocb->ki_nbytes - iocb->ki_left;
  1197. return ret;
  1198. }
  1199. static ssize_t aio_fdsync(struct kiocb *iocb)
  1200. {
  1201. struct file *file = iocb->ki_filp;
  1202. ssize_t ret = -EINVAL;
  1203. if (file->f_op->aio_fsync)
  1204. ret = file->f_op->aio_fsync(iocb, 1);
  1205. return ret;
  1206. }
  1207. static ssize_t aio_fsync(struct kiocb *iocb)
  1208. {
  1209. struct file *file = iocb->ki_filp;
  1210. ssize_t ret = -EINVAL;
  1211. if (file->f_op->aio_fsync)
  1212. ret = file->f_op->aio_fsync(iocb, 0);
  1213. return ret;
  1214. }
  1215. static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
  1216. {
  1217. ssize_t ret;
  1218. ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
  1219. kiocb->ki_nbytes, 1,
  1220. &kiocb->ki_inline_vec, &kiocb->ki_iovec);
  1221. if (ret < 0)
  1222. goto out;
  1223. kiocb->ki_nr_segs = kiocb->ki_nbytes;
  1224. kiocb->ki_cur_seg = 0;
  1225. /* ki_nbytes/left now reflect bytes instead of segs */
  1226. kiocb->ki_nbytes = ret;
  1227. kiocb->ki_left = ret;
  1228. ret = 0;
  1229. out:
  1230. return ret;
  1231. }
  1232. static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
  1233. {
  1234. kiocb->ki_iovec = &kiocb->ki_inline_vec;
  1235. kiocb->ki_iovec->iov_base = kiocb->ki_buf;
  1236. kiocb->ki_iovec->iov_len = kiocb->ki_left;
  1237. kiocb->ki_nr_segs = 1;
  1238. kiocb->ki_cur_seg = 0;
  1239. return 0;
  1240. }
  1241. /*
  1242. * aio_setup_iocb:
  1243. * Performs the initial checks and aio retry method
  1244. * setup for the kiocb at the time of io submission.
  1245. */
  1246. static ssize_t aio_setup_iocb(struct kiocb *kiocb)
  1247. {
  1248. struct file *file = kiocb->ki_filp;
  1249. ssize_t ret = 0;
  1250. switch (kiocb->ki_opcode) {
  1251. case IOCB_CMD_PREAD:
  1252. ret = -EBADF;
  1253. if (unlikely(!(file->f_mode & FMODE_READ)))
  1254. break;
  1255. ret = -EFAULT;
  1256. if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
  1257. kiocb->ki_left)))
  1258. break;
  1259. ret = security_file_permission(file, MAY_READ);
  1260. if (unlikely(ret))
  1261. break;
  1262. ret = aio_setup_single_vector(kiocb);
  1263. if (ret)
  1264. break;
  1265. ret = -EINVAL;
  1266. if (file->f_op->aio_read)
  1267. kiocb->ki_retry = aio_rw_vect_retry;
  1268. break;
  1269. case IOCB_CMD_PWRITE:
  1270. ret = -EBADF;
  1271. if (unlikely(!(file->f_mode & FMODE_WRITE)))
  1272. break;
  1273. ret = -EFAULT;
  1274. if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
  1275. kiocb->ki_left)))
  1276. break;
  1277. ret = security_file_permission(file, MAY_WRITE);
  1278. if (unlikely(ret))
  1279. break;
  1280. ret = aio_setup_single_vector(kiocb);
  1281. if (ret)
  1282. break;
  1283. ret = -EINVAL;
  1284. if (file->f_op->aio_write)
  1285. kiocb->ki_retry = aio_rw_vect_retry;
  1286. break;
  1287. case IOCB_CMD_PREADV:
  1288. ret = -EBADF;
  1289. if (unlikely(!(file->f_mode & FMODE_READ)))
  1290. break;
  1291. ret = security_file_permission(file, MAY_READ);
  1292. if (unlikely(ret))
  1293. break;
  1294. ret = aio_setup_vectored_rw(READ, kiocb);
  1295. if (ret)
  1296. break;
  1297. ret = -EINVAL;
  1298. if (file->f_op->aio_read)
  1299. kiocb->ki_retry = aio_rw_vect_retry;
  1300. break;
  1301. case IOCB_CMD_PWRITEV:
  1302. ret = -EBADF;
  1303. if (unlikely(!(file->f_mode & FMODE_WRITE)))
  1304. break;
  1305. ret = security_file_permission(file, MAY_WRITE);
  1306. if (unlikely(ret))
  1307. break;
  1308. ret = aio_setup_vectored_rw(WRITE, kiocb);
  1309. if (ret)
  1310. break;
  1311. ret = -EINVAL;
  1312. if (file->f_op->aio_write)
  1313. kiocb->ki_retry = aio_rw_vect_retry;
  1314. break;
  1315. case IOCB_CMD_FDSYNC:
  1316. ret = -EINVAL;
  1317. if (file->f_op->aio_fsync)
  1318. kiocb->ki_retry = aio_fdsync;
  1319. break;
  1320. case IOCB_CMD_FSYNC:
  1321. ret = -EINVAL;
  1322. if (file->f_op->aio_fsync)
  1323. kiocb->ki_retry = aio_fsync;
  1324. break;
  1325. default:
  1326. dprintk("EINVAL: io_submit: no operation provided\n");
  1327. ret = -EINVAL;
  1328. }
  1329. if (!kiocb->ki_retry)
  1330. return ret;
  1331. return 0;
  1332. }
  1333. /*
  1334. * aio_wake_function:
  1335. * wait queue callback function for aio notification,
  1336. * Simply triggers a retry of the operation via kick_iocb.
  1337. *
  1338. * This callback is specified in the wait queue entry in
  1339. * a kiocb.
  1340. *
  1341. * Note:
  1342. * This routine is executed with the wait queue lock held.
  1343. * Since kick_iocb acquires iocb->ctx->ctx_lock, it nests
  1344. * the ioctx lock inside the wait queue lock. This is safe
  1345. * because this callback isn't used for wait queues which
  1346. * are nested inside ioctx lock (i.e. ctx->wait)
  1347. */
  1348. static int aio_wake_function(wait_queue_t *wait, unsigned mode,
  1349. int sync, void *key)
  1350. {
  1351. struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);
  1352. list_del_init(&wait->task_list);
  1353. kick_iocb(iocb);
  1354. return 1;
  1355. }
  1356. static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
  1357. struct iocb *iocb)
  1358. {
  1359. struct kiocb *req;
  1360. struct file *file;
  1361. ssize_t ret;
  1362. /* enforce forwards compatibility on users */
  1363. if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
  1364. pr_debug("EINVAL: io_submit: reserve field set\n");
  1365. return -EINVAL;
  1366. }
  1367. /* prevent overflows */
  1368. if (unlikely(
  1369. (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
  1370. (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
  1371. ((ssize_t)iocb->aio_nbytes < 0)
  1372. )) {
  1373. pr_debug("EINVAL: io_submit: overflow check\n");
  1374. return -EINVAL;
  1375. }
  1376. file = fget(iocb->aio_fildes);
  1377. if (unlikely(!file))
  1378. return -EBADF;
  1379. req = aio_get_req(ctx); /* returns with 2 references to req */
  1380. if (unlikely(!req)) {
  1381. fput(file);
  1382. return -EAGAIN;
  1383. }
  1384. req->ki_filp = file;
  1385. if (iocb->aio_flags & IOCB_FLAG_RESFD) {
  1386. /*
  1387. * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
  1388. * instance of the file* now. The file descriptor must be
  1389. * an eventfd() fd, and will be signaled for each completed
  1390. * event using the eventfd_signal() function.
  1391. */
  1392. req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
  1393. if (unlikely(IS_ERR(req->ki_eventfd))) {
  1394. ret = PTR_ERR(req->ki_eventfd);
  1395. goto out_put_req;
  1396. }
  1397. }
  1398. ret = put_user(req->ki_key, &user_iocb->aio_key);
  1399. if (unlikely(ret)) {
  1400. dprintk("EFAULT: aio_key\n");
  1401. goto out_put_req;
  1402. }
  1403. req->ki_obj.user = user_iocb;
  1404. req->ki_user_data = iocb->aio_data;
  1405. req->ki_pos = iocb->aio_offset;
  1406. req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
  1407. req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
  1408. req->ki_opcode = iocb->aio_lio_opcode;
  1409. init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
  1410. INIT_LIST_HEAD(&req->ki_wait.task_list);
  1411. ret = aio_setup_iocb(req);
  1412. if (ret)
  1413. goto out_put_req;
  1414. spin_lock_irq(&ctx->ctx_lock);
  1415. aio_run_iocb(req);
  1416. if (!list_empty(&ctx->run_list)) {
  1417. /* drain the run list */
  1418. while (__aio_run_iocbs(ctx))
  1419. ;
  1420. }
  1421. spin_unlock_irq(&ctx->ctx_lock);
  1422. aio_put_req(req); /* drop extra ref to req */
  1423. return 0;
  1424. out_put_req:
  1425. aio_put_req(req); /* drop extra ref to req */
  1426. aio_put_req(req); /* drop i/o ref to req */
  1427. return ret;
  1428. }
  1429. /* sys_io_submit:
  1430. * Queue the nr iocbs pointed to by iocbpp for processing. Returns
  1431. * the number of iocbs queued. May return -EINVAL if the aio_context
  1432. * specified by ctx_id is invalid, if nr is < 0, if the iocb at
  1433. * *iocbpp[0] is not properly initialized, if the operation specified
  1434. * is invalid for the file descriptor in the iocb. May fail with
  1435. * -EFAULT if any of the data structures point to invalid data. May
  1436. * fail with -EBADF if the file descriptor specified in the first
  1437. * iocb is invalid. May fail with -EAGAIN if insufficient resources
  1438. * are available to queue any iocbs. Will return 0 if nr is 0. Will
  1439. * fail with -ENOSYS if not implemented.
  1440. */
  1441. asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
  1442. struct iocb __user * __user *iocbpp)
  1443. {
  1444. struct kioctx *ctx;
  1445. long ret = 0;
  1446. int i;
  1447. if (unlikely(nr < 0))
  1448. return -EINVAL;
  1449. if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
  1450. return -EFAULT;
  1451. ctx = lookup_ioctx(ctx_id);
  1452. if (unlikely(!ctx)) {
  1453. pr_debug("EINVAL: io_submit: invalid context id\n");
  1454. return -EINVAL;
  1455. }
  1456. /*
  1457. * AKPM: should this return a partial result if some of the IOs were
  1458. * successfully submitted?
  1459. */
  1460. for (i=0; i<nr; i++) {
  1461. struct iocb __user *user_iocb;
  1462. struct iocb tmp;
  1463. if (unlikely(__get_user(user_iocb, iocbpp + i))) {
  1464. ret = -EFAULT;
  1465. break;
  1466. }
  1467. if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
  1468. ret = -EFAULT;
  1469. break;
  1470. }
  1471. ret = io_submit_one(ctx, user_iocb, &tmp);
  1472. if (ret)
  1473. break;
  1474. }
  1475. put_ioctx(ctx);
  1476. return i ? i : ret;
  1477. }
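/*
 * Illustrative userspace sketch (not part of this file): submitting a
 * single read on the context obtained from io_setup() above.  Field
 * names come from <linux/aio_abi.h>; error handling omitted.
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes     = fd;
 *	cb.aio_buf        = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes     = 4096;
 *	cb.aio_offset     = 0;
 *	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */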
  1478. /* lookup_kiocb
  1479. * Finds a given iocb for cancellation.
  1480. */
  1481. static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
  1482. u32 key)
  1483. {
  1484. struct list_head *pos;
  1485. assert_spin_locked(&ctx->ctx_lock);
  1486. /* TODO: use a hash or array, this sucks. */
  1487. list_for_each(pos, &ctx->active_reqs) {
  1488. struct kiocb *kiocb = list_kiocb(pos);
  1489. if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
  1490. return kiocb;
  1491. }
  1492. return NULL;
  1493. }
  1494. /* sys_io_cancel:
  1495. * Attempts to cancel an iocb previously passed to io_submit. If
  1496. * the operation is successfully cancelled, the resulting event is
  1497. * copied into the memory pointed to by result without being placed
  1498. * into the completion queue and 0 is returned. May fail with
  1499. * -EFAULT if any of the data structures pointed to are invalid.
  1500. * May fail with -EINVAL if aio_context specified by ctx_id is
  1501. * invalid. May fail with -EAGAIN if the iocb specified was not
  1502. * cancelled. Will fail with -ENOSYS if not implemented.
  1503. */
  1504. asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
  1505. struct io_event __user *result)
  1506. {
  1507. int (*cancel)(struct kiocb *iocb, struct io_event *res);
  1508. struct kioctx *ctx;
  1509. struct kiocb *kiocb;
  1510. u32 key;
  1511. int ret;
  1512. ret = get_user(key, &iocb->aio_key);
  1513. if (unlikely(ret))
  1514. return -EFAULT;
  1515. ctx = lookup_ioctx(ctx_id);
  1516. if (unlikely(!ctx))
  1517. return -EINVAL;
  1518. spin_lock_irq(&ctx->ctx_lock);
  1519. ret = -EAGAIN;
  1520. kiocb = lookup_kiocb(ctx, iocb, key);
  1521. if (kiocb && kiocb->ki_cancel) {
  1522. cancel = kiocb->ki_cancel;
  1523. kiocb->ki_users ++;
  1524. kiocbSetCancelled(kiocb);
  1525. } else
  1526. cancel = NULL;
  1527. spin_unlock_irq(&ctx->ctx_lock);
  1528. if (NULL != cancel) {
  1529. struct io_event tmp;
  1530. pr_debug("calling cancel\n");
  1531. memset(&tmp, 0, sizeof(tmp));
  1532. tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
  1533. tmp.data = kiocb->ki_user_data;
  1534. ret = cancel(kiocb, &tmp);
  1535. if (!ret) {
  1536. /* Cancellation succeeded -- copy the result
  1537. * into the user's buffer.
  1538. */
  1539. if (copy_to_user(result, &tmp, sizeof(tmp)))
  1540. ret = -EFAULT;
  1541. }
  1542. } else
  1543. ret = -EINVAL;
  1544. put_ioctx(ctx);
  1545. return ret;
  1546. }
  1547. /* io_getevents:
  1548. * Attempts to read at least min_nr events and up to nr events from
  1549. * the completion queue for the aio_context specified by ctx_id. May
  1550. * fail with -EINVAL if ctx_id is invalid, if min_nr is out of range,
  1551. * if nr is out of range, if when is out of range. May fail with
  1552. * -EFAULT if any of the memory specified to is invalid. May return
  1553. * 0 or < min_nr if no events are available and the timeout specified
  1554. * by when has elapsed, where when == NULL specifies an infinite
  1555. * timeout. Note that the timeout pointed to by when is relative and
  1556. * will be updated if not NULL and the operation blocks. Will fail
  1557. * with -ENOSYS if not implemented.
  1558. */
  1559. asmlinkage long sys_io_getevents(aio_context_t ctx_id,
  1560. long min_nr,
  1561. long nr,
  1562. struct io_event __user *events,
  1563. struct timespec __user *timeout)
  1564. {
  1565. struct kioctx *ioctx = lookup_ioctx(ctx_id);
  1566. long ret = -EINVAL;
  1567. if (likely(ioctx)) {
  1568. if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0))
  1569. ret = read_events(ioctx, min_nr, nr, events, timeout);
  1570. put_ioctx(ioctx);
  1571. }
  1572. asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
  1573. return ret;
  1574. }
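/*
 * Illustrative userspace sketch (not part of this file): reaping the
 * completion of the submission shown above, then tearing the context
 * down.  Error handling omitted.
 *
 *	struct io_event ev;
 *	if (syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL) == 1)
 *		printf("res=%lld\n", (long long)ev.res);
 *	syscall(__NR_io_destroy, ctx);
 */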
  1575. __initcall(aio_setup);
  1576. EXPORT_SYMBOL(aio_complete);
  1577. EXPORT_SYMBOL(aio_put_req);
  1578. EXPORT_SYMBOL(wait_on_sync_kiocb);