/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */
/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>

#include "blk.h"

static int __make_request(struct request_queue *q, struct bio *bio);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
static void drive_stat_acct(struct request *rq, int new_io)
{
        int rw = rq_data_dir(rq);

        if (!blk_fs_request(rq) || !rq->rq_disk)
                return;

        if (!new_io) {
                __all_stat_inc(rq->rq_disk, merges[rw], rq->sector);
        } else {
                struct hd_struct *part = get_part(rq->rq_disk, rq->sector);
                disk_round_stats(rq->rq_disk);
                rq->rq_disk->in_flight++;
                if (part) {
                        part_round_stats(part);
                        part->in_flight++;
                }
        }
}
void blk_queue_congestion_threshold(struct request_queue *q)
{
        int nr;

        nr = q->nr_requests - (q->nr_requests / 8) + 1;
        if (nr > q->nr_requests)
                nr = q->nr_requests;
        q->nr_congestion_on = nr;

        nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
        if (nr < 1)
                nr = 1;
        q->nr_congestion_off = nr;
}
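
/*
 * Worked example: with the default of 128 requests (BLKDEV_MAX_RQ), the
 * queue is flagged congested once 128 - 128/8 + 1 = 113 requests are
 * allocated, and the flag is cleared again only when the count falls
 * below 128 - 128/8 - 128/16 - 1 = 103. The gap between the two
 * thresholds provides hysteresis, so the congestion state does not flap
 * on every allocate/free pair near the limit.
 */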
/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
        struct backing_dev_info *ret = NULL;
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                ret = &q->backing_dev_info;
        return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);
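
/*
 * Example (minimal sketch): a typical caller uses the returned
 * backing_dev_info to test for congestion before kicking off more
 * writeback. bdi_write_congested() is assumed from
 * <linux/backing-dev.h>; bdev is hypothetical.
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *
 *	if (bdi == NULL || bdi_write_congested(bdi))
 *		return;		back off and retry writeback later
 */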
/*
 * We can't just memset() the structure, since the allocation path
 * already stored some information in the request.
 */
void rq_init(struct request_queue *q, struct request *rq)
{
        INIT_LIST_HEAD(&rq->queuelist);
        INIT_LIST_HEAD(&rq->donelist);
        rq->q = q;
        rq->sector = rq->hard_sector = (sector_t) -1;
        rq->nr_sectors = rq->hard_nr_sectors = 0;
        rq->current_nr_sectors = rq->hard_cur_sectors = 0;
        rq->bio = rq->biotail = NULL;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->rq_disk = NULL;
        rq->nr_phys_segments = 0;
        rq->nr_hw_segments = 0;
        rq->ioprio = 0;
        rq->special = NULL;
        rq->buffer = NULL;
        rq->tag = -1;
        rq->errors = 0;
        rq->ref_count = 1;
        rq->cmd_len = 0;
        memset(rq->cmd, 0, sizeof(rq->cmd));
        rq->data_len = 0;
        rq->sense_len = 0;
        rq->data = NULL;
        rq->sense = NULL;
        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->next_rq = NULL;
}
static void req_bio_endio(struct request *rq, struct bio *bio,
                          unsigned int nbytes, int error)
{
        struct request_queue *q = rq->q;

        if (&q->bar_rq != rq) {
                if (error)
                        clear_bit(BIO_UPTODATE, &bio->bi_flags);
                else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                        error = -EIO;

                if (unlikely(nbytes > bio->bi_size)) {
                        printk(KERN_ERR "%s: want %u bytes done, %u left\n",
                               __FUNCTION__, nbytes, bio->bi_size);
                        nbytes = bio->bi_size;
                }

                bio->bi_size -= nbytes;
                bio->bi_sector += (nbytes >> 9);
                if (bio->bi_size == 0)
                        bio_endio(bio, error);
        } else {
                /*
                 * Okay, this is the barrier request in progress, just
                 * record the error.
                 */
                if (error && !q->orderr)
                        q->orderr = error;
        }
}
void blk_dump_rq_flags(struct request *rq, char *msg)
{
        int bit;

        printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
               rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
               rq->cmd_flags);

        printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
               (unsigned long long)rq->sector,
               rq->nr_sectors,
               rq->current_nr_sectors);
        printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
               rq->bio, rq->biotail,
               rq->buffer, rq->data,
               rq->data_len);

        if (blk_pc_request(rq)) {
                printk(KERN_INFO "  cdb: ");
                for (bit = 0; bit < sizeof(rq->cmd); bit++)
                        printk("%02x ", rq->cmd[bit]);
                printk("\n");
        }
}
EXPORT_SYMBOL(blk_dump_rq_flags);
/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue and
 * with the queue lock held.
 */
void blk_plug_device(struct request_queue *q)
{
        WARN_ON(!irqs_disabled());

        /*
         * don't plug a stopped queue, it must be paired with blk_start_queue()
         * which will restart the queueing
         */
        if (blk_queue_stopped(q))
                return;

        if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
                blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
        }
}
EXPORT_SYMBOL(blk_plug_device);
/*
 * remove the queue from the plugged list, if present. called with
 * queue lock held and interrupts disabled.
 */
int blk_remove_plug(struct request_queue *q)
{
        WARN_ON(!irqs_disabled());

        if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
                return 0;

        del_timer(&q->unplug_timer);
        return 1;
}
EXPORT_SYMBOL(blk_remove_plug);

/*
 * remove the plug and let it rip..
 */
void __generic_unplug_device(struct request_queue *q)
{
        if (unlikely(blk_queue_stopped(q)))
                return;

        if (!blk_remove_plug(q))
                return;

        q->request_fn(q);
}
EXPORT_SYMBOL(__generic_unplug_device);
/**
 * generic_unplug_device - fire a request queue
 * @q:	The &struct request_queue in question
 *
 * Description:
 *   Linux uses plugging to build bigger request queues before letting
 *   the device have at them. If a queue is plugged, the I/O scheduler
 *   is still adding and merging requests on the queue. Once the queue
 *   gets unplugged, the request_fn defined for the queue is invoked and
 *   transfers started.
 **/
void generic_unplug_device(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        __generic_unplug_device(q);
        spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(generic_unplug_device);
static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
                                   struct page *page)
{
        struct request_queue *q = bdi->unplug_io_data;

        blk_unplug(q);
}

void blk_unplug_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, unplug_work);

        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
                              q->rq.count[READ] + q->rq.count[WRITE]);

        q->unplug_fn(q);
}

void blk_unplug_timeout(unsigned long data)
{
        struct request_queue *q = (struct request_queue *)data;

        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
                              q->rq.count[READ] + q->rq.count[WRITE]);

        kblockd_schedule_work(&q->unplug_work);
}

void blk_unplug(struct request_queue *q)
{
        /*
         * devices don't necessarily have an ->unplug_fn defined
         */
        if (q->unplug_fn) {
                blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
                                      q->rq.count[READ] + q->rq.count[WRITE]);

                q->unplug_fn(q);
        }
}
EXPORT_SYMBOL(blk_unplug);
/**
 * blk_start_queue - restart a previously stopped queue
 * @q:	The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
        WARN_ON(!irqs_disabled());

        clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);

        /*
         * one level of recursion is ok and is much faster than kicking
         * the unplug handling
         */
        if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
                q->request_fn(q);
                clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
        } else {
                blk_plug_device(q);
                kblockd_schedule_work(&q->unplug_work);
        }
}
EXPORT_SYMBOL(blk_start_queue);
/**
 * blk_stop_queue - stop a queue
 * @q:	The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
        blk_remove_plug(q);
        set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
}
EXPORT_SYMBOL(blk_stop_queue);
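
/*
 * Example (minimal sketch): a driver whose hardware reports a full
 * command queue can park the block queue and restart it from its
 * completion path. my_dev, my_hw_full() and my_hw_issue() are
 * hypothetical.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			if (my_hw_full(my_dev)) {
 *				blk_stop_queue(q);	(queue lock is held here)
 *				break;
 *			}
 *			blkdev_dequeue_request(rq);
 *			my_hw_issue(my_dev, rq);
 *		}
 *	}
 *
 * Later, from the completion interrupt and with q->queue_lock held,
 * calling blk_start_queue(q) allows request_fn to be invoked again.
 */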
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
        del_timer_sync(&q->unplug_timer);
        kblockd_flush_work(&q->unplug_work);
}
EXPORT_SYMBOL(blk_sync_queue);
/**
 * blk_run_queue - run a single device queue
 * @q:	The queue to run
 */
void blk_run_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_remove_plug(q);

        /*
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
        if (!elv_queue_empty(q)) {
                if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
                        q->request_fn(q);
                        clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
                } else {
                        blk_plug_device(q);
                        kblockd_schedule_work(&q->unplug_work);
                }
        }

        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
void blk_put_queue(struct request_queue *q)
{
        kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_cleanup_queue(struct request_queue *q)
{
        mutex_lock(&q->sysfs_lock);
        set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
        mutex_unlock(&q->sysfs_lock);

        if (q->elevator)
                elevator_exit(q->elevator);

        blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
static int blk_init_free_list(struct request_queue *q)
{
        struct request_list *rl = &q->rq;

        rl->count[READ] = rl->count[WRITE] = 0;
        rl->starved[READ] = rl->starved[WRITE] = 0;
        rl->elvpriv = 0;
        init_waitqueue_head(&rl->wait[READ]);
        init_waitqueue_head(&rl->wait[WRITE]);

        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                          mempool_free_slab, request_cachep,
                                          q->node);

        if (!rl->rq_pool)
                return -ENOMEM;

        return 0;
}
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
        return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
        struct request_queue *q;
        int err;

        q = kmem_cache_alloc_node(blk_requestq_cachep,
                                  gfp_mask | __GFP_ZERO, node_id);
        if (!q)
                return NULL;

        q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
        q->backing_dev_info.unplug_io_data = q;
        err = bdi_init(&q->backing_dev_info);
        if (err) {
                kmem_cache_free(blk_requestq_cachep, q);
                return NULL;
        }

        init_timer(&q->unplug_timer);

        kobject_init(&q->kobj, &blk_queue_ktype);

        mutex_init(&q->sysfs_lock);

        return q;
}
EXPORT_SYMBOL(blk_alloc_queue_node);
/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue(). The function @rfn will be called when there
 *    are requests on the queue that need to be processed. If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests from the
 *    queue, but only as many as it can handle at a time. If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will also be taken from interrupt context, so
 *    irq disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
        return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);
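
/*
 * Example (minimal sketch): typical bring-up and teardown for a simple
 * driver. my_request_fn, my_lock and my_queue are hypothetical names;
 * end_request() completes the current segment of a request.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static struct request_queue *my_queue;
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL)
 *			end_request(rq, 1);
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		my_queue = blk_init_queue(my_request_fn, &my_lock);
 *		return my_queue ? 0 : -ENOMEM;
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		blk_cleanup_queue(my_queue);
 *	}
 */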
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
        struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

        if (!q)
                return NULL;

        q->node = node_id;
        if (blk_init_free_list(q)) {
                kmem_cache_free(blk_requestq_cachep, q);
                return NULL;
        }

        /*
         * if caller didn't supply a lock, they get per-queue locking with
         * our embedded lock
         */
        if (!lock) {
                spin_lock_init(&q->__queue_lock);
                lock = &q->__queue_lock;
        }

        q->request_fn = rfn;
        q->prep_rq_fn = NULL;
        q->unplug_fn = generic_unplug_device;
        q->queue_flags = (1 << QUEUE_FLAG_CLUSTER);
        q->queue_lock = lock;

        blk_queue_segment_boundary(q, 0xffffffff);

        blk_queue_make_request(q, __make_request);
        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);

        q->sg_reserved_size = INT_MAX;

        /*
         * all done
         */
        if (!elevator_init(q, NULL)) {
                blk_queue_congestion_threshold(q);
                return q;
        }

        blk_put_queue(q);
        return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);
int blk_get_queue(struct request_queue *q)
{
        if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
                kobject_get(&q->kobj);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
        if (rq->cmd_flags & REQ_ELVPRIV)
                elv_put_request(q, rq);
        mempool_free(rq, q->rq.rq_pool);
}

static struct request *
blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
{
        struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

        if (!rq)
                return NULL;

        /*
         * first three bits are identical in rq->cmd_flags and bio->bi_rw,
         * see bio.h and blkdev.h
         */
        rq->cmd_flags = rw | REQ_ALLOCED;

        if (priv) {
                if (unlikely(elv_set_request(q, rq, gfp_mask))) {
                        mempool_free(rq, q->rq.rq_pool);
                        return NULL;
                }
                rq->cmd_flags |= REQ_ELVPRIV;
        }

        return rq;
}
/*
 * ioc_batching returns true if the ioc is a valid batching context and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
        if (!ioc)
                return 0;

        /*
         * Make sure the process is able to allocate at least 1 request
         * even if the batch times out, otherwise we could theoretically
         * lose wakeups.
         */
        return ioc->nr_batch_requests == q->nr_batching ||
                (ioc->nr_batch_requests > 0
                && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
        if (!ioc || ioc_batching(q, ioc))
                return;

        ioc->nr_batch_requests = q->nr_batching;
        ioc->last_waited = jiffies;
}
static void __freed_request(struct request_queue *q, int rw)
{
        struct request_list *rl = &q->rq;

        if (rl->count[rw] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, rw);

        if (rl->count[rw] + 1 <= q->nr_requests) {
                if (waitqueue_active(&rl->wait[rw]))
                        wake_up(&rl->wait[rw]);

                blk_clear_queue_full(q, rw);
        }
}

/*
 * A request has just been released. Account for it, update the full and
 * congestion status, wake up any waiters. Called under q->queue_lock.
 */
static void freed_request(struct request_queue *q, int rw, int priv)
{
        struct request_list *rl = &q->rq;

        rl->count[rw]--;
        if (priv)
                rl->elvpriv--;

        __freed_request(q, rw);

        if (unlikely(rl->starved[rw ^ 1]))
                __freed_request(q, rw ^ 1);
}
#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)

/*
 * Get a free request, queue_lock must be held.
 * Returns NULL on failure, with queue_lock held.
 * Returns !NULL on success, with queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
                                   struct bio *bio, gfp_t gfp_mask)
{
        struct request *rq = NULL;
        struct request_list *rl = &q->rq;
        struct io_context *ioc = NULL;
        const int rw = rw_flags & 0x01;
        int may_queue, priv;

        may_queue = elv_may_queue(q, rw_flags);
        if (may_queue == ELV_MQUEUE_NO)
                goto rq_starved;

        if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
                if (rl->count[rw]+1 >= q->nr_requests) {
                        ioc = current_io_context(GFP_ATOMIC, q->node);
                        /*
                         * The queue will fill after this allocation, so set
                         * it as full, and mark this process as "batching".
                         * This process will be allowed to complete a batch of
                         * requests, others will be blocked.
                         */
                        if (!blk_queue_full(q, rw)) {
                                ioc_set_batching(q, ioc);
                                blk_set_queue_full(q, rw);
                        } else {
                                if (may_queue != ELV_MQUEUE_MUST
                                                && !ioc_batching(q, ioc)) {
                                        /*
                                         * The queue is full and the allocating
                                         * process is not a "batcher", and not
                                         * exempted by the IO scheduler
                                         */
                                        goto out;
                                }
                        }
                }
                blk_set_queue_congested(q, rw);
        }

        /*
         * Only allow batching queuers to allocate up to 50% over the defined
         * limit of requests, otherwise we could have thousands of requests
         * allocated with any setting of ->nr_requests
         */
        if (rl->count[rw] >= (3 * q->nr_requests / 2))
                goto out;

        rl->count[rw]++;
        rl->starved[rw] = 0;

        priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        if (priv)
                rl->elvpriv++;

        spin_unlock_irq(q->queue_lock);

        rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
        if (unlikely(!rq)) {
                /*
                 * Allocation failed presumably due to memory. Undo anything
                 * we might have messed up.
                 *
                 * Allocating task should really be put onto the front of the
                 * wait queue, but this is pretty rare.
                 */
                spin_lock_irq(q->queue_lock);
                freed_request(q, rw, priv);

                /*
                 * in the very unlikely event that allocation failed and no
                 * requests for this direction were pending, mark us starved
                 * so that freeing of a request in the other direction will
                 * notice us. another possible fix would be to split the
                 * rq mempool into READ and WRITE
                 */
rq_starved:
                if (unlikely(rl->count[rw] == 0))
                        rl->starved[rw] = 1;

                goto out;
        }

        /*
         * ioc may be NULL here, and ioc_batching will be false. That's
         * OK, if the queue is under the request limit then requests need
         * not count toward the nr_batch_requests limit. There will always
         * be some limit enforced by BLK_BATCH_TIME.
         */
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;

        rq_init(q, rq);

        blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
out:
        return rq;
}
/*
 * No available requests for this queue, unplug the device and wait for some
 * requests to become available.
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                                        struct bio *bio)
{
        const int rw = rw_flags & 0x01;
        struct request *rq;

        rq = get_request(q, rw_flags, bio, GFP_NOIO);
        while (!rq) {
                DEFINE_WAIT(wait);
                struct request_list *rl = &q->rq;

                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                                          TASK_UNINTERRUPTIBLE);

                rq = get_request(q, rw_flags, bio, GFP_NOIO);

                if (!rq) {
                        struct io_context *ioc;

                        blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);

                        __generic_unplug_device(q);
                        spin_unlock_irq(q->queue_lock);
                        io_schedule();

                        /*
                         * After sleeping, we become a "batching" process and
                         * will be able to allocate at least one request, and
                         * up to a big batch of them for a small period of
                         * time. See ioc_batching, ioc_set_batching
                         */
                        ioc = current_io_context(GFP_NOIO, q->node);
                        ioc_set_batching(q, ioc);

                        spin_lock_irq(q->queue_lock);
                }
                finish_wait(&rl->wait[rw], &wait);
        }

        return rq;
}
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
        struct request *rq;

        BUG_ON(rw != READ && rw != WRITE);

        spin_lock_irq(q->queue_lock);
        if (gfp_mask & __GFP_WAIT) {
                rq = get_request_wait(q, rw, NULL);
        } else {
                rq = get_request(q, rw, NULL, gfp_mask);
                if (!rq)
                        spin_unlock_irq(q->queue_lock);
        }
        /* q->queue_lock is unlocked at this point */

        return rq;
}
EXPORT_SYMBOL(blk_get_request);
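
/*
 * Example (minimal sketch): allocating a request for a passthrough
 * command and executing it synchronously. blk_execute_rq() from
 * block/blk-exec.c and the six-byte my_cdb command buffer are assumed
 * for illustration.
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd_len = 6;
 *	memcpy(rq->cmd, my_cdb, rq->cmd_len);
 *	blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 *
 * With __GFP_WAIT set in the mask, the allocation blocks until a
 * request becomes available instead of returning NULL.
 */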
/**
 * blk_start_queueing - initiate dispatch of requests to device
 * @q:	request queue to kick into gear
 *
 * This is basically a helper to remove the need to know whether a queue
 * is plugged or not if someone just wants to initiate dispatch of requests
 * for this queue.
 *
 * The queue lock must be held with interrupts disabled.
 */
void blk_start_queueing(struct request_queue *q)
{
        if (!blk_queue_plugged(q))
                q->request_fn(q);
        else
                __generic_unplug_device(q);
}
EXPORT_SYMBOL(blk_start_queueing);
/**
 * blk_requeue_request - put a request back on queue
 * @q:	request queue where request should be inserted
 * @rq:	request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);

        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);

        elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);
/**
 * blk_insert_request - insert a special request into a request queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 * @at_head:	insert request at head or tail of queue
 * @data:	private data
 *
 * Description:
 *    Many block devices need to execute commands asynchronously, so they don't
 *    block the whole kernel from preemption during request execution. This is
 *    accomplished normally by inserting artificial requests tagged as
 *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
 *    be scheduled for actual execution by the request queue.
 *
 *    We have the option of inserting the head or the tail of the queue.
 *    Typically we use the tail for new ioctls and so forth. We use the head
 *    of the queue for things like a QUEUE_FULL message from a device, or a
 *    host that is unable to accept a particular command.
 */
void blk_insert_request(struct request_queue *q, struct request *rq,
                        int at_head, void *data)
{
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
        unsigned long flags;

        /*
         * tell I/O scheduler that this isn't a regular read/write (ie it
         * must not attempt merges on this) and that it acts as a soft
         * barrier
         */
        rq->cmd_type = REQ_TYPE_SPECIAL;
        rq->cmd_flags |= REQ_SOFTBARRIER;

        rq->special = data;

        spin_lock_irqsave(q->queue_lock, flags);

        /*
         * If command is tagged, release the tag
         */
        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);

        drive_stat_acct(rq, 1);
        __elv_add_request(q, rq, where, 0);
        blk_start_queueing(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
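
/*
 * Example (minimal sketch): pushing a driver-private command to the
 * head of the queue, e.g. to recover from a QUEUE_FULL condition.
 * my_cmd is a hypothetical driver structure that the request_fn later
 * finds via rq->special.
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	blk_insert_request(q, rq, 1, my_cmd);
 *
 * The driver's request_fn then sees a request with
 * rq->cmd_type == REQ_TYPE_SPECIAL and rq->special == my_cmd.
 */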
/*
 * add-request adds a request to the linked list.
 * queue lock is held and interrupts disabled, as we muck with the
 * request queue list.
 */
static inline void add_request(struct request_queue *q, struct request *req)
{
        drive_stat_acct(req, 1);

        /*
         * elevator indicated where it wants this request to be
         * inserted at elevator_merge time
         */
        __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
}
/*
 * disk_round_stats() - Round off the performance stats on a struct
 * disk_stats.
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation. To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats. This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void disk_round_stats(struct gendisk *disk)
{
        unsigned long now = jiffies;

        if (now == disk->stamp)
                return;

        if (disk->in_flight) {
                __disk_stat_add(disk, time_in_queue,
                                disk->in_flight * (now - disk->stamp));
                __disk_stat_add(disk, io_ticks, (now - disk->stamp));
        }
        disk->stamp = now;
}
EXPORT_SYMBOL_GPL(disk_round_stats);
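
/*
 * Worked example: if two requests have been in flight for the 10
 * jiffies since the last stamp, the round-off adds 2 * 10 = 20 to
 * time_in_queue (total waiting summed across requests) and 10 to
 * io_ticks (wall-clock time the disk was busy), then restarts the
 * measurement interval at the current jiffy.
 */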
void part_round_stats(struct hd_struct *part)
{
        unsigned long now = jiffies;

        if (now == part->stamp)
                return;

        if (part->in_flight) {
                __part_stat_add(part, time_in_queue,
                                part->in_flight * (now - part->stamp));
                __part_stat_add(part, io_ticks, (now - part->stamp));
        }
        part->stamp = now;
}
/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
        if (unlikely(!q))
                return;
        if (unlikely(--req->ref_count))
                return;

        elv_completed_request(q, req);

        /*
         * Request may not have originated from ll_rw_blk. If not,
         * it didn't come out of our reserved rq pools
         */
        if (req->cmd_flags & REQ_ALLOCED) {
                int rw = rq_data_dir(req);
                int priv = req->cmd_flags & REQ_ELVPRIV;

                BUG_ON(!list_empty(&req->queuelist));
                BUG_ON(!hlist_unhashed(&req->hash));

                blk_free_request(q, req);
                freed_request(q, rw, priv);
        }
}
EXPORT_SYMBOL_GPL(__blk_put_request);
void blk_put_request(struct request *req)
{
        unsigned long flags;
        struct request_queue *q = req->q;

        /*
         * Gee, IDE calls in w/ NULL q. Fix IDE and remove the
         * following if (q) test.
         */
        if (q) {
                spin_lock_irqsave(q->queue_lock, flags);
                __blk_put_request(q, req);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}
EXPORT_SYMBOL(blk_put_request);
void init_request_from_bio(struct request *req, struct bio *bio)
{
        req->cmd_type = REQ_TYPE_FS;

        /*
         * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
         */
        if (bio_rw_ahead(bio) || bio_failfast(bio))
                req->cmd_flags |= REQ_FAILFAST;

        /*
         * REQ_BARRIER implies no merging, but let's make it explicit
         */
        if (unlikely(bio_barrier(bio)))
                req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);

        if (bio_sync(bio))
                req->cmd_flags |= REQ_RW_SYNC;
        if (bio_rw_meta(bio))
                req->cmd_flags |= REQ_RW_META;

        req->errors = 0;
        req->hard_sector = req->sector = bio->bi_sector;
        req->ioprio = bio_prio(bio);
        req->start_time = jiffies;
        blk_rq_bio_prep(req->q, req, bio);
}
static int __make_request(struct request_queue *q, struct bio *bio)
{
        struct request *req;
        int el_ret, nr_sectors, barrier, err;
        const unsigned short prio = bio_prio(bio);
        const int sync = bio_sync(bio);
        int rw_flags;

        nr_sectors = bio_sectors(bio);

        /*
         * low level driver can indicate that it wants pages above a
         * certain limit bounced to low memory (ie for highmem, or even
         * ISA dma in theory)
         */
        blk_queue_bounce(q, &bio);

        barrier = bio_barrier(bio);
        if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
                err = -EOPNOTSUPP;
                goto end_io;
        }

        spin_lock_irq(q->queue_lock);

        if (unlikely(barrier) || elv_queue_empty(q))
                goto get_rq;

        el_ret = elv_merge(q, &req, bio);
        switch (el_ret) {
        case ELEVATOR_BACK_MERGE:
                BUG_ON(!rq_mergeable(req));

                if (!ll_back_merge_fn(q, req, bio))
                        break;

                blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);

                req->biotail->bi_next = bio;
                req->biotail = bio;
                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                req->ioprio = ioprio_best(req->ioprio, prio);
                drive_stat_acct(req, 0);
                if (!attempt_back_merge(q, req))
                        elv_merged_request(q, req, el_ret);
                goto out;

        case ELEVATOR_FRONT_MERGE:
                BUG_ON(!rq_mergeable(req));

                if (!ll_front_merge_fn(q, req, bio))
                        break;

                blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);

                bio->bi_next = req->bio;
                req->bio = bio;

                /*
                 * may not be valid. If the low level driver said
                 * it didn't need a bounce buffer then it better
                 * not touch req->buffer either...
                 */
                req->buffer = bio_data(bio);
                req->current_nr_sectors = bio_cur_sectors(bio);
                req->hard_cur_sectors = req->current_nr_sectors;
                req->sector = req->hard_sector = bio->bi_sector;
                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                req->ioprio = ioprio_best(req->ioprio, prio);
                drive_stat_acct(req, 0);
                if (!attempt_front_merge(q, req))
                        elv_merged_request(q, req, el_ret);
                goto out;

        /* ELV_NO_MERGE: elevator says don't/can't merge. */
        default:
                ;
        }

get_rq:
        /*
         * This sync check and mask will be re-done in init_request_from_bio(),
         * but we need to set it earlier to expose the sync flag to the
         * rq allocator and io schedulers.
         */
        rw_flags = bio_data_dir(bio);
        if (sync)
                rw_flags |= REQ_RW_SYNC;

        /*
         * Grab a free request. This might sleep but cannot fail.
         * Returns with the queue unlocked.
         */
        req = get_request_wait(q, rw_flags, bio);

        /*
         * After dropping the lock and possibly sleeping here, our request
         * may now be mergeable after it had proven unmergeable (above).
         * We don't worry about that case for efficiency. It won't happen
         * often, and the elevators are able to handle it.
         */
        init_request_from_bio(req, bio);

        spin_lock_irq(q->queue_lock);
        if (elv_queue_empty(q))
                blk_plug_device(q);
        add_request(q, req);
out:
        if (sync)
                __generic_unplug_device(q);

        spin_unlock_irq(q->queue_lock);
        return 0;

end_io:
        bio_endio(bio, err);
        return 0;
}
/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
        struct block_device *bdev = bio->bi_bdev;

        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;

                bio->bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;

                blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
                                    bdev->bd_dev, bio->bi_sector,
                                    bio->bi_sector - p->start_sect);
        }
}
static void handle_bad_sector(struct bio *bio)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_INFO "attempt to access beyond end of device\n");
        printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
               bdevname(bio->bi_bdev, b),
               bio->bi_rw,
               (unsigned long long)bio->bi_sector + bio_sectors(bio),
               (long long)(bio->bi_bdev->bd_inode->i_size >> 9));

        set_bit(BIO_EOF, &bio->bi_flags);
}
#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
        return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static int should_fail_request(struct bio *bio)
{
        if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
            (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
                return should_fail(&fail_make_request, bio->bi_size);

        return 0;
}

static int __init fail_make_request_debugfs(void)
{
        return init_fault_attr_dentries(&fail_make_request,
                                        "fail_make_request");
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline int should_fail_request(struct bio *bio)
{
        return 0;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */
/*
 * Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
        sector_t maxsector;

        if (!nr_sectors)
                return 0;

        /* Test device or partition size, when known. */
        maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
        if (maxsector) {
                sector_t sector = bio->bi_sector;

                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
                        /*
                         * This may well happen - the kernel calls bread()
                         * without checking the size of the device, e.g., when
                         * mounting a device.
                         */
                        handle_bad_sector(bio);
                        return 1;
                }
        }

        return 0;
}
/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status. The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * is set to describe the memory buffer, that bi_bdev and bi_sector are
 * set to describe the device address, and that bi_end_io and optionally
 * bi_private are set to describe how completion notification should be
 * signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may change bi_bdev and
 * bi_sector for remaps as it sees fit. So the values of these fields
 * should NOT be depended on after the call to generic_make_request.
 */
static inline void __generic_make_request(struct bio *bio)
{
        struct request_queue *q;
        sector_t old_sector;
        int ret, nr_sectors = bio_sectors(bio);
        dev_t old_dev;
        int err = -EIO;

        might_sleep();

        if (bio_check_eod(bio, nr_sectors))
                goto end_io;

        /*
         * Resolve the mapping until finished. (drivers are
         * still free to implement/resolve their own stacking
         * by explicitly returning 0)
         *
         * NOTE: we don't repeat the blk_size check for each new device.
         * Stacking drivers are expected to know what they are doing.
         */
        old_sector = -1;
        old_dev = 0;
        do {
                char b[BDEVNAME_SIZE];

                q = bdev_get_queue(bio->bi_bdev);
                if (!q) {
                        printk(KERN_ERR
                               "generic_make_request: Trying to access "
                               "nonexistent block-device %s (%Lu)\n",
                               bdevname(bio->bi_bdev, b),
                               (long long) bio->bi_sector);
end_io:
                        bio_endio(bio, err);
                        break;
                }

                if (unlikely(nr_sectors > q->max_hw_sectors)) {
                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                               bdevname(bio->bi_bdev, b),
                               bio_sectors(bio),
                               q->max_hw_sectors);
                        goto end_io;
                }

                if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
                        goto end_io;

                if (should_fail_request(bio))
                        goto end_io;

                /*
                 * If this device has partitions, remap block n
                 * of partition p to block n+start(p) of the disk.
                 */
                blk_partition_remap(bio);

                if (old_sector != -1)
                        blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
                                            old_sector);

                blk_add_trace_bio(q, bio, BLK_TA_QUEUE);

                old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;

                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;
                if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
                        err = -EOPNOTSUPP;
                        goto end_io;
                }

                ret = q->make_request_fn(q, bio);
        } while (ret);
}
/*
 * We only want one ->make_request_fn to be active at a time,
 * else stack usage with stacked devices could be a problem.
 * So use current->bio_{list,tail} to keep a list of requests
 * submitted by a make_request_fn function.
 * current->bio_tail is also used as a flag to say if
 * generic_make_request is currently active in this task or not.
 * If it is NULL, then no make_request is active. If it is non-NULL,
 * then a make_request is active, and new requests should be added
 * at the tail
 */
void generic_make_request(struct bio *bio)
{
        if (current->bio_tail) {
                /* make_request is active */
                *(current->bio_tail) = bio;
                bio->bi_next = NULL;
                current->bio_tail = &bio->bi_next;
                return;
        }
        /* following loop may be a bit non-obvious, and so deserves some
         * explanation.
         * Before entering the loop, bio->bi_next is NULL (as all callers
         * ensure that) so we have a list with a single bio.
         * We pretend that we have just taken it off a longer list, so
         * we assign bio_list to the next (which is NULL) and bio_tail
         * to &bio_list, thus initialising the bio_list of new bios to be
         * added. __generic_make_request may indeed add some more bios
         * through a recursive call to generic_make_request. If it
         * did, we find a non-NULL value in bio_list and re-enter the loop
         * from the top. In this case we really did just take the bio
         * off the top of the list (no pretending) and so fix up bio_list
         * and bio_tail or bi_next, and call into __generic_make_request
         * again.
         *
         * The loop was structured like this to make only one call to
         * __generic_make_request (which is important as it is large and
         * inlined) and to keep the structure simple.
         */
        BUG_ON(bio->bi_next);
        do {
                current->bio_list = bio->bi_next;
                if (bio->bi_next == NULL)
                        current->bio_tail = &current->bio_list;
                else
                        bio->bi_next = NULL;
                __generic_make_request(bio);
                bio = current->bio_list;
        } while (bio);
        current->bio_tail = NULL; /* deactivate */
}
EXPORT_SYMBOL(generic_make_request);
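
/*
 * Example (minimal sketch): a stacking driver's make_request_fn that
 * remaps a bio onto a lower device and resubmits it. Returning 0 tells
 * __generic_make_request() that the bio has been dispatched; the
 * resubmission lands on current->bio_list instead of recursing.
 * struct my_dev and its fields are hypothetical.
 *
 *	static int my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		struct my_dev *md = q->queuedata;
 *
 *		bio->bi_bdev = md->lower_bdev;
 *		bio->bi_sector += md->start_offset;
 *		generic_make_request(bio);
 *		return 0;
 *	}
 */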
/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces; @bio must be set up and ready for I/O.
 *
 */
void submit_bio(int rw, struct bio *bio)
{
        int count = bio_sectors(bio);

        bio->bi_rw |= rw;

        /*
         * If it's a regular read/write or a barrier with data attached,
         * go through the normal accounting stuff before submission.
         */
        if (!bio_empty_barrier(bio)) {
                BIO_BUG_ON(!bio->bi_size);
                BIO_BUG_ON(!bio->bi_io_vec);

                if (rw & WRITE) {
                        count_vm_events(PGPGOUT, count);
                } else {
                        task_io_account_read(bio->bi_size);
                        count_vm_events(PGPGIN, count);
                }

                if (unlikely(block_dump)) {
                        char b[BDEVNAME_SIZE];
                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
                               current->comm, task_pid_nr(current),
                               (rw & WRITE) ? "WRITE" : "READ",
                               (unsigned long long)bio->bi_sector,
                               bdevname(bio->bi_bdev, b));
                }
        }

        generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
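
/*
 * Example (minimal sketch): reading one page from a block device. The
 * completion callback matches this kernel generation's two-argument
 * bi_end_io signature; my_end_io, bdev, sector and page are
 * hypothetical.
 *
 *	static void my_end_io(struct bio *bio, int error)
 *	{
 *		if (error)
 *			printk(KERN_ERR "read failed: %d\n", error);
 *		bio_put(bio);
 *	}
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(READ, bio);
 */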
/**
 * __end_that_request_first - end I/O on a request
 * @req:      the request being processed
 * @error:    0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, and sets it up
 *     for the next range of segments (if any) in the cluster.
 *
 * Return:
 *     0 - we are done with this request, call end_that_request_last()
 *     1 - still buffers pending for this request
 **/
static int __end_that_request_first(struct request *req, int error,
                                    int nr_bytes)
{
        int total_bytes, bio_nbytes, next_idx = 0;
        struct bio *bio;

        blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);

        /*
         * for a REQ_BLOCK_PC request, we want to carry any eventual
         * sense key with us all the way through
         */
        if (!blk_pc_request(req))
                req->errors = 0;

        if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
                printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
                       req->rq_disk ? req->rq_disk->disk_name : "?",
                       (unsigned long long)req->sector);
        }

        if (blk_fs_request(req) && req->rq_disk) {
                const int rw = rq_data_dir(req);

                all_stat_add(req->rq_disk, sectors[rw],
                             nr_bytes >> 9, req->sector);
        }

        total_bytes = bio_nbytes = 0;
        while ((bio = req->bio) != NULL) {
                int nbytes;

                /*
                 * For an empty barrier request, the low level driver must
                 * store a potential error location in ->sector. We pass
                 * that back up in ->bi_sector.
                 */
                if (blk_empty_barrier(req))
                        bio->bi_sector = req->sector;

                if (nr_bytes >= bio->bi_size) {
                        req->bio = bio->bi_next;
                        nbytes = bio->bi_size;
                        req_bio_endio(req, bio, nbytes, error);
                        next_idx = 0;
                        bio_nbytes = 0;
                } else {
                        int idx = bio->bi_idx + next_idx;

                        if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
                                blk_dump_rq_flags(req, "__end_that");
                                printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
                                       __FUNCTION__, bio->bi_idx,
                                       bio->bi_vcnt);
                                break;
                        }

                        nbytes = bio_iovec_idx(bio, idx)->bv_len;
                        BIO_BUG_ON(nbytes > bio->bi_size);

                        /*
                         * not a complete bvec done
                         */
                        if (unlikely(nbytes > nr_bytes)) {
                                bio_nbytes += nr_bytes;
                                total_bytes += nr_bytes;
                                break;
                        }

                        /*
                         * advance to the next vector
                         */
                        next_idx++;
                        bio_nbytes += nbytes;
                }

                total_bytes += nbytes;
                nr_bytes -= nbytes;

                bio = req->bio;
                if (bio) {
                        /*
                         * end more in this run, or just return 'not-done'
                         */
                        if (unlikely(nr_bytes <= 0))
                                break;
                }
        }

        /*
         * completely done
         */
        if (!req->bio)
                return 0;

        /*
         * if the request wasn't completed, update state
         */
        if (bio_nbytes) {
                req_bio_endio(req, bio, bio_nbytes, error);
                bio->bi_idx += next_idx;
                bio_iovec(bio)->bv_offset += nr_bytes;
                bio_iovec(bio)->bv_len -= nr_bytes;
        }

        blk_recalc_rq_sectors(req, total_bytes >> 9);
        blk_recalc_rq_segments(req);
        return 1;
}

/*
 * splice the completion data to a local structure and hand off each
 * request to the queue's softirq_done_fn to complete it
 */
static void blk_done_softirq(struct softirq_action *h)
{
	struct list_head *cpu_list, local_list;

	local_irq_disable();
	cpu_list = &__get_cpu_var(blk_cpu_done);
	list_replace_init(cpu_list, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct request *rq;

		rq = list_entry(local_list.next, struct request, donelist);
		list_del_init(&rq->donelist);
		rq->q->softirq_done_fn(rq);
	}
}

static int __cpuinit blk_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_done, cpu),
				 &__get_cpu_var(blk_cpu_done));
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block blk_cpu_notifier __cpuinitdata = {
	.notifier_call	= blk_cpu_notify,
};

/**
 * blk_complete_request - end I/O on a request
 * @req: the request being processed
 *
 * Description:
 *     Ends all I/O on a request. It does not handle partial completions,
 *     unless the driver actually implements this in its completion callback
 *     through requeueing. The actual completion happens out-of-order,
 *     through a softirq handler. The user must have registered a completion
 *     callback through blk_queue_softirq_done().
 **/
void blk_complete_request(struct request *req)
{
	struct list_head *cpu_list;
	unsigned long flags;

	BUG_ON(!req->q->softirq_done_fn);

	local_irq_save(flags);

	cpu_list = &__get_cpu_var(blk_cpu_done);
	list_add_tail(&req->donelist, cpu_list);
	raise_softirq_irqoff(BLOCK_SOFTIRQ);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_complete_request);
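
/*
 * Example: driver-side use of the softirq completion path (sketch; the
 * 'example_' names are hypothetical driver code, not block layer API).
 */
static void example_softirq_done(struct request *rq)
{
	/* runs from BLOCK_SOFTIRQ; blk_end_request() takes the queue lock */
	blk_end_request(rq, rq->errors ? -EIO : 0, blk_rq_bytes(rq));
}

static void example_init_queue(struct request_queue *q)
{
	/* must be registered before blk_complete_request() is ever called */
	blk_queue_softirq_done(q, example_softirq_done);
}

static void example_hw_interrupt(struct request *rq)
{
	/* from the ISR: just queue rq for out-of-line completion */
	blk_complete_request(rq);
}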

/*
 * queue lock must be held
 */
static void end_that_request_last(struct request *req, int error)
{
	struct gendisk *disk = req->rq_disk;

	if (blk_rq_tagged(req))
		blk_queue_end_tag(req->q, req);

	if (blk_queued_rq(req))
		blkdev_dequeue_request(req);

	if (unlikely(laptop_mode) && blk_fs_request(req))
		laptop_io_completion();

	/*
	 * Account IO completion. bar_rq isn't accounted as a normal
	 * IO on queueing nor completion. Accounting the containing
	 * request is enough.
	 */
	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part = get_part(disk, req->sector);

		__all_stat_inc(disk, ios[rw], req->sector);
		__all_stat_add(disk, ticks[rw], duration, req->sector);
		disk_round_stats(disk);
		disk->in_flight--;
		if (part) {
			part_round_stats(part);
			part->in_flight--;
		}
	}

	if (req->end_io)
		req->end_io(req, error);
	else {
		if (blk_bidi_rq(req))
			__blk_put_request(req->next_rq->q, req->next_rq);

		__blk_put_request(req->q, req);
	}
}

static inline void __end_request(struct request *rq, int uptodate,
				 unsigned int nr_bytes)
{
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	__blk_end_request(rq, error, nr_bytes);
}

/**
 * blk_rq_bytes - Returns bytes left to complete in the entire request
 * @rq: the request being processed
 **/
unsigned int blk_rq_bytes(struct request *rq)
{
	if (blk_fs_request(rq))
		return rq->hard_nr_sectors << 9;

	return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_bytes);

/**
 * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
 * @rq: the request being processed
 **/
unsigned int blk_rq_cur_bytes(struct request *rq)
{
	if (blk_fs_request(rq))
		return rq->current_nr_sectors << 9;

	if (rq->bio)
		return rq->bio->bi_size;

	return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
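
/*
 * Example: the two byte-count helpers above paired with the completion
 * helpers below (sketch; queue lock assumed held by the caller, the
 * 'example_' names are hypothetical).
 */
static void example_end_cur_segment(struct request *rq, int error)
{
	/* finish only the segment currently being transferred */
	__blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}

static void example_end_whole_request(struct request *rq, int error)
{
	/* finish everything still outstanding on rq */
	__blk_end_request(rq, error, blk_rq_bytes(rq));
}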

/**
 * end_queued_request - end all I/O on a queued request
 * @rq: the request being processed
 * @uptodate: error value or 0/1 uptodate flag
 *
 * Description:
 *     Ends all I/O on a request, and removes it from the block layer queues.
 *     Not suitable for normal IO completion, unless the driver still has
 *     the request attached to the block layer.
 **/
void end_queued_request(struct request *rq, int uptodate)
{
	__end_request(rq, uptodate, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(end_queued_request);
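
/*
 * Example: failing every request still sitting on the queue, e.g. on
 * hot-unplug (sketch; called with the queue lock held, 'example_' names
 * are hypothetical).
 */
static void example_abort_queue(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL)
		end_queued_request(rq, -EIO);
}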

/**
 * end_dequeued_request - end all I/O on a dequeued request
 * @rq: the request being processed
 * @uptodate: error value or 0/1 uptodate flag
 *
 * Description:
 *     Ends all I/O on a request. The request must already have been
 *     dequeued using blkdev_dequeue_request(), as is normally the case
 *     for most drivers.
 **/
void end_dequeued_request(struct request *rq, int uptodate)
{
	__end_request(rq, uptodate, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(end_dequeued_request);
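
/*
 * Example: a driver that dequeues requests itself and finishes them
 * with end_dequeued_request() (sketch; a real driver would hand the
 * request to hardware instead of completing it inline).
 */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(rq);
		/* ... perform the transfer here ... */
		end_dequeued_request(rq, 1);	/* 1 == success */
	}
}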

/**
 * end_request - end I/O on the current segment of the request
 * @req: the request being processed
 * @uptodate: error value or 0/1 uptodate flag
 *
 * Description:
 *     Ends I/O on the current segment of a request. If that is the only
 *     remaining segment, the request is also completed and freed.
 *
 *     This is a remnant of how older block drivers handled IO completions.
 *     Modern drivers typically end IO on the full request in one go, unless
 *     they have a residual value to account for. For that case this function
 *     isn't really useful, unless the residual just happens to be the
 *     full current segment. In other words, don't use this function in new
 *     code. Use blk_end_request() or __blk_end_request() instead, which
 *     handle both full and partial completions.
 **/
void end_request(struct request *req, int uptodate)
{
	__end_request(req, uptodate, req->hard_cur_sectors << 9);
}
EXPORT_SYMBOL(end_request);
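
/*
 * Example: the legacy one-segment-at-a-time style that end_request()
 * exists for (sketch; 'example_xfer' is a hypothetical programmed-I/O
 * transfer routine that reports success as 1).
 */
static int example_xfer(char *buf, sector_t sector, unsigned int nsectors)
{
	return 1;	/* hypothetical transfer, always "succeeds" here */
}

static void example_legacy_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		int ok = example_xfer(rq->buffer, rq->sector,
				      rq->current_nr_sectors);
		/* ends the current segment; frees rq when none remain */
		end_request(rq, ok);
	}
}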

/**
 * blk_end_io - Generic end_io function to complete a request.
 * @rq: the request being processed
 * @error: 0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 * @drv_callback: function called between completion of bios in the request
 *                and completion of the request.
 *                If the callback returns non 0, this helper returns without
 *                completion of the request.
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     0 - we are done with this request
 *     1 - this request is not freed yet, it still has pending buffers.
 **/
static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
		      unsigned int bidi_bytes,
		      int (drv_callback)(struct request *))
{
	struct request_queue *q = rq->q;
	unsigned long flags = 0UL;

	if (blk_fs_request(rq) || blk_pc_request(rq)) {
		if (__end_that_request_first(rq, error, nr_bytes))
			return 1;

		/* Bidi request must be completed as a whole */
		if (blk_bidi_rq(rq) &&
		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
			return 1;
	}

	/* Special feature for tricky drivers */
	if (drv_callback && drv_callback(rq))
		return 1;

	add_disk_randomness(rq->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	end_that_request_last(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}

/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq: the request being processed
 * @error: 0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     0 - we are done with this request
 *     1 - still buffers pending for this request
 **/
int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return blk_end_io(rq, error, nr_bytes, 0, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_request);
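
/*
 * Example: unlocked completion with partial-completion handling, as a
 * driver might do it outside the queue lock (sketch; 'bytes_done' is a
 * hypothetical residual computed by the driver).
 */
static void example_complete(struct request *rq, int error,
			     unsigned int bytes_done)
{
	if (blk_end_request(rq, error, bytes_done))
		return;		/* rq still has pending buffers */
	/* rq has been fully completed and freed by the block layer */
}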

/**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq: the request being processed
 * @error: 0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Must be called with queue lock held unlike blk_end_request().
 *
 * Return:
 *     0 - we are done with this request
 *     1 - still buffers pending for this request
 **/
int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	if (blk_fs_request(rq) || blk_pc_request(rq)) {
		if (__end_that_request_first(rq, error, nr_bytes))
			return 1;
	}

	add_disk_randomness(rq->rq_disk);

	end_that_request_last(rq, error);

	return 0;
}
EXPORT_SYMBOL_GPL(__blk_end_request);
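
/*
 * Example: failing a request from a context that already holds
 * rq->q->queue_lock, e.g. from within a request_fn (sketch).
 */
static void example_fail_request(struct request *rq)
{
	/* queue lock held, so the unlocked blk_end_request() cannot be used */
	__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
}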

/**
 * blk_end_bidi_request - Helper function for drivers to complete bidi request.
 * @rq: the bidi request being processed
 * @error: 0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *
 * Return:
 *     0 - we are done with this request
 *     1 - still buffers pending for this request
 **/
int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
			 unsigned int bidi_bytes)
{
	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_bidi_request);
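
/*
 * Example: completing both sides of a bidirectional request together
 * (sketch; 'out_bytes' and 'in_bytes' stand for hypothetical residuals
 * computed by the driver for rq and rq->next_rq respectively).
 */
static int example_finish_bidi(struct request *rq, int error,
			       unsigned int out_bytes, unsigned int in_bytes)
{
	/* returns 1 while either direction still has pending buffers */
	return blk_end_bidi_request(rq, error, out_bytes, in_bytes);
}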

/**
 * blk_end_request_callback - Special helper function for tricky drivers
 * @rq: the request being processed
 * @error: 0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 * @drv_callback: function called between completion of bios in the request
 *                and completion of the request.
 *                If the callback returns non 0, this helper returns without
 *                completion of the request.
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is used only for existing tricky drivers.
 *     (e.g. cdrom_newpc_intr() of ide-cd)
 *     This interface will be removed when such drivers are rewritten.
 *     Don't use this interface in other places anymore.
 *
 * Return:
 *     0 - we are done with this request
 *     1 - this request is not freed yet.
 *         this request still has pending buffers or
 *         the driver doesn't want to finish this request yet.
 **/
int blk_end_request_callback(struct request *rq, int error,
			     unsigned int nr_bytes,
			     int (drv_callback)(struct request *))
{
	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
}
EXPORT_SYMBOL_GPL(blk_end_request_callback);

void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio)
{
	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
	rq->cmd_flags |= (bio->bi_rw & 3);

	rq->nr_phys_segments = bio_phys_segments(q, bio);
	rq->nr_hw_segments = bio_hw_segments(q, bio);
	rq->current_nr_sectors = bio_cur_sectors(bio);
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
	rq->buffer = bio_data(bio);
	rq->data_len = bio->bi_size;

	rq->bio = rq->biotail = bio;

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

void kblockd_flush_work(struct work_struct *work)
{
	cancel_work_sync(work);
}
EXPORT_SYMBOL(kblockd_flush_work);
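
/*
 * Example: deferring work to kblockd and flushing it on teardown
 * (sketch; 'example_work_fn' and friends are hypothetical).
 */
static void example_work_fn(struct work_struct *work)
{
	/* runs later in process context on the kblockd workqueue */
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_defer(void)
{
	kblockd_schedule_work(&example_work);
}

static void example_work_teardown(void)
{
	/* wait for a pending/running example_work_fn() to finish */
	kblockd_flush_work(&example_work);
}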

int __init blk_dev_init(void)
{
	int i;

	kblockd_workqueue = create_workqueue("kblockd");
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
	register_hotcpu_notifier(&blk_cpu_notifier);

	return 0;
}