/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>
#include <trace/block.h>

#include "blk.h"

DEFINE_TRACE(block_plug);
DEFINE_TRACE(block_unplug_io);
DEFINE_TRACE(block_unplug_timer);
DEFINE_TRACE(block_getrq);
DEFINE_TRACE(block_sleeprq);
DEFINE_TRACE(block_rq_requeue);
DEFINE_TRACE(block_bio_backmerge);
DEFINE_TRACE(block_bio_frontmerge);
DEFINE_TRACE(block_bio_queue);
DEFINE_TRACE(block_rq_complete);
DEFINE_TRACE(block_remap);	/* Also used in drivers/md/dm.c */
EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);

static int __make_request(struct request_queue *q, struct bio *bio);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_fs_request(rq) || !blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();
	part = disk_map_sector_rcu(rq->rq_disk, rq->sector);

	if (!new_io)
		part_stat_inc(cpu, part, merges[rw]);
	else {
		part_round_stats(cpu, part);
		part_inc_in_flight(part);
	}

	part_stat_unlock();
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
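
/*
 * Worked example (illustrative, integer division): with the default
 * q->nr_requests = 128, the above gives
 * nr_congestion_on  = 128 - 16 + 1     = 113 and
 * nr_congestion_off = 128 - 16 - 8 - 1 = 103.
 * The gap between the two thresholds provides hysteresis, so the queue
 * does not flap between the congested and uncongested states.
 */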
/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->sector = rq->hard_sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->ref_count = 1;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	struct request_queue *q = rq->q;

	if (&q->bar_rq != rq) {
		if (error)
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			error = -EIO;

		if (unlikely(nbytes > bio->bi_size)) {
			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
			       __func__, nbytes, bio->bi_size);
			nbytes = bio->bi_size;
		}

		if (unlikely(rq->cmd_flags & REQ_QUIET))
			set_bit(BIO_QUIET, &bio->bi_flags);

		bio->bi_size -= nbytes;
		bio->bi_sector += (nbytes >> 9);

		if (bio_integrity(bio))
			bio_integrity_advance(bio, nbytes);

		if (bio->bi_size == 0)
			bio_endio(bio, error);
	} else {
		/*
		 * Okay, this is the barrier request in progress, just
		 * record the error;
		 */
		if (error && !q->orderr)
			q->orderr = error;
	}
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
		(unsigned long long)rq->sector,
		rq->nr_sectors,
		rq->current_nr_sectors);
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
		rq->bio, rq->biotail,
		rq->buffer, rq->data,
		rq->data_len);

	if (blk_pc_request(rq)) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue and
 * with the queue lock held.
 */
void blk_plug_device(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	/*
	 * don't plug a stopped queue, it must be paired with blk_start_queue()
	 * which will restart the queueing
	 */
	if (blk_queue_stopped(q))
		return;

	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
		trace_block_plug(q);
	}
}
EXPORT_SYMBOL(blk_plug_device);

/**
 * blk_plug_device_unlocked - plug a device without queue lock held
 * @q:    The &struct request_queue to plug
 *
 * Description:
 *   Like @blk_plug_device(), but grabs the queue lock and disables
 *   interrupts.
 **/
void blk_plug_device_unlocked(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_plug_device(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_plug_device_unlocked);

/*
 * remove the queue from the plugged list, if present. called with
 * queue lock held and interrupts disabled.
 */
int blk_remove_plug(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
		return 0;

	del_timer(&q->unplug_timer);
	return 1;
}
EXPORT_SYMBOL(blk_remove_plug);

/*
 * remove the plug and let it rip..
 */
void __generic_unplug_device(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;
	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
		return;

	q->request_fn(q);
}

/**
 * generic_unplug_device - fire a request queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   Linux uses plugging to build bigger requests before letting
 *   the device have at them. If a queue is plugged, the I/O scheduler
 *   is still adding and merging requests on the queue. Once the queue
 *   gets unplugged, the request_fn defined for the queue is invoked and
 *   transfers started.
 **/
void generic_unplug_device(struct request_queue *q)
{
	if (blk_queue_plugged(q)) {
		spin_lock_irq(q->queue_lock);
		__generic_unplug_device(q);
		spin_unlock_irq(q->queue_lock);
	}
}
EXPORT_SYMBOL(generic_unplug_device);

static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
				   struct page *page)
{
	struct request_queue *q = bdi->unplug_io_data;

	blk_unplug(q);
}

void blk_unplug_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, unplug_work);

	trace_block_unplug_io(q);
	q->unplug_fn(q);
}

void blk_unplug_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;

	trace_block_unplug_timer(q);
	kblockd_schedule_work(q, &q->unplug_work);
}

void blk_unplug(struct request_queue *q)
{
	/*
	 * devices don't necessarily have an ->unplug_fn defined
	 */
	if (q->unplug_fn) {
		trace_block_unplug_io(q);
		q->unplug_fn(q);
	}
}
EXPORT_SYMBOL(blk_unplug);

static void blk_invoke_request_fn(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	/*
	 * one level of recursion is ok and is much faster than kicking
	 * the unplug handling
	 */
	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else {
		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
		kblockd_schedule_work(q, &q->unplug_work);
	}
}

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	blk_invoke_request_fn(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	blk_remove_plug(q);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
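
/*
 * Illustrative sketch (not part of this file): how a driver might pair
 * blk_stop_queue() with blk_start_queue() as described above. The my_*
 * names are hypothetical. blk_stop_queue() is called from ->request_fn()
 * context, where the queue lock is already held; the completion path must
 * take the lock itself before restarting the queue.
 */
#if 0
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (my_hw_queue_full()) {
			/* stop dispatch until the hardware drains */
			blk_stop_queue(q);
			break;
		}
		blkdev_dequeue_request(rq);
		my_hw_submit(rq);
	}
}

static void my_completion_irq(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* clears QUEUE_FLAG_STOPPED and re-invokes ->request_fn() */
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
#endif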
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->unplug_timer);
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->unplug_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 *
 */
void __blk_run_queue(struct request_queue *q)
{
	blk_remove_plug(q);

	/*
	 * Only recurse once to avoid overrunning the stack, let the unplug
	 * handling reinvoke the handler shortly if we already got there.
	 */
	if (!elv_queue_empty(q))
		blk_invoke_request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed. Also
 *    see @blk_start_queueing.
 *
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}

void blk_cleanup_queue(struct request_queue *q)
{
	/*
	 * We know we have process context here, so we can be a little
	 * cautious and ensure that pending block actions on this device
	 * are done before moving on. Going into this function, we should
	 * not have processes doing IO to this device.
	 */
	blk_sync_queue(q);

	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
	mutex_unlock(&q->sysfs_lock);

	if (q->elevator)
		elevator_exit(q->elevator);

	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

static int blk_init_free_list(struct request_queue *q)
{
	struct request_list *rl = &q->rq;

	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	rl->elvpriv = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, request_cachep, q->node);

	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
	q->backing_dev_info.unplug_io_data = q;
	err = bdi_init(&q->backing_dev_info);
	if (err) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	init_timer(&q->unplug_timer);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_WORK(&q->unplug_work, blk_unplug_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	return q;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue(). The function @rfn will be called when there
 *    are requests on the queue that need to be processed. If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests from the
 *    queue, but only as many as it can handle at a time. If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

	if (!q)
		return NULL;

	q->node = node_id;
	if (blk_init_free_list(q)) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	/*
	 * if caller didn't supply a lock, they get per-queue locking with
	 * our embedded lock
	 */
	if (!lock)
		lock = &q->__queue_lock;

	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unplug_fn		= generic_unplug_device;
	q->queue_flags		= QUEUE_FLAG_DEFAULT;
	q->queue_lock		= lock;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, __make_request);

	q->sg_reserved_size = INT_MAX;

	blk_set_cmd_filter_defaults(&q->cmd_filter);

	/*
	 * all done
	 */
	if (!elevator_init(q, NULL)) {
		blk_queue_congestion_threshold(q);
		return q;
	}

	blk_put_queue(q);
	return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);
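
/*
 * Illustrative sketch (not part of this file): the blk_init_queue() /
 * blk_cleanup_queue() pairing required by the note above, for a
 * hypothetical driver. Passing a NULL lock selects the queue's embedded
 * lock; my_request_fn() drains requests with the queue lock held.
 */
#if 0
static struct request_queue *my_queue;

static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(rq);
		/* ... program the hardware; a trivial driver completes here: */
		__blk_end_request(rq, 0, blk_rq_bytes(rq));
	}
}

static int __init my_driver_init(void)
{
	my_queue = blk_init_queue(my_request_fn, NULL);
	if (!my_queue)
		return -ENOMEM;
	/* ... attach my_queue to a gendisk and add_disk() ... */
	return 0;
}

static void __exit my_driver_exit(void)
{
	/* ... del_gendisk() ... */
	blk_cleanup_queue(my_queue);
}
#endif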
int blk_get_queue(struct request_queue *q)
{
	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		kobject_get(&q->kobj);
		return 0;
	}

	return 1;
}

static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV)
		elv_put_request(q, rq);
	mempool_free(rq, q->rq.rq_pool);
}

static struct request *
blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
{
	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

	if (!rq)
		return NULL;

	blk_rq_init(q, rq);

	rq->cmd_flags = rw | REQ_ALLOCED;

	if (priv) {
		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
			mempool_free(rq, q->rq.rq_pool);
			return NULL;
		}
		rq->cmd_flags |= REQ_ELVPRIV;
	}

	return rq;
}

/*
 * ioc_batching returns true if the ioc is a valid batching context and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_queue *q, int sync)
{
	struct request_list *rl = &q->rq;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_queue_full(q, sync);
	}
}

/*
 * A request has just been released. Account for it, update the full and
 * congestion status, wake up any waiters. Called under q->queue_lock.
 */
static void freed_request(struct request_queue *q, int sync, int priv)
{
	struct request_list *rl = &q->rq;

	rl->count[sync]--;
	if (priv)
		rl->elvpriv--;

	__freed_request(q, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(q, sync ^ 1);
}

/*
 * Get a free request, queue_lock must be held.
 * Returns NULL on failure, with queue_lock held.
 * Returns !NULL on success, with queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	struct request *rq = NULL;
	struct request_list *rl = &q->rq;
	struct io_context *ioc = NULL;
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	int may_queue, priv;

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			ioc = current_io_context(GFP_ATOMIC, q->node);
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_queue_full(q, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_queue_full(q, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					goto out;
				}
			}
		}
		blk_set_queue_congested(q, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		goto out;

	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	if (priv)
		rl->elvpriv++;

	spin_unlock_irq(q->queue_lock);

	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
	if (unlikely(!rq)) {
		/*
		 * Allocation failed presumably due to memory. Undo anything
		 * we might have messed up.
		 *
		 * Allocating task should really be put onto the front of the
		 * wait queue, but this is pretty rare.
		 */
		spin_lock_irq(q->queue_lock);
		freed_request(q, is_sync, priv);

		/*
		 * in the very unlikely event that allocation failed and no
		 * requests for this direction were pending, mark us starved
		 * so that freeing of a request in the other direction will
		 * notice us. another possible fix would be to split the
		 * rq mempool into READ and WRITE
		 */
rq_starved:
		if (unlikely(rl->count[is_sync] == 0))
			rl->starved[is_sync] = 1;

		goto out;
	}

	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);
out:
	return rq;
}

/*
 * No available requests for this queue, unplug the device and wait for some
 * requests to become available.
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
					struct bio *bio)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	struct request *rq;

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct io_context *ioc;
		struct request_list *rl = &q->rq;

		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				TASK_UNINTERRUPTIBLE);

		trace_block_sleeprq(q, bio, rw_flags & 1);

		__generic_unplug_device(q);
		spin_unlock_irq(q->queue_lock);
		io_schedule();

		/*
		 * After sleeping, we become a "batching" process and
		 * will be able to allocate at least one request, and
		 * up to a big batch of them for a small period of time.
		 * See ioc_batching, ioc_set_batching
		 */
		ioc = current_io_context(GFP_NOIO, q->node);
		ioc_set_batching(q, ioc);

		spin_lock_irq(q->queue_lock);
		finish_wait(&rl->wait[is_sync], &wait);

		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	}

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT) {
		rq = get_request_wait(q, rw, NULL);
	} else {
		rq = get_request(q, rw, NULL, gfp_mask);
		if (!rq)
			spin_unlock_irq(q->queue_lock);
	}
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);
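
/*
 * Illustrative sketch (not part of this file): allocating a request with
 * blk_get_request() and releasing it with blk_put_request(). With
 * __GFP_WAIT in the mask the call may sleep but will not return NULL;
 * without it, a NULL return must be handled.
 */
#if 0
	struct request *rq;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);	/* may sleep */
	if (!rq)
		return -ENOMEM;	/* only possible without __GFP_WAIT */
	/* ... set up rq->cmd_type, rq->cmd, etc., and execute it ... */
	blk_put_request(rq);
#endif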
/**
 * blk_start_queueing - initiate dispatch of requests to device
 * @q:		request queue to kick into gear
 *
 * This is basically a helper to remove the need to know whether a queue
 * is plugged or not if someone just wants to initiate dispatch of requests
 * for this queue. Should be used to start queueing on a device outside
 * of ->request_fn() context. Also see @blk_run_queue.
 *
 * The queue lock must be held with interrupts disabled.
 */
void blk_start_queueing(struct request_queue *q)
{
	if (!blk_queue_plugged(q)) {
		if (unlikely(blk_queue_stopped(q)))
			return;
		q->request_fn(q);
	} else
		__generic_unplug_device(q);
}
EXPORT_SYMBOL(blk_start_queueing);

/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

/**
 * blk_insert_request - insert a special request into a request queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 * @at_head:	insert request at head or tail of queue
 * @data:	private data
 *
 * Description:
 *    Many block devices need to execute commands asynchronously, so they don't
 *    block the whole kernel from preemption during request execution. This is
 *    accomplished normally by inserting artificial requests tagged as
 *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
 *    be scheduled for actual execution by the request queue.
 *
 *    We have the option of inserting at the head or the tail of the queue.
 *    Typically we use the tail for new ioctls and so forth. We use the head
 *    of the queue for things like a QUEUE_FULL message from a device, or a
 *    host that is unable to accept a particular command.
 */
void blk_insert_request(struct request_queue *q, struct request *rq,
			int at_head, void *data)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	unsigned long flags;

	/*
	 * tell I/O scheduler that this isn't a regular read/write (ie it
	 * must not attempt merges on this) and that it acts as a soft
	 * barrier
	 */
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_flags |= REQ_SOFTBARRIER;

	rq->special = data;

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * If command is tagged, release the tag
	 */
	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where, 0);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
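
/*
 * Illustrative sketch (not part of this file): queueing a driver-private
 * command through blk_insert_request(). The request comes from the queue's
 * pool via blk_get_request(); blk_insert_request() itself marks it
 * REQ_TYPE_SPECIAL. my_cmd is a hypothetical driver structure passed via
 * rq->special for the driver's ->request_fn() to interpret.
 */
#if 0
	struct request *rq;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	/* tail insert (at_head == 0): run after everything already queued */
	blk_insert_request(q, rq, 0, my_cmd);
#endif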
/*
 * add-request adds a request to the linked list.
 * queue lock is held and interrupts disabled, as we muck with the
 * request queue list.
 */
static inline void add_request(struct request_queue *q, struct request *req)
{
	drive_stat_acct(req, 1);

	/*
	 * elevator indicated where it wants this request to be
	 * inserted at elevator_merge time
	 */
	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
}

static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	if (now == part->stamp)
		return;

	if (part->in_flight) {
		__part_stat_add(cpu, part, time_in_queue,
				part->in_flight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation. To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats. This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(int cpu, struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (part->partno)
		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);
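
/*
 * Worked example (illustrative): if 3 requests have been in flight for the
 * 10 jiffies since part->stamp, part_round_stats_single() adds
 * 3 * 10 = 30 to time_in_queue (queue-length-weighted busy time) and 10 to
 * io_ticks (wall time the partition was busy), then resets the stamp.
 */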
/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;
	if (unlikely(--req->ref_count))
		return;

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	/*
	 * Request may not have originated from ll_rw_blk. If not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		int is_sync = rq_is_sync(req) != 0;
		int priv = req->cmd_flags & REQ_ELVPRIV;

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(!hlist_unhashed(&req->hash));

		blk_free_request(q, req);
		freed_request(q, is_sync, priv);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	unsigned long flags;
	struct request_queue *q = req->q;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_put_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_put_request);

void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cpu = bio->bi_comp_cpu;
	req->cmd_type = REQ_TYPE_FS;

	/*
	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
	 */
	if (bio_rw_ahead(bio))
		req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
				   REQ_FAILFAST_DRIVER);
	if (bio_failfast_dev(bio))
		req->cmd_flags |= REQ_FAILFAST_DEV;
	if (bio_failfast_transport(bio))
		req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	if (bio_failfast_driver(bio))
		req->cmd_flags |= REQ_FAILFAST_DRIVER;

	/*
	 * REQ_BARRIER implies no merging, but let's make it explicit
	 */
	if (unlikely(bio_discard(bio))) {
		req->cmd_flags |= REQ_DISCARD;
		if (bio_barrier(bio))
			req->cmd_flags |= REQ_SOFTBARRIER;
		req->q->prepare_discard_fn(req->q, req);
	} else if (unlikely(bio_barrier(bio)))
		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);

	if (bio_sync(bio))
		req->cmd_flags |= REQ_RW_SYNC;
	if (bio_rw_meta(bio))
		req->cmd_flags |= REQ_RW_META;
	if (bio_noidle(bio))
		req->cmd_flags |= REQ_NOIDLE;

	req->errors = 0;
	req->hard_sector = req->sector = bio->bi_sector;
	req->ioprio = bio_prio(bio);
	req->start_time = jiffies;
	blk_rq_bio_prep(req->q, req, bio);
}

/*
 * Only disable plugging for non-rotational devices if they also do tagging;
 * otherwise we still need the proper merging that plugging buys us.
 */
static inline bool queue_should_plug(struct request_queue *q)
{
	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
}

static int __make_request(struct request_queue *q, struct bio *bio)
{
	struct request *req;
	int el_ret, nr_sectors;
	const unsigned short prio = bio_prio(bio);
	const int sync = bio_sync(bio);
	const int unplug = bio_unplug(bio);
	int rw_flags;

	nr_sectors = bio_sectors(bio);

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	spin_lock_irq(q->queue_lock);

	if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
		goto get_rq;

	el_ret = elv_merge(q, &req, bio);
	switch (el_ret) {
	case ELEVATOR_BACK_MERGE:
		BUG_ON(!rq_mergeable(req));

		if (!ll_back_merge_fn(q, req, bio))
			break;

		trace_block_bio_backmerge(q, bio);

		req->biotail->bi_next = bio;
		req->biotail = bio;
		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
		req->ioprio = ioprio_best(req->ioprio, prio);
		if (!blk_rq_cpu_valid(req))
			req->cpu = bio->bi_comp_cpu;
		drive_stat_acct(req, 0);
		if (!attempt_back_merge(q, req))
			elv_merged_request(q, req, el_ret);
		goto out;

	case ELEVATOR_FRONT_MERGE:
		BUG_ON(!rq_mergeable(req));

		if (!ll_front_merge_fn(q, req, bio))
			break;

		trace_block_bio_frontmerge(q, bio);

		bio->bi_next = req->bio;
		req->bio = bio;

		/*
		 * may not be valid. if the low level driver said
		 * it didn't need a bounce buffer then it better
		 * not touch req->buffer either...
		 */
		req->buffer = bio_data(bio);
		req->current_nr_sectors = bio_cur_sectors(bio);
		req->hard_cur_sectors = req->current_nr_sectors;
		req->sector = req->hard_sector = bio->bi_sector;
		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
		req->ioprio = ioprio_best(req->ioprio, prio);
		if (!blk_rq_cpu_valid(req))
			req->cpu = bio->bi_comp_cpu;
		drive_stat_acct(req, 0);
		if (!attempt_front_merge(q, req))
			elv_merged_request(q, req, el_ret);
		goto out;

	/* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */
	default:
		;
	}

get_rq:
	/*
	 * This sync check and mask will be re-done in init_request_from_bio(),
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_RW_SYNC;

	/*
	 * Grab a free request. This might sleep but cannot fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request_wait(q, rw_flags, bio);

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	spin_lock_irq(q->queue_lock);
	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
	    bio_flagged(bio, BIO_CPU_AFFINE))
		req->cpu = blk_cpu_to_group(smp_processor_id());
	if (queue_should_plug(q) && elv_queue_empty(q))
		blk_plug_device(q);
	add_request(q, req);
out:
	if (unplug || !queue_should_plug(q))
		__generic_unplug_device(q);
	spin_unlock_irq(q->queue_lock);
	return 0;
}

/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;

		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
				  bdev->bd_dev, bio->bi_sector,
				  bio->bi_sector - p->start_sect);
	}
}

static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
			bio->bi_rw,
			(unsigned long long)bio->bi_sector + bio_sectors(bio),
			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static int should_fail_request(struct bio *bio)
{
	struct hd_struct *part = bio->bi_bdev->bd_part;

	if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail)
		return should_fail(&fail_make_request, bio->bi_size);

	return 0;
}

static int __init fail_make_request_debugfs(void)
{
	return init_fault_attr_dentries(&fail_make_request,
					"fail_make_request");
}

late_initcall(fail_make_request_debugfs);
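
/*
 * Usage note (illustrative, from the generic fault-injection framework):
 * the boot parameter parsed above takes the common fault-attr form
 * "fail_make_request=<interval>,<probability>,<space>,<times>", and
 * injection only applies to devices whose make_it_fail attribute is set,
 * e.g.
 *	echo 1 > /sys/block/sda/sda1/make_it_fail
 * See Documentation/fault-injection/fault-injection.txt for details.
 */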
  1125. #else /* CONFIG_FAIL_MAKE_REQUEST */
  1126. static inline int should_fail_request(struct bio *bio)
  1127. {
  1128. return 0;
  1129. }
  1130. #endif /* CONFIG_FAIL_MAKE_REQUEST */
  1131. /*
  1132. * Check whether this bio extends beyond the end of the device.
  1133. */
  1134. static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
  1135. {
  1136. sector_t maxsector;
  1137. if (!nr_sectors)
  1138. return 0;
  1139. /* Test device or partition size, when known. */
  1140. maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
  1141. if (maxsector) {
  1142. sector_t sector = bio->bi_sector;
  1143. if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
  1144. /*
  1145. * This may well happen - the kernel calls bread()
  1146. * without checking the size of the device, e.g., when
  1147. * mounting a device.
  1148. */
  1149. handle_bad_sector(bio);
  1150. return 1;
  1151. }
  1152. }
  1153. return 0;
  1154. }
  1155. /**
  1156. * generic_make_request - hand a buffer to its device driver for I/O
  1157. * @bio: The bio describing the location in memory and on the device.
  1158. *
  1159. * generic_make_request() is used to make I/O requests of block
  1160. * devices. It is passed a &struct bio, which describes the I/O that needs
  1161. * to be done.
  1162. *
  1163. * generic_make_request() does not return any status. The
  1164. * success/failure status of the request, along with notification of
  1165. * completion, is delivered asynchronously through the bio->bi_end_io
  1166. * function described (one day) else where.
  1167. *
  1168. * The caller of generic_make_request must make sure that bi_io_vec
  1169. * are set to describe the memory buffer, and that bi_dev and bi_sector are
  1170. * set to describe the device address, and the
  1171. * bi_end_io and optionally bi_private are set to describe how
  1172. * completion notification should be signaled.
  1173. *
  1174. * generic_make_request and the drivers it calls may use bi_next if this
  1175. * bio happens to be merged with someone else, and may change bi_dev and
  1176. * bi_sector for remaps as it sees fit. So the values of these fields
  1177. * should NOT be depended on after the call to generic_make_request.
  1178. */
  1179. static inline void __generic_make_request(struct bio *bio)
  1180. {
  1181. struct request_queue *q;
  1182. sector_t old_sector;
  1183. int ret, nr_sectors = bio_sectors(bio);
  1184. dev_t old_dev;
  1185. int err = -EIO;
  1186. might_sleep();
  1187. if (bio_check_eod(bio, nr_sectors))
  1188. goto end_io;
  1189. /*
  1190. * Resolve the mapping until finished. (drivers are
  1191. * still free to implement/resolve their own stacking
  1192. * by explicitly returning 0)
  1193. *
  1194. * NOTE: we don't repeat the blk_size check for each new device.
  1195. * Stacking drivers are expected to know what they are doing.
  1196. */
  1197. old_sector = -1;
  1198. old_dev = 0;
  1199. do {
  1200. char b[BDEVNAME_SIZE];
  1201. q = bdev_get_queue(bio->bi_bdev);
  1202. if (unlikely(!q)) {
  1203. printk(KERN_ERR
  1204. "generic_make_request: Trying to access "
  1205. "nonexistent block-device %s (%Lu)\n",
  1206. bdevname(bio->bi_bdev, b),
  1207. (long long) bio->bi_sector);
  1208. goto end_io;
  1209. }
  1210. if (unlikely(nr_sectors > q->max_hw_sectors)) {
  1211. printk(KERN_ERR "bio too big device %s (%u > %u)\n",
  1212. bdevname(bio->bi_bdev, b),
  1213. bio_sectors(bio),
  1214. q->max_hw_sectors);
  1215. goto end_io;
  1216. }
  1217. if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
  1218. goto end_io;
  1219. if (should_fail_request(bio))
  1220. goto end_io;
  1221. /*
  1222. * If this device has partitions, remap block n
  1223. * of partition p to block n+start(p) of the disk.
  1224. */
  1225. blk_partition_remap(bio);
  1226. if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
  1227. goto end_io;
  1228. if (old_sector != -1)
  1229. trace_block_remap(q, bio, old_dev, bio->bi_sector,
  1230. old_sector);
  1231. trace_block_bio_queue(q, bio);
  1232. old_sector = bio->bi_sector;
  1233. old_dev = bio->bi_bdev->bd_dev;
  1234. if (bio_check_eod(bio, nr_sectors))
  1235. goto end_io;
  1236. if (bio_discard(bio) && !q->prepare_discard_fn) {
  1237. err = -EOPNOTSUPP;
  1238. goto end_io;
  1239. }
  1240. if (bio_barrier(bio) && bio_has_data(bio) &&
  1241. (q->next_ordered == QUEUE_ORDERED_NONE)) {
  1242. err = -EOPNOTSUPP;
  1243. goto end_io;
  1244. }
  1245. ret = q->make_request_fn(q, bio);
  1246. } while (ret);
  1247. return;
  1248. end_io:
  1249. bio_endio(bio, err);
  1250. }
  1251. /*
  1252. * We only want one ->make_request_fn to be active at a time,
  1253. * else stack usage with stacked devices could be a problem.
  1254. * So use current->bio_{list,tail} to keep a list of requests
  1255. * submited by a make_request_fn function.
  1256. * current->bio_tail is also used as a flag to say if
  1257. * generic_make_request is currently active in this task or not.
  1258. * If it is NULL, then no make_request is active. If it is non-NULL,
  1259. * then a make_request is active, and new requests should be added
  1260. * at the tail
  1261. */
  1262. void generic_make_request(struct bio *bio)
  1263. {
  1264. if (current->bio_tail) {
  1265. /* make_request is active */
  1266. *(current->bio_tail) = bio;
  1267. bio->bi_next = NULL;
  1268. current->bio_tail = &bio->bi_next;
  1269. return;
  1270. }
  1271. /* following loop may be a bit non-obvious, and so deserves some
  1272. * explanation.
  1273. * Before entering the loop, bio->bi_next is NULL (as all callers
  1274. * ensure that) so we have a list with a single bio.
  1275. * We pretend that we have just taken it off a longer list, so
  1276. * we assign bio_list to the next (which is NULL) and bio_tail
  1277. * to &bio_list, thus initialising the bio_list of new bios to be
  1278. * added. __generic_make_request may indeed add some more bios
  1279. * through a recursive call to generic_make_request. If it
  1280. * did, we find a non-NULL value in bio_list and re-enter the loop
  1281. * from the top. In this case we really did just take the bio
  1282. * of the top of the list (no pretending) and so fixup bio_list and
  1283. * bio_tail or bi_next, and call into __generic_make_request again.
  1284. *
  1285. * The loop was structured like this to make only one call to
  1286. * __generic_make_request (which is important as it is large and
  1287. * inlined) and to keep the structure simple.
  1288. */
  1289. BUG_ON(bio->bi_next);
  1290. do {
  1291. current->bio_list = bio->bi_next;
  1292. if (bio->bi_next == NULL)
  1293. current->bio_tail = &current->bio_list;
  1294. else
  1295. bio->bi_next = NULL;
  1296. __generic_make_request(bio);
  1297. bio = current->bio_list;
  1298. } while (bio);
  1299. current->bio_tail = NULL; /* deactivate */
  1300. }
  1301. EXPORT_SYMBOL(generic_make_request);
  1302. /**
  1303. * submit_bio - submit a bio to the block device layer for I/O
  1304. * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
  1305. * @bio: The &struct bio which describes the I/O
  1306. *
  1307. * submit_bio() is very similar in purpose to generic_make_request(), and
  1308. * uses that function to do most of the work. Both are fairly rough
  1309. * interfaces; @bio must be presetup and ready for I/O.
  1310. *
  1311. */
void submit_bio(int rw, struct bio *bio)
{
        int count = bio_sectors(bio);

        bio->bi_rw |= rw;

        /*
         * If it's a regular read/write or a barrier with data attached,
         * go through the normal accounting stuff before submission.
         */
        if (bio_has_data(bio)) {
                if (rw & WRITE) {
                        count_vm_events(PGPGOUT, count);
                } else {
                        task_io_account_read(bio->bi_size);
                        count_vm_events(PGPGIN, count);
                }

                if (unlikely(block_dump)) {
                        char b[BDEVNAME_SIZE];
                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
                               current->comm, task_pid_nr(current),
                               (rw & WRITE) ? "WRITE" : "READ",
                               (unsigned long long)bio->bi_sector,
                               bdevname(bio->bi_bdev, b));
                }
        }

        generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
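
/*
 * Illustrative sketch (not part of the original file): building and
 * submitting a one-segment read bio.  example_end_io is a hypothetical
 * completion callback; a real caller would typically signal a waiter
 * from it before dropping the bio reference.
 */
static void example_end_io(struct bio *bio, int err)
{
        /* hypothetical: notify whoever issued the read, then release */
        bio_put(bio);
}

static void example_read_first_sector(struct block_device *bdev,
                                      struct page *page)
{
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);

        bio->bi_bdev = bdev;
        bio->bi_sector = 0;                     /* first sector */
        bio_add_page(bio, page, 512, 0);        /* one 512-byte segment */
        bio->bi_end_io = example_end_io;
        submit_bio(READ, bio);
}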
/**
 * blk_rq_check_limits - Helper function to check a request for the queue limit
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    This function should also be useful for request stacking drivers
 *    in some cases below, so export this function.
 *    Request stacking drivers like request-based dm may change the queue
 *    limits while requests are in the queue (e.g. dm's table swapping).
 *    Such request stacking drivers should check those requests against
 *    the new queue limits again when they dispatch those requests,
 *    although such checks are also done against the old queue limits
 *    when submitting requests.
 */
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
        if (rq->nr_sectors > q->max_sectors ||
            rq->data_len > q->max_hw_sectors << 9) {
                printk(KERN_ERR "%s: over max size limit.\n", __func__);
                return -EIO;
        }

        /*
         * queue's settings related to segment counting like q->bounce_pfn
         * may differ from that of other stacking queues.
         * Recalculate it to check the request correctly on this queue's
         * limitation.
         */
        blk_recalc_rq_segments(rq);
        if (rq->nr_phys_segments > q->max_phys_segments ||
            rq->nr_phys_segments > q->max_hw_segments) {
                printk(KERN_ERR "%s: over max segments limit.\n", __func__);
                return -EIO;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_check_limits);
/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
        unsigned long flags;

        if (blk_rq_check_limits(q, rq))
                return -EIO;

#ifdef CONFIG_FAIL_MAKE_REQUEST
        if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
            should_fail(&fail_make_request, blk_rq_bytes(rq)))
                return -EIO;
#endif

        spin_lock_irqsave(q->queue_lock, flags);

        /*
         * The request being submitted must already be dequeued, because
         * it is about to be linked into another request_queue.
         */
        BUG_ON(blk_queued_rq(rq));

        drive_stat_acct(rq, 1);
        __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);

        spin_unlock_irqrestore(q->queue_lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
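
/*
 * Illustrative sketch (not part of the original file): how a request
 * stacking driver such as request-based dm might hand a fully prepared
 * clone to an underlying queue.  Error propagation back to the original
 * request is elided.
 */
static void example_dispatch_clone(struct request_queue *lower_q,
                                   struct request *clone)
{
        if (blk_insert_cloned_request(lower_q, clone))
                printk(KERN_ERR "example: clone rejected by lower queue\n");
}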
/**
 * blkdev_dequeue_request - dequeue request and start timeout timer
 * @req: request to dequeue
 *
 * Dequeue @req and start the timeout timer on it.  This hands off the
 * request to the driver.
 *
 * Block-internal functions which don't want to start the timer should
 * call elv_dequeue_request() instead.
 */
void blkdev_dequeue_request(struct request *req)
{
        elv_dequeue_request(req->q, req);

        /*
         * We are now handing the request to the hardware, so add the
         * timeout handler.
         */
        blk_add_timer(req);
}
EXPORT_SYMBOL(blkdev_dequeue_request);
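
/*
 * Illustrative sketch (not part of the original file): the canonical
 * shape of a driver's request_fn.  elv_next_request() peeks at the head
 * of the queue; blkdev_dequeue_request() hands the request to the
 * hardware and arms the timeout.
 */
static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                blkdev_dequeue_request(rq);
                /*
                 * Start hardware I/O on rq here; the IRQ handler later
                 * finishes it with blk_end_request().
                 */
        }
}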
static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
        if (!blk_do_io_stat(req))
                return;

        if (blk_fs_request(req)) {
                const int rw = rq_data_dir(req);
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = disk_map_sector_rcu(req->rq_disk, req->sector);
                part_stat_add(cpu, part, sectors[rw], bytes >> 9);
                part_stat_unlock();
        }
}
static void blk_account_io_done(struct request *req)
{
        if (!blk_do_io_stat(req))
                return;

        /*
         * Account IO completion.  bar_rq isn't accounted as a normal
         * IO on queueing nor completion.  Accounting the containing
         * request is enough.
         */
        if (blk_fs_request(req) && req != &req->q->bar_rq) {
                unsigned long duration = jiffies - req->start_time;
                const int rw = rq_data_dir(req);
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = disk_map_sector_rcu(req->rq_disk, req->sector);

                part_stat_inc(cpu, part, ios[rw]);
                part_stat_add(cpu, part, ticks[rw], duration);
                part_round_stats(cpu, part);
                part_dec_in_flight(part);

                part_stat_unlock();
        }
}
/**
 * __end_that_request_first - end I/O on a request
 * @req:      the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, and sets it up
 *     for the next range of segments (if any) in the cluster.
 *
 * Return:
 *     %0 - we are done with this request, call end_that_request_last()
 *     %1 - still buffers pending for this request
 **/
static int __end_that_request_first(struct request *req, int error,
                                    int nr_bytes)
{
        int total_bytes, bio_nbytes, next_idx = 0;
        struct bio *bio;

        trace_block_rq_complete(req->q, req);

        /*
         * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
         * sense key with us all the way through
         */
        if (!blk_pc_request(req))
                req->errors = 0;

        if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
                printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
                       req->rq_disk ? req->rq_disk->disk_name : "?",
                       (unsigned long long)req->sector);
        }

        blk_account_io_completion(req, nr_bytes);

        total_bytes = bio_nbytes = 0;
        while ((bio = req->bio) != NULL) {
                int nbytes;

                if (nr_bytes >= bio->bi_size) {
                        req->bio = bio->bi_next;
                        nbytes = bio->bi_size;
                        req_bio_endio(req, bio, nbytes, error);
                        next_idx = 0;
                        bio_nbytes = 0;
                } else {
                        int idx = bio->bi_idx + next_idx;

                        if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
                                blk_dump_rq_flags(req, "__end_that");
                                printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
                                       __func__, bio->bi_idx, bio->bi_vcnt);
                                break;
                        }

                        nbytes = bio_iovec_idx(bio, idx)->bv_len;
                        BIO_BUG_ON(nbytes > bio->bi_size);

                        /*
                         * not a complete bvec done
                         */
                        if (unlikely(nbytes > nr_bytes)) {
                                bio_nbytes += nr_bytes;
                                total_bytes += nr_bytes;
                                break;
                        }

                        /*
                         * advance to the next vector
                         */
                        next_idx++;
                        bio_nbytes += nbytes;
                }

                total_bytes += nbytes;
                nr_bytes -= nbytes;

                bio = req->bio;
                if (bio) {
                        /*
                         * end more in this run, or just return 'not-done'
                         */
                        if (unlikely(nr_bytes <= 0))
                                break;
                }
        }

        /*
         * completely done
         */
        if (!req->bio)
                return 0;

        /*
         * if the request wasn't completed, update state
         */
        if (bio_nbytes) {
                req_bio_endio(req, bio, bio_nbytes, error);
                bio->bi_idx += next_idx;
                bio_iovec(bio)->bv_offset += nr_bytes;
                bio_iovec(bio)->bv_len -= nr_bytes;
        }

        blk_recalc_rq_sectors(req, total_bytes >> 9);
        blk_recalc_rq_segments(req);
        return 1;
}
/*
 * queue lock must be held
 */
static void end_that_request_last(struct request *req, int error)
{
        if (blk_rq_tagged(req))
                blk_queue_end_tag(req->q, req);

        if (blk_queued_rq(req))
                elv_dequeue_request(req->q, req);

        if (unlikely(laptop_mode) && blk_fs_request(req))
                laptop_io_completion();

        blk_delete_timer(req);

        blk_account_io_done(req);

        if (req->end_io)
                req->end_io(req, error);
        else {
                if (blk_bidi_rq(req))
                        __blk_put_request(req->next_rq->q, req->next_rq);

                __blk_put_request(req->q, req);
        }
}
/**
 * blk_rq_bytes - Returns bytes left to complete in the entire request
 * @rq: the request being processed
 **/
unsigned int blk_rq_bytes(struct request *rq)
{
        if (blk_fs_request(rq))
                return rq->hard_nr_sectors << 9;

        return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_bytes);
/**
 * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
 * @rq: the request being processed
 **/
unsigned int blk_rq_cur_bytes(struct request *rq)
{
        if (blk_fs_request(rq))
                return rq->current_nr_sectors << 9;

        if (rq->bio)
                return rq->bio->bi_size;

        return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
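
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * controller transfers one segment at a time might size the next
 * transfer with blk_rq_cur_bytes() while tracking the whole-request
 * residual with blk_rq_bytes().
 */
static void example_report_residual(struct request *rq)
{
        printk(KERN_DEBUG "next xfer %u bytes, %u bytes left in total\n",
               blk_rq_cur_bytes(rq), blk_rq_bytes(rq));
}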
/**
 * end_request - end I/O on the current segment of the request
 * @req:      the request being processed
 * @uptodate: error value or %0/%1 uptodate flag
 *
 * Description:
 *     Ends I/O on the current segment of a request.  If that is the only
 *     remaining segment, the request is also completed and freed.
 *
 *     This is a remnant of how older block drivers handled I/O completions.
 *     Modern drivers typically end I/O on the full request in one go, unless
 *     they have a residual value to account for.  For that case this function
 *     isn't really useful, unless the residual just happens to be the
 *     full current segment.  In other words, don't use this function in new
 *     code.  Use blk_end_request() or __blk_end_request() to end a request.
 **/
void end_request(struct request *req, int uptodate)
{
        int error = 0;

        if (uptodate <= 0)
                error = uptodate ? uptodate : -EIO;

        __blk_end_request(req, error, req->hard_cur_sectors << 9);
}
EXPORT_SYMBOL(end_request);
static int end_that_request_data(struct request *rq, int error,
                                 unsigned int nr_bytes, unsigned int bidi_bytes)
{
        if (rq->bio) {
                if (__end_that_request_first(rq, error, nr_bytes))
                        return 1;

                /* Bidi request must be completed as a whole */
                if (blk_bidi_rq(rq) &&
                    __end_that_request_first(rq->next_rq, error, bidi_bytes))
                        return 1;
        }

        return 0;
}
/**
 * blk_end_io - Generic end_io function to complete a request.
 * @rq:           the request being processed
 * @error:        %0 for success, < %0 for error
 * @nr_bytes:     number of bytes to complete @rq
 * @bidi_bytes:   number of bytes to complete @rq->next_rq
 * @drv_callback: function called between completion of bios in the request
 *                and completion of the request.
 *                If the callback returns non-%0, this helper returns without
 *                completion of the request.
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %0 - we are done with this request
 *     %1 - this request is not freed yet, it still has pending buffers.
 **/
static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
                      unsigned int bidi_bytes,
                      int (drv_callback)(struct request *))
{
        struct request_queue *q = rq->q;
        unsigned long flags = 0UL;

        if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
                return 1;

        /* Special feature for tricky drivers */
        if (drv_callback && drv_callback(rq))
                return 1;

        add_disk_randomness(rq->rq_disk);

        spin_lock_irqsave(q->queue_lock, flags);
        end_that_request_last(rq, error);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return 0;
}
/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %0 - we are done with this request
 *     %1 - still buffers pending for this request
 **/
int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
        return blk_end_io(rq, error, nr_bytes, 0, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_request);
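
/*
 * Illustrative sketch (not part of the original file): completing I/O
 * from a driver's interrupt handler.  A non-zero return means the
 * request still has pending buffers and must be finished by a later
 * call once more bytes complete.
 */
static void example_irq_completion(struct request *rq, int error,
                                   unsigned int bytes_done)
{
        if (blk_end_request(rq, error, bytes_done))
                printk(KERN_DEBUG "example: request not yet complete\n");
}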
/**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Must be called with queue lock held, unlike blk_end_request().
 *
 * Return:
 *     %0 - we are done with this request
 *     %1 - still buffers pending for this request
 **/
int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
        if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
                return 1;

        add_disk_randomness(rq->rq_disk);

        end_that_request_last(rq, error);

        return 0;
}
EXPORT_SYMBOL_GPL(__blk_end_request);
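
/*
 * Illustrative sketch (not part of the original file): the locked
 * variant is for paths that already hold the queue lock, e.g. a
 * request_fn that fails a request without ever starting it.
 */
static void example_fail_request_locked(struct request *rq)
{
        /* caller must hold rq->q->queue_lock */
        __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
}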
/**
 * blk_end_bidi_request - Helper function for drivers to complete bidi request.
 * @rq:         the bidi request being processed
 * @error:      %0 for success, < %0 for error
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *
 * Return:
 *     %0 - we are done with this request
 *     %1 - still buffers pending for this request
 **/
int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
                         unsigned int bidi_bytes)
{
        return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_bidi_request);
/**
 * blk_update_request - Special helper function for request stacking drivers
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete @rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
 *     the request structure even if @rq doesn't have leftover.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
 *     Actual device drivers should use blk_end_request instead.
 */
void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
{
        if (!end_that_request_data(rq, error, nr_bytes, 0)) {
                /*
                 * These members are not updated in end_that_request_data()
                 * when all bios are completed.
                 * Update them so that the request stacking driver can find
                 * how many bytes remain in the request later.
                 */
                rq->nr_sectors = rq->hard_nr_sectors = 0;
                rq->current_nr_sectors = rq->hard_cur_sectors = 0;
        }
}
EXPORT_SYMBOL_GPL(blk_update_request);
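
/*
 * Illustrative sketch (not part of the original file): a request
 * stacking driver updating the original request as its clone completes
 * in pieces.  The original stays allocated regardless of leftover; the
 * driver finishes it later, e.g. via blk_end_request().
 */
static void example_clone_bytes_completed(struct request *orig, int error,
                                          unsigned int bytes)
{
        blk_update_request(orig, error, bytes);
}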
/**
 * blk_end_request_callback - Special helper function for tricky drivers
 * @rq:           the request being processed
 * @error:        %0 for success, < %0 for error
 * @nr_bytes:     number of bytes to complete
 * @drv_callback: function called between completion of bios in the request
 *                and completion of the request.
 *                If the callback returns non-%0, this helper returns without
 *                completion of the request.
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is used only for existing tricky drivers
 *     (e.g. cdrom_newpc_intr() of ide-cd).
 *     This interface will be removed when such drivers are rewritten.
 *     Don't use this interface in other places anymore.
 *
 * Return:
 *     %0 - we are done with this request
 *     %1 - this request is not freed yet.
 *          this request still has pending buffers or
 *          the driver doesn't want to finish this request yet.
 **/
int blk_end_request_callback(struct request *rq, int error,
                             unsigned int nr_bytes,
                             int (drv_callback)(struct request *))
{
        return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
}
EXPORT_SYMBOL_GPL(blk_end_request_callback);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                     struct bio *bio)
{
        /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw, and
           we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */
        rq->cmd_flags |= (bio->bi_rw & 3);

        if (bio_has_data(bio)) {
                rq->nr_phys_segments = bio_phys_segments(q, bio);
                rq->buffer = bio_data(bio);
        }
        rq->current_nr_sectors = bio_cur_sectors(bio);
        rq->hard_cur_sectors = rq->current_nr_sectors;
        rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
        rq->data_len = bio->bi_size;

        rq->bio = rq->biotail = bio;

        if (bio->bi_bdev)
                rq->rq_disk = bio->bi_bdev->bd_disk;
}
/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q: the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their
 *    own exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when the underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
        if (q->lld_busy_fn)
                return q->lld_busy_fn(q);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);
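
/*
 * Illustrative sketch (not part of the original file): a request
 * stacking driver holding back dispatch while the underlying device is
 * busy, so that requests keep merging on the stacking queue instead.
 */
static int example_may_dispatch(struct request_queue *lower_q)
{
        return !blk_lld_busy(lower_q);
}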
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
        return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);
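
/*
 * Illustrative sketch (not part of the original file): deferring work
 * to kblockd from a context that cannot block.  The work handler and
 * the caller-supplied work_struct are hypothetical.
 */
static void example_work_handler(struct work_struct *work)
{
        /* hypothetical: e.g. run the queue from process context */
}

static void example_defer_to_kblockd(struct request_queue *q,
                                     struct work_struct *work)
{
        INIT_WORK(work, example_work_handler);
        kblockd_schedule_work(q, work);
}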
int __init blk_dev_init(void)
{
        kblockd_workqueue = create_workqueue("kblockd");
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");

        request_cachep = kmem_cache_create("blkdev_requests",
                        sizeof(struct request), 0, SLAB_PANIC, NULL);

        blk_requestq_cachep = kmem_cache_create("blkdev_queue",
                        sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

        return 0;
}