/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *	Initial versions: Eric Youngdale (eric@andante.org).
 *			  Based upon conversations with large numbers
 *			  of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		32

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP

static void scsi_run_queue(struct request_queue *q);
/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/*
 * Function:	scsi_queue_insert()
 *
 * Purpose:	Insert a command in the midlevel queue.
 *
 * Arguments:	cmd    - command that we are adding to queue.
 *		reason - why we are inserting command to queue.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	Nothing.
 *
 * Notes:	We do this for one of two cases.  Either the host is busy
 *		and it cannot accept any more commands for the time being,
 *		or the device returned QUEUE_FULL and can accept no more
 *		commands.
 * Notes:	This could be called either from an interrupt context or a
 *		normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
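
/*
 * Example (hypothetical caller, not part of this file): a minimal
 * sketch of issuing TEST UNIT READY through scsi_execute().  The
 * function name and the timeout/retry values are illustrative
 * assumptions; no data is transferred, so the buffer is NULL and the
 * direction is DMA_NONE.
 *
 *	static int example_test_unit_ready(struct scsi_device *sdev)
 *	{
 *		unsigned char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *
 *		return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0,
 *				    NULL, 30 * HZ, 3, 0);
 *	}
 */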
int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
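
/*
 * Example (hypothetical caller): the same command issued through
 * scsi_execute_req(), which decodes any sense data into a
 * scsi_sense_hdr so the caller can inspect the sense key directly.
 * The function name and return-value policy are assumptions for
 * illustration.
 *
 *	static int example_ready_check(struct scsi_device *sdev)
 *	{
 *		unsigned char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *		struct scsi_sense_hdr sshdr;
 *		int result;
 *
 *		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
 *					  &sshdr, 30 * HZ, 3);
 *		if (result && scsi_sense_valid(&sshdr) &&
 *		    sshdr.sense_key == NOT_READY)
 *			return -EBUSY;
 *		return result ? -EIO : 0;
 *	}
 */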
struct scsi_io_context {
	void *data;
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static kmem_cache_t *scsi_io_context_cache;

static void scsi_end_async(struct request *req, int uptodate)
{
	struct scsi_io_context *sioc = req->end_io_data;

	if (sioc->done)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);

	kmem_cache_free(scsi_io_context_cache, sioc);
	__blk_put_request(req->q, req);
}

static int scsi_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);
	blk_queue_bounce(q, &bio);

	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!q->back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->hard_nr_sectors += bio_sectors(bio);
		rq->nr_sectors = rq->hard_nr_sectors;
	}

	return 0;
}

static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	bio_put(bio);
	return 0;
}

/**
 * scsi_req_map_sg - map a scatterlist into a request
 * @rq:		request to fill
 * @sgl:	scatterlist
 * @nsegs:	number of elements
 * @bufflen:	len of buffer
 * @gfp:	memory allocation flags
 *
 * scsi_req_map_sg maps a scatterlist into a request so that the
 * request can be sent to the block layer. We do not trust the scatterlist
 * sent to us, as some ULDs use that struct to only organize the pages.
 */
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
			   int nsegs, unsigned bufflen, gfp_t gfp)
{
	struct request_queue *q = rq->q;
	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int data_len = 0, len, bytes, off;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for (i = 0; i < nsegs; i++) {
		page = sgl[i].page;
		off = sgl[i].offset;
		len = sgl[i].length;
		data_len += len;

		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;

				bio = bio_alloc(gfp, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsi_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				err = scsi_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, bio->bi_size, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = data_len;
	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put in case it was bounced
		 */
		bio_endio(bio, bio->bi_size, 0);
	}

	return err;
}
/**
 * scsi_execute_async - insert request
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @cmd_len:	length of scsi cdb
 * @data_direction: data direction
 * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
 * @bufflen:	len of buffer
 * @use_sg:	if buffer is a scatterlist this is the number of elements
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @privdata:	data passed to done()
 * @done:	callback function when done
 * @gfp:	memory allocation flags
 **/
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
		       int cmd_len, int data_direction, void *buffer,
		       unsigned bufflen, int use_sg, int timeout, int retries,
		       void *privdata, void (*done)(void *, char *, int, int),
		       gfp_t gfp)
{
	struct request *req;
	struct scsi_io_context *sioc;
	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	sioc = kmem_cache_alloc(scsi_io_context_cache, gfp);
	if (!sioc)
		return DRIVER_ERROR << 24;
	memset(sioc, 0, sizeof(*sioc));

	req = blk_get_request(sdev->request_queue, write, gfp);
	if (!req)
		goto free_sense;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;

	if (use_sg)
		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
	else if (bufflen)
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);

	if (err)
		goto free_req;

	req->cmd_len = cmd_len;
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sioc->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = sioc;

	sioc->data = privdata;
	sioc->done = done;

	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
	return 0;

free_req:
	blk_put_request(req);
free_sense:
	kmem_cache_free(scsi_io_context_cache, sioc);
	return DRIVER_ERROR << 24;
}
EXPORT_SYMBOL_GPL(scsi_execute_async);
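
/*
 * Example (hypothetical caller): an asynchronous INQUIRY submitted with
 * scsi_execute_async().  The done routine matches the scsi_io_context
 * done() signature above and simply wakes the submitter through a
 * struct completion passed in as privdata.  The function names, the
 * 36-byte allocation length and the timeout/retry values are
 * illustrative assumptions.
 *
 *	static void example_done(void *data, char *sense, int result,
 *				 int resid)
 *	{
 *		complete(data);
 *	}
 *
 *	static int example_inquiry(struct scsi_device *sdev, void *buf,
 *				   struct completion *wait)
 *	{
 *		unsigned char cmd[] = { INQUIRY, 0, 0, 0, 36, 0 };
 *
 *		return scsi_execute_async(sdev, cmd, 6, DMA_FROM_DEVICE,
 *					  buf, 36, 0, 30 * HZ, 3,
 *					  wait, example_done, GFP_KERNEL);
 *	}
 */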
/*
 * Function:	scsi_init_cmd_errh()
 *
 * Purpose:	Initialize cmd fields related to error handling.
 *
 * Arguments:	cmd	- command that is ready to be queued.
 *
 * Notes:	This function has the job of initializing a number of
 *		fields related to error handling.  Typically this will
 *		be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
				 same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}
/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_REENTER,
				      &sdev->request_queue->queue_flags)) {
			blk_run_queue(sdev->request_queue);
			clear_bit(QUEUE_FLAG_REENTER,
				  &sdev->request_queue->queue_flags);
		} else
			blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can happen for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}
/*
 * Function:	scsi_end_request()
 *
 * Purpose:	Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:	cmd	 - command that is complete.
 *		uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *		bytes	 - number of bytes of completed I/O
 *		requeue	 - indicates whether we should requeue leftovers.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	cmd if requeue required, NULL otherwise.
 *
 * Notes:	This is called for block device requests in order to
 *		mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req, uptodate);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}
static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	return sgl;
}
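
/*
 * Worked example (illustrative): a command with cmd->use_sg == 40
 * physical segments falls in the "33 ... 64" case above, so
 * sglist_len becomes 3 and the table is allocated from
 * scsi_sg_pools[3], the "sgpool-64" mempool of 64-entry
 * scatterlists.  The pool index is remembered in cmd->sglist_len so
 * that scsi_free_sgtable() below can return the table to the same
 * pool.
 */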
static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index >= SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}

/*
 * Function:	scsi_release_buffers()
 *
 * Purpose:	Completion processing for block device I/O requests.
 *
 * Arguments:	cmd	- command that we are bailing.
 *
 * Lock status:	Assumed that no lock is held upon entry.
 *
 * Returns:	Nothing
 *
 * Notes:	In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}
/*
 * Function:	scsi_io_completion()
 *
 * Purpose:	Completion processing for block device I/O requests.
 *
 * Arguments:	cmd	- command that is finished.
 *
 * Lock status:	Assumed that no lock is held upon entry.
 *
 * Returns:	Nothing
 *
 * Notes:	This function is matched in terms of capabilities to
 *		the function that created the scatter-gather list.
 *		In other words, if there are no bounce buffers
 *		(the normal case for most drivers), we don't need
 *		the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	int this_count = cmd->request_bufflen;
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	scsi_release_buffers(cmd);

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		} else
			req->data_len = cmd->resid;
	}

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
				      "%d bytes done.\n",
				      req->nr_sectors, good_bytes));
	SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

	if (clear_errors)
		req->errors = 0;

	/* A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
		return;

	/* good_bytes = 0, or (inclusive) there were leftovers and
	 * result = 0, so scsi_end_request couldn't retry.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			     sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/* This will cause a retry with a
				 * 6-byte command.
				 */
				scsi_requeue_command(q, cmd);
				return;
			} else {
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					scsi_requeue_command(q, cmd);
					return;
				default:
					break;
				}
			}
			if (!(req->cmd_flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					    "Device not ready: ");
				scsi_print_sense_hdr("", &sshdr);
			}
			scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->cmd_flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					    "Volume overflow, CDB: ");
				__scsi_print_command(cmd->cmnd);
				scsi_print_sense("", cmd);
			}
			/* See SSC3rXX or current. */
			scsi_end_request(cmd, 0, this_count, 1);
			return;
		default:
			break;
		}
	}
	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the request and see what
		 * happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->cmd_flags & REQ_QUIET)) {
			scmd_printk(KERN_INFO, cmd,
				    "SCSI error: return code = 0x%08x\n",
				    result);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
	}
	scsi_end_request(cmd, 0, this_count, !result);
}
EXPORT_SYMBOL(scsi_io_completion);
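
/*
 * Example (illustrative, modeled loosely on the done() routines of
 * upper-level drivers of this era): compute how many bytes completed
 * successfully, then hand the command back to scsi_io_completion(),
 * which finishes the good part and retries or fails the remainder.
 * The function name and the all-or-nothing byte accounting are
 * simplifying assumptions, not part of this file.
 *
 *	static void example_rw_intr(struct scsi_cmnd *cmd)
 *	{
 *		unsigned int good_bytes;
 *
 *		good_bytes = cmd->result ? 0 : cmd->request_bufflen;
 *		scsi_io_completion(cmd, good_bytes);
 *	}
 */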
/*
 * Function:	scsi_init_io()
 *
 * Purpose:	SCSI I/O initialize function.
 *
 * Arguments:	cmd	- Command descriptor we wish to initialize
 *
 * Returns:	0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	struct scatterlist *sgpnt;
	int		   count;

	/*
	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
	 */
	if (blk_pc_request(req) && !req->bio) {
		cmd->request_bufflen = req->data_len;
		cmd->request_buffer = req->data;
		req->buffer = req->data;
		cmd->use_sg = 0;
		return 0;
	}

	/*
	 * we used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * if sg table allocation fails, requeue request later.
	 */
	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!sgpnt)) {
		scsi_unprep_request(req);
		return BLKPREP_DEFER;
	}

	cmd->request_buffer = (char *) sgpnt;
	cmd->request_bufflen = req->nr_sectors << 9;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

	/*
	 * mapped well, send it off
	 */
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	/* release the command and kill it */
	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
	return BLKPREP_KILL;
}
static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state != SDEV_RUNNING)
		return -ENXIO;

	drv = *(struct scsi_driver **) disk->private_data;
	if (drv->issue_flush)
		return drv->issue_flush(&sdev->sdev_gendev, error_sector);

	return -EOPNOTSUPP;
}

static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
{
	BUG_ON(!blk_pc_request(cmd->request));
	/*
	 * This will complete the whole command with uptodate=1 so
	 * as far as the block layer is concerned the command completed
	 * successfully. Since this is a REQ_BLOCK_PC command the
	 * caller should check the request's errors value
	 */
	scsi_io_completion(cmd, cmd->request_bufflen);
}

static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
	cmd->cmd_len = req->cmd_len;
	if (!req->data_len)
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = req->data_len;
	cmd->allowed = req->retries;
	cmd->timeout_per_command = req->timeout;
	cmd->done = scsi_blk_pc_done;
}
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int specials_only = 0;

	/*
	 * Just check to see if the device is online.  If it isn't, we
	 * refuse to process any commands.  The device must be brought
	 * online before trying any recovery commands.
	 */
	if (unlikely(!scsi_device_online(sdev))) {
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to offline device\n");
		goto kill;
	}
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		/* OK, we're not in a running state; don't prep
		 * user commands */
		if (sdev->sdev_state == SDEV_DEL) {
			/* Device is fully deleted, no commands
			 * at all allowed down */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			goto kill;
		}
		/* OK, we only allow special commands (i.e. not
		 * user initiated ones) */
		specials_only = sdev->sdev_state;
	}

	/*
	 * Find the actual device driver associated with this command.
	 * The SPECIAL requests are things like character device or
	 * ioctls, which did not originate from ll_rw_blk.  Note that
	 * the special field is also used to indicate the cmd for
	 * the remainder of a partially fulfilled request that can
	 * come up when there is a medium error.  We have to treat
	 * these two cases differently.  We differentiate by looking
	 * at request->cmd, as this tells us the real story.
	 */
	if (blk_special_request(req) && req->special)
		cmd = req->special;
	else if (blk_pc_request(req) || blk_fs_request(req)) {
		if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)) {
			if (specials_only == SDEV_QUIESCE ||
			    specials_only == SDEV_BLOCK)
				goto defer;

			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to device being removed\n");
			goto kill;
		}

		/*
		 * Now try and find a command block that we can use.
		 */
		if (!req->special) {
			cmd = scsi_get_command(sdev, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
		} else
			cmd = req->special;

		/* pull a tag out of the request if we have one */
		cmd->tag = req->tag;
	} else {
		blk_dump_rq_flags(req, "SCSI bad req");
		goto kill;
	}

	/* note the overloading of req->special.  When the tag
	 * is active it always means cmd.  If the tag goes
	 * back for re-queueing, it may be reset */
	req->special = cmd;
	cmd->request = req;

	/*
	 * FIXME: drop the lock here because the functions below
	 * expect to be called without the queue lock held.  Also,
	 * previously, we dequeued the request before dropping the
	 * lock.  We hope REQ_STARTED prevents anything untoward from
	 * happening now.
	 */
	if (blk_fs_request(req) || blk_pc_request(req)) {
		int ret;

		/*
		 * This will do a couple of things:
		 *  1) Fill in the actual SCSI command.
		 *  2) Fill in any other upper-level specific fields
		 *     (timeout).
		 *
		 * If this returns 0, it means that the request failed
		 * (reading past end of disk, reading offline device,
		 * etc).  This won't actually talk to the device, but
		 * some kinds of consistency checking may cause the
		 * request to be rejected immediately.
		 */

		/*
		 * This sets up the scatter-gather table (allocating if
		 * required).
		 */
		ret = scsi_init_io(cmd);
		switch (ret) {
			/* For BLKPREP_KILL/DEFER the cmd was released */
		case BLKPREP_KILL:
			goto kill;
		case BLKPREP_DEFER:
			goto defer;
		}

		/*
		 * Initialize the actual SCSI command for this request.
		 */
		if (blk_pc_request(req)) {
			scsi_setup_blk_pc_cmnd(cmd);
		} else if (req->rq_disk) {
			struct scsi_driver *drv;

			drv = *(struct scsi_driver **)req->rq_disk->private_data;
			if (unlikely(!drv->init_command(cmd))) {
				scsi_release_buffers(cmd);
				scsi_put_command(cmd);
				goto kill;
			}
		}
	}

	/*
	 * The request is now prepped, no need to come back here
	 */
	req->cmd_flags |= REQ_DONTPREP;
	return BLKPREP_OK;

 defer:
	/* If we defer, the elv_next_request() returns NULL, but the
	 * queue must be restarted, so we plug here if no returning
	 * command will automatically do that. */
	if (sdev->device_busy == 0)
		blk_plug_device(q);
	return BLKPREP_DEFER;
 kill:
	req->errors = DID_NO_CONNECT << 16;
	return BLKPREP_KILL;
}
/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				       struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				sdev_printk(KERN_INFO, sdev,
					    "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
					struct Scsi_Host *shost,
					struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
				       shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}
/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, request_queue_t *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct Scsi_Host *shost;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
		       __FUNCTION__);
		BUG();
	}

	/* only dereference cmd once the NULL check above has passed */
	sdev = cmd->device;
	shost = sdev->host;

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	__scsi_done(cmd);
}
static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->completion_data;
	unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_retry_command(cmd);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}
/*
 * Function:	scsi_request_fn()
 *
 * Purpose:	Main strategy routine for SCSI.
 *
 * Arguments:	q	- Pointer to actual queue.
 *
 * Returns:	Nothing
 *
 * Lock status:	IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __FUNCTION__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *	     take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, NULL);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}
/*
 * Function:	scsi_block_requests()
 *
 * Purpose:	Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:	shost	- Host in question
 *
 * Returns:	Nothing
 *
 * Lock status:	No locks are assumed held.
 *
 * Notes:	There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);
/*
 * Function:	scsi_unblock_requests()
 *
 * Purpose:	Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:	shost	- Host in question
 *
 * Returns:	Nothing
 *
 * Lock status:	No locks are assumed held.
 *
 * Notes:	There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
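/*
 * Usage sketch (illustrative only, not part of this file): a low-level
 * driver might bracket a controller re-initialisation with these two
 * calls so the mid-layer queues no new commands while the hardware is
 * down. my_hba_reset() is a hypothetical LLD-private helper.
 *
 *	scsi_block_requests(shost);
 *	my_hba_reset(shost);		// LLD-private hardware reset
 *	scsi_unblock_requests(shost);	// also reruns the host queues
 */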
int __init scsi_init_queue(void)
{
	int i;

	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
					sizeof(struct scsi_io_context),
					0, 0, NULL, NULL);
	if (!scsi_io_context_cache) {
		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			/* bail out: a NULL slab would oops when the
			 * mempool below tries to allocate from it */
			return -ENOMEM;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			return -ENOMEM;
		}
	}

	return 0;
}
void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_io_context_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}
/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data:	returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
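/*
 * Usage sketch (illustrative only): flip the WCE bit in a caching mode
 * page that was previously read with scsi_mode_sense() below (with the
 * DBD bit set, so no block descriptors precede the page). "buffer",
 * "data" and "sshdr" are assumed to hold that earlier result;
 * MY_TIMEOUT and MY_RETRIES are hypothetical limits.
 *
 *	unsigned char *page = buffer + data.header_length;
 *	int page_len = page[1] + 2;
 *
 *	page[0] &= ~0x80;		// clear PS before MODE SELECT
 *	page[2] |= 0x04;		// WCE bit of the caching page
 *	data.device_specific = 0;	// reserved when writing the header
 *	if (scsi_mode_select(sdev, 1, 1, 8, page, page_len,
 *			     MY_TIMEOUT, MY_RETRIES, &data, &sshdr))
 *		printk(KERN_ERR "MODE SELECT failed\n");
 */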
/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to
 *		six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data:	returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns the status of scsi_execute_req(): zero if successful,
 *	non-zero otherwise.  On success the header length (4 for a
 *	six-byte command, 8 for a ten-byte one) is stored in
 *	@data->header_length.
 **/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(cmd, 0, sizeof(cmd));
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
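/*
 * Usage sketch (illustrative only): read the caching mode page (0x08)
 * and report the write-cache-enable bit. MY_TIMEOUT and MY_RETRIES are
 * hypothetical limits supplied by the caller.
 *
 *	unsigned char buffer[64];
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	int res, off;
 *
 *	res = scsi_mode_sense(sdev, 0x08, 0x08, buffer, sizeof(buffer),
 *			      MY_TIMEOUT, MY_RETRIES, &data, &sshdr);
 *	if (scsi_status_is_good(res)) {
 *		off = data.header_length + data.block_descriptor_length;
 *		// WCE is bit 2 of byte 2 of the caching page
 *		printk(KERN_INFO "WCE=%d\n",
 *		       (buffer[off + 2] & 0x04) ? 1 : 0);
 *	}
 */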
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				  timeout, retries);

	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
		if ((scsi_sense_valid(&sshdr)) &&
		    ((sshdr.sense_key == UNIT_ATTENTION) ||
		     (sshdr.sense_key == NOT_READY))) {
			sdev->changed = 1;
			result = 0;
		}
	}

	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
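/*
 * Usage sketch (illustrative only): check whether a device is ready,
 * with hypothetical MY_TIMEOUT/MY_RETRIES limits. A zero return also
 * covers removable devices that reported UNIT ATTENTION or NOT READY;
 * sdev->changed is set in that case, as the code above shows.
 *
 *	if (scsi_test_unit_ready(sdev, MY_TIMEOUT, MY_RETRIES) == 0) {
 *		if (sdev->changed)
 *			printk(KERN_INFO "media may have changed\n");
 *	}
 */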
/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful, or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
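/*
 * Summary of the legal transitions enforced above (left: new state,
 * right: states it may be entered from). A transition to the current
 * state is always allowed, handled by the early return.
 *
 *	RUNNING  <- CREATED, OFFLINE, QUIESCE, BLOCK
 *	QUIESCE  <- RUNNING, OFFLINE
 *	OFFLINE  <- CREATED, RUNNING, QUIESCE, BLOCK
 *	BLOCK    <- CREATED, RUNNING
 *	CANCEL   <- CREATED, RUNNING, QUIESCE, OFFLINE, BLOCK
 *	DEL      <- CREATED, RUNNING, OFFLINE, CANCEL
 *	CREATED  <- (nothing; it is the initial state only)
 */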
/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful, or an error if not.
 **/
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);
/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
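/*
 * Usage sketch (illustrative only): quiesce a device, issue an internal
 * request while user I/O is held off, then resume. my_send_diagnostic()
 * is a hypothetical helper built on scsi_execute_req().
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		my_send_diagnostic(sdev);
 *		scsi_device_resume(sdev);
 *	}
 */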
static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
/**
 * scsi_internal_device_block - internal function to put a device
 *				temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by SCSI LLDs to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_internal_device_unblock or device_block_tmo
 *	fires.  This routine assumes the host_lock is held on entry.
 **/
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err)
		return err;

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);
/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 * Called by SCSI LLDs or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 **/
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	int err;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (err)
		return err;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
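/*
 * Usage sketch (illustrative only): an LLD that loses its transport
 * might block the device from its interrupt handler and unblock it once
 * the link recovers. Per the notes above, both calls expect the caller
 * to hold the host_lock; "flags" is an unsigned long.
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_block(sdev);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *	// ... link recovery happens ...
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_unblock(sdev);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 */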
static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sg:		scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct page *page;

	for (i = 0; i < sg_count; i++) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg[i].length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __FUNCTION__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg[i].offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg[i].page, (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);
/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously
 *			   mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
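/*
 * Usage sketch (illustrative only): copy a few bytes out of a
 * scatter-gather list at a given byte offset. "dst" is a hypothetical
 * destination buffer. On return from scsi_kmap_atomic_sg(), "offset" is
 * the offset within the mapped page and "len" is clamped to the bytes
 * actually mapped, so the copy must respect both.
 *
 *	size_t offset = 512, len = 4;
 *	void *vaddr;
 *
 *	vaddr = scsi_kmap_atomic_sg(sg, sg_count, &offset, &len);
 *	if (vaddr) {
 *		memcpy(dst, vaddr + offset, len);
 *		scsi_kunmap_atomic_sg(vaddr);
 *	}
 */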