
/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP
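
/*
 * Editorial example (not in the original source): with SCSI_MAX_SG_SEGMENTS
 * at its usual value of 128, the table above instantiates the mempools
 * sgpool-8, sgpool-16, sgpool-32, sgpool-64 and sgpool-128, and
 * scsi_sgtable_index() further down picks the smallest pool that covers a
 * command's segment count, e.g. 5 segments map to sgpool-8 and 40 segments
 * to sgpool-64.
 */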
struct kmem_cache *scsi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	blk_unprep_request(req);
	req->special = NULL;
	scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	return __scsi_queue_insert(cmd, reason, 1);
}
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	length of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies (callers typically pass n * HZ)
 * @retries:	number of times to retry request
 * @flags:	flags to be OR'd into the request flags
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
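
/*
 * Usage sketch (editorial, not part of the original file): a typical
 * caller issues a no-data command such as TEST UNIT READY like this:
 *
 *	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cdb, DMA_NONE, NULL, 0, &sshdr,
 *				  10 * HZ, 3, NULL);
 *
 * A nonzero result can be decoded with the status_byte/host_byte/
 * driver_byte macros, and sshdr then carries the decoded sense data
 * (sense_key/asc/ascq).  The timeout and retry count here are
 * illustrative values only.
 */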
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
				 same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}
static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	LIST_HEAD(starved_list);
	unsigned long flags;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		int flagset;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);

		spin_lock(sdev->request_queue->queue_lock);
		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
				!test_bit(QUEUE_FLAG_REENTER,
					&sdev->request_queue->queue_flags);
		if (flagset)
			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
		__blk_run_queue(sdev->request_queue);
		if (flagset)
			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);

		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}
/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);
/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		/* kill remainder if no retries */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request_all(req, error);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}
static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}
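
/*
 * Worked example (editorial note): nents <= 8 selects index 0 (sgpool-8);
 * otherwise get_count_order() rounds nents up to the next power of two,
 * so nents = 16 yields 4 - 3 = 1 (sgpool-16) and nents = 40 yields
 * 6 - 3 = 3 (sgpool-64).
 */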
static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);
/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = -EIO;
		}

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;

			scsi_release_buffers(cmd);
			blk_end_request_all(req, 0);

			scsi_next_command(cmd);
			return;
		}
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
				      "%d bytes done.\n",
				      blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE, skip the
		 * print since the caller wants the ATA registers.  Only
		 * occurs on SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;

	error = -EIO;

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
			scsi_print_command(cmd);
		}
		if (blk_end_request_err(req, error))
			scsi_requeue_command(q, cmd);
		else
			scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct request *rq = cmd->request;

	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		rq->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	cmd->request->special = NULL;
	scsi_put_command(cmd);
	return error;
}
EXPORT_SYMBOL(scsi_init_io);
static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
					       struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;

	return cmd;
}
int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI
	 * command that does not transfer data, in which case they may
	 * optionally submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs by
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);
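
/*
 * Sketch of how an upper-level driver ties into this (editorial
 * illustration; sd and sr do the equivalent in their prep_fn):
 *
 *	static int uld_prep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		struct scsi_device *sdev = q->queuedata;
 *		struct scsi_cmnd *cmd;
 *		int ret;
 *
 *		ret = scsi_setup_fs_cmnd(sdev, rq);
 *		if (ret != BLKPREP_OK)
 *			return scsi_prep_return(q, rq, ret);
 *		cmd = rq->special;
 *		... build the CDB in cmd->cmnd here ...
 *		return scsi_prep_return(q, rq, BLKPREP_OK);
 *	}
 *
 * uld_prep_fn is a hypothetical name; the scsi_setup_fs_cmnd(),
 * rq->special and scsi_prep_return() usage follows this file.
 */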
int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);
int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);
int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);
/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				       struct scsi_device *sdev)
{
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (scsi_device_is_busy(sdev))
		return 0;

	return 1;
}

/*
 * scsi_target_queue_ready: checks whether we can send commands to the target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					  struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else
			return 0;
	}

	if (scsi_target_is_busy(starget)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);
	return 1;
}
/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
					struct Scsi_Host *shost,
					struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
 * (e.g. !sdev), scsi needs to return 'not busy'.
 * Otherwise, request stacking drivers may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_target *starget;

	if (!sdev)
		return 0;

	shost = sdev->host;
	starget = scsi_target(sdev);

	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}
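
/*
 * Editorial note: request stacking drivers (dm-multipath, for example)
 * reach scsi_lld_busy() through blk_lld_busy() on the underlying queue,
 * which scsi_alloc_queue() registers via blk_queue_lld_busy().  A sketch
 * of the caller side:
 *
 *	if (blk_lld_busy(sdev->request_queue))
 *		... hold back and redispatch the request later ...
 */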
/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * The SCSI request completion path will do scsi_device_unbusy(),
	 * so bump the busy counts here first.  To bump the counters, we
	 * need to dance with the locks just as the normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}
/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = blk_peek_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *	     take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;
	struct device *dev = shost->shost_gendev.parent;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SCSI_MAX_SG_CHAIN_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	/* New queue, no concurrency on queue_flags */
	if (!shost->use_clustering)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:	scsi_block_requests()
 *
 * Purpose:	Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:	shost	- Host in question
 *
 * Returns:	Nothing
 *
 * Lock status:	No locks are assumed held.
 *
 * Notes:	There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:	scsi_unblock_requests()
 *
 * Purpose:	Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:	shost	- Host in question
 *
 * Returns:	Nothing
 *
 * Lock status:	No locks are assumed held.
 *
 * Notes:	There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

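/*
 * Example usage (illustrative sketch, disabled): a low-level driver can
 * bracket an adapter-wide operation such as a firmware reset with these
 * two calls.  my_reset_firmware() stands in for a hypothetical
 * driver-private helper.
 */
#if 0
static void example_adapter_reset(struct Scsi_Host *shost)
{
	scsi_block_requests(shost);	/* no new commands reach the LLD */
	my_reset_firmware(shost);	/* hypothetical hardware reset */
	scsi_unblock_requests(shost);	/* unblock and re-run host queues */
}
#endif
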
int __init scsi_init_queue(void)
{
	int i;

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
					      SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
			       sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
			       sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);

	return -ENOMEM;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 * scsi_mode_select - issue a mode select
 * @sdev: SCSI device to be queried
 * @pf: Page format bit (1 == standard, 0 == vendor specific)
 * @sp: Save page bit (0 == don't save, 1 == save)
 * @modepage: mode page being requested
 * @buffer: request buffer (may not be smaller than eight bytes)
 * @len: length of request buffer.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @data: returns a structure abstracting the mode header data
 * @sshdr: place to put sense data (or NULL if no sense to be collected).
 *	must be SCSI_SENSE_BUFFERSIZE big.
 *
 * Returns zero if successful; negative error number or scsi
 * status on error
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;

		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;

		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

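/*
 * Example usage (illustrative sketch, disabled): toggle the
 * write-cache-enable (WCE) bit in the caching mode page (0x08), along
 * the lines of what sd.c does for its "cache_type" attribute.  The
 * buffer size, timeout and retry count here are arbitrary illustrations.
 */
#if 0
static int example_set_wce(struct scsi_device *sdev, int enable)
{
	unsigned char buffer[64], *page;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int len, res;

	res = scsi_mode_sense(sdev, 0x08 /* DBD */, 0x08, buffer,
			      sizeof(buffer), 30 * HZ, 3, &data, NULL);
	if (!scsi_status_is_good(res))
		return -EIO;

	len = data.length - data.header_length - data.block_descriptor_length;
	if (len < 3 || len > (int)sizeof(buffer))
		return -EIO;
	page = buffer + data.header_length + data.block_descriptor_length;
	page[0] &= ~0x80;		/* clear PS before MODE SELECT */
	if (enable)
		page[2] |= 0x04;	/* WCE is bit 2 of byte 2 */
	else
		page[2] &= ~0x04;

	res = scsi_mode_select(sdev, 1 /* PF */, 0 /* SP */, 0x08, page,
			       len, 30 * HZ, 3, &data, &sshdr);
	return scsi_status_is_good(res) ? 0 : -EIO;
}
#endif
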
/**
 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
 * @sdev: SCSI device to be queried
 * @dbd: set if mode sense will allow block descriptors to be returned
 * @modepage: mode page being requested
 * @buffer: request buffer (may not be smaller than eight bytes)
 * @len: length of request buffer.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @data: returns a structure abstracting the mode header data
 * @sshdr: place to put sense data (or NULL if no sense to be collected).
 *	must be SCSI_SENSE_BUFFERSIZE big.
 *
 * Returns zero if successful, or the non-zero command result on failure.
 * On success, @data->header_length is set to the header offset (either 4
 * or 8, depending on whether a six- or ten-byte command was issued).
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

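/*
 * Example usage (illustrative sketch, disabled): read the caching mode
 * page and report whether the write cache is enabled.  The returned
 * mode data locates the start of the page past the header and any block
 * descriptors; timeout and retry values are arbitrary.
 */
#if 0
static int example_wce_enabled(struct scsi_device *sdev)
{
	unsigned char buffer[64];
	struct scsi_mode_data data;
	int res;

	res = scsi_mode_sense(sdev, 0x08 /* DBD */, 0x08, buffer,
			      sizeof(buffer), 30 * HZ, 3, &data, NULL);
	if (!scsi_status_is_good(res))
		return -EIO;
	/* WCE is bit 2 of byte 2 of the caching page */
	return !!(buffer[data.header_length +
			 data.block_descriptor_length + 2] & 0x04);
}
#endif
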
/**
 * scsi_test_unit_ready - test if unit is ready
 * @sdev: scsi device to change the state of.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *	returning sense. Make sure that this is cleared before passing
 *	in.
 *
 * Returns zero if successful, or an error if TUR failed.  For
 * removable media, a return of NOT_READY or UNIT_ATTENTION is
 * translated to success, with the ->changed flag updated.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr)
		/* could not allocate sense buffer, so can't process it */
		return result;

	if (sdev->removable && scsi_sense_valid(sshdr) &&
	    (sshdr->sense_key == UNIT_ATTENTION ||
	     sshdr->sense_key == NOT_READY)) {
		sdev->changed = 1;
		result = 0;
	}
	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

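/*
 * Example usage (illustrative sketch, disabled): poll a removable-media
 * device for readiness, sr.c-style.  The sense header must be cleared
 * before being passed in; timeout and retry values are arbitrary.
 */
#if 0
static int example_media_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;
	int res;

	memset(&sshdr, 0, sizeof(sshdr));	/* cleared, as required */
	res = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
	if (res)
		return 0;		/* TUR failed outright */
	/* ->changed is set when the medium was swapped or became ready */
	return !sdev->changed;
}
#endif
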
/**
 * scsi_device_set_state - Take the given device through the device state model.
 * @sdev: scsi device to change the state of.
 * @state: state to change to.
 *
 * Returns zero if successful, or an error if the requested
 * transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);

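/*
 * Example usage (illustrative sketch, disabled): the "state" sysfs
 * attribute in scsi_sysfs.c reduces to a call of this form; an illegal
 * transition is simply reported back as -EINVAL.
 */
#if 0
static int example_force_offline(struct scsi_device *sdev)
{
	return scsi_device_set_state(sdev, SDEV_OFFLINE);
}
#endif
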
/**
 * scsi_evt_emit - emit a single SCSI device uevent
 * @sdev: associated SCSI device
 * @evt: event to emit
 *
 * Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;

	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 * scsi_evt_thread - send a uevent for each scsi event
 * @work: work struct for scsi_device
 *
 * Dispatch queued events to their associated scsi_device kobjects
 * as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

/**
 * sdev_evt_send - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt: event to send
 *
 * Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 * sdev_evt_alloc - allocate a new scsi event
 * @evt_type: type of event to allocate
 * @gfpflags: GFP flags for allocation
 *
 * Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 * sdev_evt_send_simple - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt_type: type of event to send
 * @gfpflags: GFP flags for allocation
 *
 * Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);

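/*
 * Example usage (illustrative sketch, disabled): a driver that detects a
 * media change, e.g. from its interrupt handler, can assert the event
 * with GFP_ATOMIC; the uevent itself is emitted later from the event
 * work thread.
 */
#if 0
static void example_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}
#endif
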
/**
 * scsi_device_quiesce - Block user issued commands.
 * @sdev: scsi device to quiesce.
 *
 * This works by trying to transition to the SDEV_QUIESCE state
 * (which must be a legal transition).  When the device is in this
 * state, only special requests will be accepted, all others will
 * be deferred.  Since special requests may also be requeued requests,
 * a successful return doesn't guarantee the device will be
 * totally quiescent.
 *
 * Must be called with user context, may sleep.
 *
 * Returns zero if successful or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 * scsi_device_resume - Restart user issued commands to a quiesced device.
 * @sdev: scsi device to resume.
 *
 * Moves the device from quiesced back to running and restarts the
 * queues.
 *
 * Must be called with user context, may sleep.
 */
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

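/*
 * Example usage (illustrative sketch, disabled): bracket an operation
 * that needs exclusive use of the device, much as the SPI transport
 * class does around domain validation.  do_exclusive_work() is a
 * hypothetical stand-in.
 */
#if 0
static void example_exclusive_op(struct scsi_device *sdev)
{
	if (scsi_device_quiesce(sdev))
		return;			/* quiesce transition refused */
	do_exclusive_work(sdev);	/* hypothetical */
	scsi_device_resume(sdev);
}
#endif
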
static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device. Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_internal_device_unblock or device_block_tmo
 *	fires.  This routine assumes the host_lock is held on entry.
 */
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev: device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 */
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	if (sdev->sdev_state == SDEV_BLOCK)
		sdev->sdev_state = SDEV_RUNNING;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
		sdev->sdev_state = SDEV_CREATED;
	else if (sdev->sdev_state != SDEV_CANCEL &&
		 sdev->sdev_state != SDEV_OFFLINE)
		return -EINVAL;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);

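/*
 * Example usage (illustrative sketch, disabled): a transport class can
 * fence a whole target while its link is down and release it again on
 * recovery, roughly what the FC transport does around rport loss.
 */
#if 0
static void example_fence_target(struct device *target_dev, int link_up)
{
	if (link_up)
		scsi_target_unblock(target_dev);
	else
		scsi_target_block(target_dev);
}
#endif
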
/**
 * scsi_kmap_atomic_sg - find and atomically map an sg element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt: virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
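
/*
 * Example usage (illustrative sketch, disabled): copy the first chunk of
 * a scatter-gather list into a linear buffer with the atomic kmap
 * helpers.  Interrupts must already be disabled (the mapping helper
 * WARNs otherwise); the copy is capped to what fits in one mapped page.
 */
#if 0
static size_t example_copy_from_sg(struct scatterlist *sgl, int sg_count,
				   void *dst, size_t nbytes)
{
	size_t offset = 0, len = nbytes;
	void *vaddr;

	vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);
	if (!vaddr)
		return 0;
	memcpy(dst, vaddr + offset, len);	/* offset is now page-relative */
	scsi_kunmap_atomic_sg(vaddr);
	return len;
}
#endif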