/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
        size_t			size;
        char			*name;
        struct kmem_cache	*slab;
        mempool_t		*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
        SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
        SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
        SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
        SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * not change behaviour from the previous unplug mechanism, experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
        struct scsi_cmnd *cmd = req->special;

        blk_unprep_request(req);
        req->special = NULL;

        scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct scsi_target *starget = scsi_target(device);
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        switch (reason) {
        case SCSI_MLQUEUE_HOST_BUSY:
                host->host_blocked = host->max_host_blocked;
                break;
        case SCSI_MLQUEUE_DEVICE_BUSY:
        case SCSI_MLQUEUE_EH_RETRY:
                device->device_blocked = device->max_device_blocked;
                break;
        case SCSI_MLQUEUE_TARGET_BUSY:
                starget->target_blocked = starget->max_target_blocked;
                break;
        }

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        if (unbusy)
                scsi_device_unbusy(device);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        kblockd_schedule_work(q, &device->requeue_work);

        return 0;
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        return __scsi_queue_insert(cmd, reason, 1);
}
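
/*
 * Sketch of how commands typically reach scsi_queue_insert(): the dispatch
 * path feeds the SCSI_MLQUEUE_* return code of a low-level driver's
 * ->queuecommand() back into this requeue path.  The adapter-full check
 * below is hypothetical; only the return-code contract is real.
 *
 *	static int example_queuecommand(struct Scsi_Host *shost,
 *					struct scsi_cmnd *cmd)
 *	{
 *		if (example_ring_full(shost))		// hypothetical check
 *			return SCSI_MLQUEUE_HOST_BUSY;	// requeued by midlayer
 *		// ... hand the command to the hardware ...
 *		return 0;
 *	}
 */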

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	length of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	flags to OR into the request's cmd_flags
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, int timeout, int retries, int flags,
                 int *resid)
{
        struct request *req;
        int write = (data_direction == DMA_TO_DEVICE);
        int ret = DRIVER_ERROR << 24;

        req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
        if (!req)
                return ret;

        if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
                                       buffer, bufflen, __GFP_WAIT))
                goto out;

        req->cmd_len = COMMAND_SIZE(cmd[0]);
        memcpy(req->cmd, cmd, req->cmd_len);
        req->sense = sense;
        req->sense_len = 0;
        req->retries = retries;
        req->timeout = timeout;
        req->cmd_type = REQ_TYPE_BLOCK_PC;
        req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        blk_execute_rq(req->q, NULL, req, 1);

        /*
         * Some devices (USB mass-storage in particular) may transfer
         * garbage data together with a residue indicating that the data
         * is invalid.  Prevent the garbage from being misinterpreted
         * and prevent security leaks by zeroing out the excess data.
         */
        if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
                memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

        if (resid)
                *resid = req->resid_len;
        ret = req->errors;
 out:
        blk_put_request(req);

        return ret;
}
EXPORT_SYMBOL(scsi_execute);

int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries,
                     int *resid)
{
        char *sense = NULL;
        int result;

        if (sshdr) {
                sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
                if (!sense)
                        return DRIVER_ERROR << 24;
        }
        result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
                              sense, timeout, retries, 0, resid);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

        kfree(sense);
        return result;
}
EXPORT_SYMBOL(scsi_execute_req);
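
/*
 * Usage sketch for scsi_execute_req() (a minimal example, not part of the
 * original file; the function name and the retry/timeout values are
 * assumptions): issue a TEST UNIT READY and decode any sense data returned.
 *
 *	static int example_test_unit_ready(struct scsi_device *sdev)
 *	{
 *		unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *		struct scsi_sense_hdr sshdr;
 *		int result;
 *
 *		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
 *					  &sshdr, 10 * HZ, 3, NULL);
 *		if (result && scsi_sense_valid(&sshdr))
 *			scsi_print_sense_hdr("example", &sshdr);
 *		return result;
 *	}
 */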

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->serial_number = 0;
        scsi_set_resid(cmd, 0);
        memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
        if (cmd->cmd_len == 0)
                cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
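
/*
 * scsi_device_unbusy - undo the busy accounting taken out at dispatch time.
 * Decrements the host, target and device busy counters once a command has
 * completed, and wakes the error handler if the host is in recovery with
 * failed or EH-scheduled commands pending.  Note that interrupts stay
 * disabled across the hand-off from the host_lock to the queue_lock.
 */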
void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        struct scsi_target *starget = scsi_target(sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_busy--;
        starget->target_busy--;
        if (unlikely(scsi_host_in_recovery(shost) &&
                     (shost->host_failed || shost->host_eh_scheduled)))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);
        sdev->device_busy--;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        blk_run_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
        if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
                return 1;

        return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
        return ((starget->can_queue > 0 &&
                 starget->target_busy >= starget->can_queue) ||
                 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
        if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
            shost->host_blocked || shost->host_self_blocked)
                return 1;

        return 0;
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
        LIST_HEAD(starved_list);
        unsigned long flags;

        /* if the device is dead, sdev will be NULL, so no queue to run */
        if (!sdev)
                return;

        shost = sdev->host;
        if (scsi_target(sdev)->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
        list_splice_init(&shost->starved_list, &starved_list);

        while (!list_empty(&starved_list)) {
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                if (scsi_host_is_busy(shost))
                        break;

                sdev = list_entry(starved_list.next,
                                  struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                if (scsi_target_is_busy(scsi_target(sdev))) {
                        list_move_tail(&sdev->starved_entry,
                                       &shost->starved_list);
                        continue;
                }

                spin_unlock(shost->host_lock);
                spin_lock(sdev->request_queue->queue_lock);
                __blk_run_queue(sdev->request_queue);
                spin_unlock(sdev->request_queue->queue_lock);
                spin_lock(shost->host_lock);
        }
        /* put any unprocessed entries back */
        list_splice(&starved_list, &shost->starved_list);
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
        struct scsi_device *sdev;
        struct request_queue *q;

        sdev = container_of(work, struct scsi_device, requeue_work);
        q = sdev->request_queue;
        scsi_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        scsi_unprep_request(req);
        blk_requeue_request(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct request_queue *q = sdev->request_queue;

        /* need to hold a reference on the device before we let go of the cmd */
        get_device(&sdev->sdev_gendev);

        scsi_put_command(cmd);
        scsi_run_queue(q);

        /* ok to remove device now */
        put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
                                          int bytes, int requeue)
{
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (blk_end_request(req, error, bytes)) {
                /* kill remainder if no retries */
                if (error && scsi_noretry_cmd(cmd))
                        blk_end_request_all(req, error);
                else {
                        if (requeue) {
                                /*
                                 * Bleah.  Leftovers again.  Stick the
                                 * leftovers in the front of the
                                 * queue, and goose the queue again.
                                 */
                                scsi_release_buffers(cmd);
                                scsi_requeue_command(q, cmd);
                                cmd = NULL;
                        }
                        return cmd;
                }
        }

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        __scsi_release_buffers(cmd, 0);
        scsi_next_command(cmd);
        return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
        unsigned int index;

        BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

        if (nents <= 8)
                index = 0;
        else
                index = get_count_order(nents) - 3;

        return index;
}
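
/*
 * Worked example for the index computation above: for nents = 24,
 * get_count_order(24) = 5 (2^5 = 32 is the next power of two), so
 * index = 5 - 3 = 2, which selects the "sgpool-32" entry declared in
 * scsi_sg_pools[].  Any nents <= 8 maps to index 0, i.e. "sgpool-8".
 */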

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
        struct scsi_host_sg_pool *sgp;

        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
        mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
        struct scsi_host_sg_pool *sgp;

        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
        return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
                              gfp_t gfp_mask)
{
        int ret;

        BUG_ON(!nents);

        ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
                               gfp_mask, scsi_sg_alloc);
        if (unlikely(ret))
                __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
                                scsi_sg_free);

        return ret;
}
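
/*
 * Note on the call above: with max_ents = SCSI_MAX_SG_SEGMENTS,
 * __sg_alloc_table() allocates the table in pool-sized chunks via
 * scsi_sg_alloc() and chains the chunks together, so a request with more
 * physical segments than a single mempool entry holds still ends up with
 * one logical scatterlist.
 */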

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
        __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{
        if (cmd->sdb.table.nents)
                scsi_free_sgtable(&cmd->sdb);

        memset(&cmd->sdb, 0, sizeof(cmd->sdb));

        if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
                struct scsi_data_buffer *bidi_sdb =
                        cmd->request->next_rq->special;
                scsi_free_sgtable(bidi_sdb);
                kmem_cache_free(scsi_sdb_cache, bidi_sdb);
                cmd->request->next_rq->special = NULL;
        }

        if (scsi_prot_sg_count(cmd))
                scsi_free_sgtable(cmd->prot_sdb);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        __scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
        int error = 0;

        switch (host_byte(result)) {
        case DID_TRANSPORT_FAILFAST:
                error = -ENOLINK;
                break;
        case DID_TARGET_FAILURE:
                set_host_byte(cmd, DID_OK);
                error = -EREMOTEIO;
                break;
        case DID_NEXUS_FAILURE:
                set_host_byte(cmd, DID_OK);
                error = -EBADE;
                break;
        default:
                error = -EIO;
                break;
        }

        return error;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
        int result = cmd->result;
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int error = 0;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;
        enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
              ACTION_DELAYED_RETRY} action;
        char *description = NULL;

        if (result) {
                sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }

        if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
                req->errors = result;
                if (result) {
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
                                int len = 8 + cmd->sense_buffer[7];

                                if (len > SCSI_SENSE_BUFFERSIZE)
                                        len = SCSI_SENSE_BUFFERSIZE;
                                memcpy(req->sense, cmd->sense_buffer, len);
                                req->sense_len = len;
                        }
                        if (!sense_deferred)
                                error = __scsi_error_from_host_byte(cmd, result);
                }

                req->resid_len = scsi_get_resid(cmd);

                if (scsi_bidi_cmnd(cmd)) {
                        /*
                         * Bidi commands must be completed as a whole,
                         * both sides at once.
                         */
                        req->next_rq->resid_len = scsi_in(cmd)->resid;

                        scsi_release_buffers(cmd);
                        blk_end_request_all(req, 0);

                        scsi_next_command(cmd);
                        return;
                }
        }

        /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
        BUG_ON(blk_bidi_rq(req));

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
                                      "%d bytes done.\n",
                                      blk_rq_sectors(req), good_bytes));

        /*
         * Recovered errors need reporting, but they're always treated
         * as success, so fiddle the result code here.  For BLOCK_PC
         * we already took a copy of the original into rq->errors which
         * is what gets returned to the user
         */
        if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
                /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
                 * print since caller wants ATA registers. Only occurs on
                 * SCSI ATA PASS_THROUGH commands when CK_COND=1
                 */
                if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
                        ;
                else if (!(req->cmd_flags & REQ_QUIET))
                        scsi_print_sense("", cmd);
                result = 0;
                /* BLOCK_PC may have set error */
                error = 0;
        }

        /*
         * A number of bytes were successfully read.  If there
         * are leftovers and there is some kind of error
         * (result != 0), retry the rest.
         */
        if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
                return;

        error = __scsi_error_from_host_byte(cmd, result);

        if (host_byte(result) == DID_RESET) {
                /* Third party bus reset or reset for error recovery
                 * reasons.  Just retry the command and see what
                 * happens.
                 */
                action = ACTION_RETRY;
        } else if (sense_valid && !sense_deferred) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* Detected disc change.  Set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                description = "Media Changed";
                                action = ACTION_FAIL;
                        } else {
                                /* Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * command and see what happens.
                                 */
                                action = ACTION_RETRY;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /* If we had an ILLEGAL REQUEST returned, then
                         * we may have performed an unsupported
                         * command.  The only thing this should be
                         * would be a ten byte read where only a six
                         * byte read was supported.  Also, on a system
                         * where READ CAPACITY failed, we may have
                         * read past the end of the disk.
                         */
                        if ((cmd->device->use_10_for_rw &&
                            sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                /* This will issue a new 6-byte command. */
                                cmd->device->use_10_for_rw = 0;
                                action = ACTION_REPREP;
                        } else if (sshdr.asc == 0x10) /* DIX */ {
                                description = "Host Data Integrity Failure";
                                action = ACTION_FAIL;
                                error = -EILSEQ;
                        /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
                        } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
                                   (cmd->cmnd[0] == UNMAP ||
                                    cmd->cmnd[0] == WRITE_SAME_16 ||
                                    cmd->cmnd[0] == WRITE_SAME)) {
                                description = "Discard failure";
                                action = ACTION_FAIL;
                                error = -EREMOTEIO;
                        } else
                                action = ACTION_FAIL;
                        break;
                case ABORTED_COMMAND:
                        action = ACTION_FAIL;
                        if (sshdr.asc == 0x10) { /* DIF */
                                description = "Target Data Integrity Failure";
                                error = -EILSEQ;
                        }
                        break;
                case NOT_READY:
                        /* If the device is in the process of becoming
                         * ready, or has a temporary blockage, retry.
                         */
                        if (sshdr.asc == 0x04) {
                                switch (sshdr.ascq) {
                                case 0x01: /* becoming ready */
                                case 0x04: /* format in progress */
                                case 0x05: /* rebuild in progress */
                                case 0x06: /* recalculation in progress */
                                case 0x07: /* operation in progress */
                                case 0x08: /* Long write in progress */
                                case 0x09: /* self test in progress */
                                case 0x14: /* space allocation in progress */
                                        action = ACTION_DELAYED_RETRY;
                                        break;
                                default:
                                        description = "Device not ready";
                                        action = ACTION_FAIL;
                                        break;
                                }
                        } else {
                                description = "Device not ready";
                                action = ACTION_FAIL;
                        }
                        break;
                case VOLUME_OVERFLOW:
                        /* See SSC3rXX or current. */
                        action = ACTION_FAIL;
                        break;
                default:
                        description = "Unhandled sense code";
                        action = ACTION_FAIL;
                        break;
                }
        } else {
                description = "Unhandled error code";
                action = ACTION_FAIL;
        }

        switch (action) {
        case ACTION_FAIL:
                /* Give up and fail the remainder of the request */
                scsi_release_buffers(cmd);
                if (!(req->cmd_flags & REQ_QUIET)) {
                        if (description)
                                scmd_printk(KERN_INFO, cmd, "%s\n",
                                            description);
                        scsi_print_result(cmd);
                        if (driver_byte(result) & DRIVER_SENSE)
                                scsi_print_sense("", cmd);
                        scsi_print_command(cmd);
                }
                if (blk_end_request_err(req, error))
                        scsi_requeue_command(q, cmd);
                else
                        scsi_next_command(cmd);
                break;
        case ACTION_REPREP:
                /* Unprep the request and put it back at the head of the queue.
                 * A new command will be prepared and issued.
                 */
                scsi_release_buffers(cmd);
                scsi_requeue_command(q, cmd);
                break;
        case ACTION_RETRY:
                /* Retry the same command immediately */
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
                break;
        case ACTION_DELAYED_RETRY:
                /* Retry the same command after a delay */
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
                break;
        }
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
                             gfp_t gfp_mask)
{
        int count;

        /*
         * If sg table allocation fails, requeue request later.
         */
        if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
                                        gfp_mask))) {
                return BLKPREP_DEFER;
        }

        req->buffer = NULL;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
        sdb->length = blk_rq_bytes(req);
        return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
        struct request *rq = cmd->request;

        int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
        if (error)
                goto err_exit;

        if (blk_bidi_rq(rq)) {
                struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
                        scsi_sdb_cache, GFP_ATOMIC);
                if (!bidi_sdb) {
                        error = BLKPREP_DEFER;
                        goto err_exit;
                }

                rq->next_rq->special = bidi_sdb;
                error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
                if (error)
                        goto err_exit;
        }

        if (blk_integrity_rq(rq)) {
                struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
                int ivecs, count;

                BUG_ON(prot_sdb == NULL);
                ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

                if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
                        error = BLKPREP_DEFER;
                        goto err_exit;
                }

                count = blk_rq_map_integrity_sg(rq->q, rq->bio,
                                                prot_sdb->table.sgl);
                BUG_ON(unlikely(count > ivecs));
                BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

                cmd->prot_sdb = prot_sdb;
                cmd->prot_sdb->table.nents = count;
        }

        return BLKPREP_OK;

err_exit:
        scsi_release_buffers(cmd);
        cmd->request->special = NULL;
        scsi_put_command(cmd);
        return error;
}
EXPORT_SYMBOL(scsi_init_io);
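
/*
 * Caller sketch for scsi_init_io() (a minimal example loosely modeled on a
 * ULD prep function; the helper name, sector size and retry budget below
 * are assumptions): map the request first, then fill in the CDB.
 *
 *	static int example_prep_read10(struct scsi_cmnd *cmd)
 *	{
 *		struct request *rq = cmd->request;
 *		sector_t lba = blk_rq_pos(rq);
 *		unsigned int nr = blk_rq_sectors(rq);
 *		int ret = scsi_init_io(cmd, GFP_ATOMIC);
 *
 *		if (ret != BLKPREP_OK)
 *			return ret;
 *		cmd->cmnd[0] = READ_10;
 *		cmd->cmnd[2] = (lba >> 24) & 0xff;
 *		cmd->cmnd[3] = (lba >> 16) & 0xff;
 *		cmd->cmnd[4] = (lba >> 8) & 0xff;
 *		cmd->cmnd[5] = lba & 0xff;
 *		cmd->cmnd[7] = (nr >> 8) & 0xff;
 *		cmd->cmnd[8] = nr & 0xff;
 *		cmd->cmd_len = 10;
 *		cmd->transfersize = 512;	// assumed sector size
 *		cmd->allowed = 5;		// assumed retry budget
 *		return BLKPREP_OK;
 *	}
 */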

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
                struct request *req)
{
        struct scsi_cmnd *cmd;

        if (!req->special) {
                cmd = scsi_get_command(sdev, GFP_ATOMIC);
                if (unlikely(!cmd))
                        return NULL;
                req->special = cmd;
        } else {
                cmd = req->special;
        }

        /* pull a tag out of the request if we have one */
        cmd->tag = req->tag;
        cmd->request = req;

        cmd->cmnd = req->cmd;
        cmd->prot_op = SCSI_PROT_NORMAL;

        return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
        struct scsi_cmnd *cmd;
        int ret = scsi_prep_state_check(sdev, req);

        if (ret != BLKPREP_OK)
                return ret;

        cmd = scsi_get_cmd_from_req(sdev, req);
        if (unlikely(!cmd))
                return BLKPREP_DEFER;

        /*
         * BLOCK_PC requests may transfer data, in which case they must
         * have a bio attached to them.  Or they might contain a SCSI command
         * that does not transfer data, in which case they may optionally
         * submit a request without an attached bio.
         */
        if (req->bio) {
                int ret;

                BUG_ON(!req->nr_phys_segments);

                ret = scsi_init_io(cmd, GFP_ATOMIC);
                if (unlikely(ret))
                        return ret;
        } else {
                BUG_ON(blk_rq_bytes(req));

                memset(&cmd->sdb, 0, sizeof(cmd->sdb));
                req->buffer = NULL;
        }

        cmd->cmd_len = req->cmd_len;
        if (!blk_rq_bytes(req))
                cmd->sc_data_direction = DMA_NONE;
        else if (rq_data_dir(req) == WRITE)
                cmd->sc_data_direction = DMA_TO_DEVICE;
        else
                cmd->sc_data_direction = DMA_FROM_DEVICE;

        cmd->transfersize = blk_rq_bytes(req);
        cmd->allowed = req->retries;
        return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs by
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
        struct scsi_cmnd *cmd;
        int ret = scsi_prep_state_check(sdev, req);

        if (ret != BLKPREP_OK)
                return ret;

        if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
                         && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
                ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
                if (ret != BLKPREP_OK)
                        return ret;
        }

        /*
         * Filesystem requests must transfer data.
         */
        BUG_ON(!req->nr_phys_segments);

        cmd = scsi_get_cmd_from_req(sdev, req);
        if (unlikely(!cmd))
                return BLKPREP_DEFER;

        memset(cmd->cmnd, 0, BLK_MAX_CDB);
        return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
        int ret = BLKPREP_OK;

        /*
         * If the device is not in running state we will reject some
         * or all commands.
         */
        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
                switch (sdev->sdev_state) {
                case SDEV_OFFLINE:
                        /*
                         * If the device is offline we refuse to process any
                         * commands.  The device must be brought online
                         * before trying any recovery commands.
                         */
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to offline device\n");
                        ret = BLKPREP_KILL;
                        break;
                case SDEV_DEL:
                        /*
                         * If the device is fully deleted, we refuse to
                         * process any commands as well.
                         */
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to dead device\n");
                        ret = BLKPREP_KILL;
                        break;
                case SDEV_QUIESCE:
                case SDEV_BLOCK:
                case SDEV_CREATED_BLOCK:
                        /*
                         * If the device is blocked we defer normal commands.
                         */
                        if (!(req->cmd_flags & REQ_PREEMPT))
                                ret = BLKPREP_DEFER;
                        break;
                default:
                        /*
                         * For any other not fully online state we only allow
                         * special commands.  In particular any user initiated
                         * command is not allowed.
                         */
                        if (!(req->cmd_flags & REQ_PREEMPT))
                                ret = BLKPREP_KILL;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
        struct scsi_device *sdev = q->queuedata;

        switch (ret) {
        case BLKPREP_KILL:
                req->errors = DID_NO_CONNECT << 16;
                /* release the command and kill it */
                if (req->special) {
                        struct scsi_cmnd *cmd = req->special;
                        scsi_release_buffers(cmd);
                        scsi_put_command(cmd);
                        req->special = NULL;
                }
                break;
        case BLKPREP_DEFER:
                /*
                 * If we defer, the blk_peek_request() returns NULL, but the
                 * queue must be restarted, so we schedule a callback to happen
                 * shortly.
                 */
                if (sdev->device_busy == 0)
                        blk_delay_queue(q, SCSI_QUEUE_DELAY);
                break;
        default:
                req->cmd_flags |= REQ_DONTPREP;
        }

        return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        int ret = BLKPREP_KILL;

        if (req->cmd_type == REQ_TYPE_BLOCK_PC)
                ret = scsi_setup_blk_pc_cmnd(sdev, req);
        return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);
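
/*
 * Prep-function sketch (illustrative; upper-level drivers such as sd install
 * their own prep_fn with blk_queue_prep_rq() instead of the default above):
 * handle both passthrough and filesystem requests, then funnel the result
 * through scsi_prep_return().  The function name is hypothetical and the
 * CDB fill-in step is elided.
 *
 *	static int example_uld_prep_fn(struct request_queue *q,
 *				       struct request *rq)
 *	{
 *		struct scsi_device *sdev = q->queuedata;
 *		int ret = BLKPREP_KILL;
 *
 *		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 *			ret = scsi_setup_blk_pc_cmnd(sdev, rq);
 *		} else if (rq->cmd_type == REQ_TYPE_FS) {
 *			ret = scsi_setup_fs_cmnd(sdev, rq);
 *			// ... then build the CDB, as in the sketch above ...
 *		}
 *		return scsi_prep_return(q, rq, ret);
 *	}
 */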

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
                                  struct scsi_device *sdev)
{
        if (sdev->device_busy == 0 && sdev->device_blocked) {
                /*
                 * unblock after device_blocked iterates to zero
                 */
                if (--sdev->device_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                   sdev_printk(KERN_INFO, sdev,
                                   "unblocking device at zero depth\n"));
                } else {
                        blk_delay_queue(q, SCSI_QUEUE_DELAY);
                        return 0;
                }
        }
        if (scsi_device_is_busy(sdev))
                return 0;

        return 1;
}

/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
                                           struct scsi_device *sdev)
{
        struct scsi_target *starget = scsi_target(sdev);

        if (starget->single_lun) {
                if (starget->starget_sdev_user &&
                    starget->starget_sdev_user != sdev)
                        return 0;
                starget->starget_sdev_user = sdev;
        }

        if (starget->target_busy == 0 && starget->target_blocked) {
                /*
                 * unblock after target_blocked iterates to zero
                 */
                if (--starget->target_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
                                         "unblocking target at zero depth\n"));
                } else
                        return 0;
        }

        if (scsi_target_is_busy(starget)) {
                list_move_tail(&sdev->starved_entry, &shost->starved_list);
                return 0;
        }

        return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
                                   struct Scsi_Host *shost,
                                   struct scsi_device *sdev)
{
        if (scsi_host_in_recovery(shost))
                return 0;
        if (shost->host_busy == 0 && shost->host_blocked) {
                /*
                 * unblock after host_blocked iterates to zero
                 */
                if (--shost->host_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                printk("scsi%d unblocking host at zero depth\n",
                                        shost->host_no));
                } else {
                        return 0;
                }
        }
        if (scsi_host_is_busy(shost)) {
                if (list_empty(&sdev->starved_entry))
                        list_add_tail(&sdev->starved_entry, &shost->starved_list);
                return 0;
        }

        /* We're OK to process the command, so we can't be starved */
        if (!list_empty(&sdev->starved_entry))
                list_del_init(&sdev->starved_entry);

        return 1;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
 * (e.g. !sdev), scsi needs to return 'not busy'.
 * Otherwise, request stacking drivers may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
        struct scsi_target *starget;

        if (!sdev)
                return 0;

        shost = sdev->host;
        starget = scsi_target(sdev);

        if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
            scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
                return 1;

        return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
        struct scsi_cmnd *cmd = req->special;
        struct scsi_device *sdev;
        struct scsi_target *starget;
        struct Scsi_Host *shost;

        blk_start_request(req);

        scmd_printk(KERN_INFO, cmd, "killing request\n");

        sdev = cmd->device;
        starget = scsi_target(sdev);
        shost = sdev->host;
        scsi_init_cmd_errh(cmd);
        cmd->result = DID_NO_CONNECT << 16;
        atomic_inc(&cmd->device->iorequest_cnt);

        /*
         * SCSI request completion path will do scsi_device_unbusy(),
         * bump busy counts.  To bump the counters, we need to dance
         * with the locks as normal issue path does.
         */
        sdev->device_busy++;
        spin_unlock(sdev->request_queue->queue_lock);
        spin_lock(shost->host_lock);
        shost->host_busy++;
        starget->target_busy++;
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);

        blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
        struct scsi_cmnd *cmd = rq->special;
        unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
        int disposition;

        INIT_LIST_HEAD(&cmd->eh_entry);

        atomic_inc(&cmd->device->iodone_cnt);
        if (cmd->result)
                atomic_inc(&cmd->device->ioerr_cnt);

        disposition = scsi_decide_disposition(cmd);
        if (disposition != SUCCESS &&
            time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
                sdev_printk(KERN_ERR, cmd->device,
                            "timing out command, waited %lus\n",
                            wait_for/HZ);
                disposition = SUCCESS;
        }

        scsi_log_completion(cmd, disposition);

        switch (disposition) {
        case SUCCESS:
                scsi_finish_command(cmd);
                break;
        case NEEDS_RETRY:
                scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
                break;
        case ADD_TO_MLQUEUE:
                scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
                break;
        default:
                if (!scsi_eh_scmd_add(cmd, 0))
                        scsi_finish_command(cmd);
        }
}
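
/*
 * Worked example for the wait_for budget above: a command allowed 5 retries
 * on a request with a 30 second timeout gets (5 + 1) * 30s = 180 seconds in
 * total; once jiffies_at_alloc lies further in the past than that, the
 * disposition is forced to SUCCESS so the command completes with its error
 * instead of being retried indefinitely.
 */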

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
        struct scsi_cmnd *cmd;
        struct request *req;

        if (!sdev) {
                while ((req = blk_peek_request(q)) != NULL)
                        scsi_kill_request(req, q);
                return;
        }

        if (!get_device(&sdev->sdev_gendev))
                /* We must be tearing the block queue down already */
                return;

        /*
         * To start with, we keep looping until the queue is empty, or until
         * the host is no longer able to accept any more requests.
         */
        shost = sdev->host;
        for (;;) {
                int rtn;
                /*
                 * get next queueable request.  We do this early to make sure
                 * that the request is fully prepared even if we cannot
                 * accept it.
                 */
                req = blk_peek_request(q);
                if (!req || !scsi_dev_queue_ready(q, sdev))
                        break;

                if (unlikely(!scsi_device_online(sdev))) {
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to offline device\n");
                        scsi_kill_request(req, q);
                        continue;
                }

                /*
                 * Remove the request from the request list.
                 */
                if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
                        blk_start_request(req);
                sdev->device_busy++;

                spin_unlock(q->queue_lock);
                cmd = req->special;
                if (unlikely(cmd == NULL)) {
                        printk(KERN_CRIT "impossible request in %s.\n"
                               "please mail a stack trace to "
                               "linux-scsi@vger.kernel.org\n",
                               __func__);
                        blk_dump_rq_flags(req, "foo");
                        BUG();
                }
                spin_lock(shost->host_lock);

                /*
                 * We hit this when the driver is using a host wide
                 * tag map. For device level tag maps the queue_depth check
                 * in the device ready fn would prevent us from trying
                 * to allocate a tag. Since the map is a shared host resource
                 * we add the dev to the starved list so it eventually gets
                 * a run when a tag is freed.
                 */
                if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
                        if (list_empty(&sdev->starved_entry))
                                list_add_tail(&sdev->starved_entry,
                                              &shost->starved_list);
                        goto not_ready;
                }

                if (!scsi_target_queue_ready(shost, sdev))
                        goto not_ready;

                if (!scsi_host_queue_ready(q, shost, sdev))
                        goto not_ready;

                scsi_target(sdev)->target_busy++;
                shost->host_busy++;

                /*
                 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
                 * take the lock again.
                 */
                spin_unlock_irq(shost->host_lock);

                /*
                 * Finally, initialize any error handling parameters, and set up
                 * the timers for timeouts.
                 */
                scsi_init_cmd_errh(cmd);

                /*
                 * Dispatch the command to the low-level driver.
                 */
                rtn = scsi_dispatch_cmd(cmd);
                spin_lock_irq(q->queue_lock);
                if (rtn)
                        goto out_delay;
        }

        goto out;

 not_ready:
        spin_unlock_irq(shost->host_lock);

        /*
         * lock q, handle tag, requeue req, and decrement device_busy. We
         * must return with queue_lock held.
         *
         * Decrementing device_busy without checking it is OK, as all such
         * cases (host limits or settings) should run the queue at some
         * later time.
         */
        spin_lock_irq(q->queue_lock);
        blk_requeue_request(q, req);
        sdev->device_busy--;
out_delay:
        if (sdev->device_busy == 0)
                blk_delay_queue(q, SCSI_QUEUE_DELAY);
out:
        /* must be careful here...if we trigger the ->remove() function
         * we cannot be holding the q lock */
        spin_unlock_irq(q->queue_lock);
        put_device(&sdev->sdev_gendev);
        spin_lock_irq(q->queue_lock);
}
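
/*
 * Locking note for scsi_request_fn() (informational summary of the code
 * above, not new behaviour): the function is entered with q->queue_lock
 * held and interrupts disabled.  queue_lock is dropped while host_lock is
 * taken to reserve host/target slots, and both locks are dropped across
 * scsi_dispatch_cmd() so that the LLD's queuecommand can run without any
 * block layer lock held.  queue_lock is always re-acquired before return.
 */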

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
        struct device *host_dev;
        u64 bounce_limit = 0xffffffff;

        if (shost->unchecked_isa_dma)
                return BLK_BOUNCE_ISA;
        /*
         * Platforms with virtual-DMA translation
         * hardware have no practical limit.
         */
        if (!PCI_DMA_BUS_IS_PHYS)
                return BLK_BOUNCE_ANY;

        host_dev = scsi_get_device(shost);
        if (host_dev && host_dev->dma_mask)
                bounce_limit = *host_dev->dma_mask;

        return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);
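
/*
 * A minimal sketch of how an LLD might reapply the host's bounce limit to
 * a device's queue, e.g. from its slave_configure hook (illustrative only;
 * my_slave_configure() is a hypothetical driver callback):
 */
#if 0
static int my_slave_configure(struct scsi_device *sdev)
{
        blk_queue_bounce_limit(sdev->request_queue,
                               scsi_calculate_bounce_limit(sdev->host));
        return 0;
}
#endif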

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
                                         request_fn_proc *request_fn)
{
        struct request_queue *q;
        struct device *dev = shost->shost_gendev.parent;

        q = blk_init_queue(request_fn, NULL);
        if (!q)
                return NULL;

        /*
         * this limit is imposed by hardware restrictions
         */
        blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
                                        SCSI_MAX_SG_CHAIN_SEGMENTS));

        if (scsi_host_prot_dma(shost)) {
                shost->sg_prot_tablesize =
                        min_not_zero(shost->sg_prot_tablesize,
                                     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
                BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
                blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
        }

        blk_queue_max_hw_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
        blk_queue_segment_boundary(q, shost->dma_boundary);
        dma_set_seg_boundary(dev, shost->dma_boundary);

        blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

        if (!shost->use_clustering)
                q->limits.cluster = 0;

        /*
         * set a reasonable default alignment on word boundaries: the
         * host and device may alter it using
         * blk_queue_update_dma_alignment() later.
         */
        blk_queue_dma_alignment(q, 0x03);

        return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);
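
/*
 * Sketch of how a midlayer user could build a queue that inherits the
 * host's DMA limits but runs its own strategy routine, in the style of
 * scsi_alloc_queue() below (illustrative only; my_request_fn() is a
 * hypothetical request handler):
 */
#if 0
static struct request_queue *my_alloc_queue(struct Scsi_Host *shost)
{
        struct request_queue *q = __scsi_alloc_queue(shost, my_request_fn);

        if (!q)
                return NULL;
        /* further blk_queue_*() tuning could go here */
        return q;
}
#endif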

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
        struct request_queue *q;

        q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
        if (!q)
                return NULL;

        blk_queue_prep_rq(q, scsi_prep_fn);
        blk_queue_softirq_done(q, scsi_softirq_done);
        blk_queue_rq_timed_out(q, scsi_times_out);
        blk_queue_lld_busy(q, scsi_lld_busy);
        return q;
}

void scsi_free_queue(struct request_queue *q)
{
        unsigned long flags;

        WARN_ON(q->queuedata);

        /* cause scsi_request_fn() to kill all non-finished requests */
        spin_lock_irqsave(q->queue_lock, flags);
        q->request_fn(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *              commands from being queued to the device.
 *
 * Arguments:   shost - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *              get unblocked other than the low-level driver calling
 *              scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
        shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *              commands to be queued to the device.
 *
 * Arguments:   shost - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *              get unblocked other than the low-level driver calling
 *              scsi_unblock_requests().
 *
 *              This is done as an API function so that changes to the
 *              internals of the scsi mid-layer won't require wholesale
 *              changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
        shost->host_self_blocked = 0;
        scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
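
/*
 * Typical pairing in an LLD (illustrative sketch; my_reset_firmware() is a
 * hypothetical driver routine): block the host before a disruptive
 * operation, unblock it afterwards so queued commands resume.
 */
#if 0
static int my_host_reset(struct Scsi_Host *shost)
{
        int ret;

        scsi_block_requests(shost);
        ret = my_reset_firmware(shost);
        scsi_unblock_requests(shost);
        return ret;
}
#endif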

int __init scsi_init_queue(void)
{
        int i;

        scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
                                           sizeof(struct scsi_data_buffer),
                                           0, 0, NULL);
        if (!scsi_sdb_cache) {
                printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
                return -ENOMEM;
        }

        for (i = 0; i < SG_MEMPOOL_NR; i++) {
                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
                int size = sgp->size * sizeof(struct scatterlist);

                sgp->slab = kmem_cache_create(sgp->name, size, 0,
                                              SLAB_HWCACHE_ALIGN, NULL);
                if (!sgp->slab) {
                        printk(KERN_ERR "SCSI: can't init sg slab %s\n",
                               sgp->name);
                        goto cleanup_sdb;
                }

                sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
                                                     sgp->slab);
                if (!sgp->pool) {
                        printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
                               sgp->name);
                        goto cleanup_sdb;
                }
        }

        return 0;

cleanup_sdb:
        for (i = 0; i < SG_MEMPOOL_NR; i++) {
                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
                if (sgp->pool)
                        mempool_destroy(sgp->pool);
                if (sgp->slab)
                        kmem_cache_destroy(sgp->slab);
        }
        kmem_cache_destroy(scsi_sdb_cache);

        return -ENOMEM;
}

void scsi_exit_queue(void)
{
        int i;

        kmem_cache_destroy(scsi_sdb_cache);

        for (i = 0; i < SG_MEMPOOL_NR; i++) {
                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
                mempool_destroy(sgp->pool);
                kmem_cache_destroy(sgp->slab);
        }
}

/**
 * scsi_mode_select - issue a mode select
 * @sdev: SCSI device to be queried
 * @pf: Page format bit (1 == standard, 0 == vendor specific)
 * @sp: Save page bit (0 == don't save, 1 == save)
 * @modepage: mode page being requested
 * @buffer: request buffer (may not be smaller than eight bytes)
 * @len: length of request buffer.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @data: returns a structure abstracting the mode header data
 * @sshdr: place to put sense data (or NULL if no sense to be collected).
 *
 * Returns zero if successful; negative error number or scsi
 * status on error
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
                 unsigned char *buffer, int len, int timeout, int retries,
                 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
        unsigned char cmd[10];
        unsigned char *real_buffer;
        int ret;

        memset(cmd, 0, sizeof(cmd));
        cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

        if (sdev->use_10_for_ms) {
                if (len > 65535)
                        return -EINVAL;
                real_buffer = kmalloc(8 + len, GFP_KERNEL);
                if (!real_buffer)
                        return -ENOMEM;
                memcpy(real_buffer + 8, buffer, len);
                len += 8;
                real_buffer[0] = 0;
                real_buffer[1] = 0;
                real_buffer[2] = data->medium_type;
                real_buffer[3] = data->device_specific;
                real_buffer[4] = data->longlba ? 0x01 : 0;
                real_buffer[5] = 0;
                real_buffer[6] = data->block_descriptor_length >> 8;
                real_buffer[7] = data->block_descriptor_length;

                cmd[0] = MODE_SELECT_10;
                cmd[7] = len >> 8;
                cmd[8] = len;
        } else {
                if (len > 255 || data->block_descriptor_length > 255 ||
                    data->longlba)
                        return -EINVAL;

                real_buffer = kmalloc(4 + len, GFP_KERNEL);
                if (!real_buffer)
                        return -ENOMEM;
                memcpy(real_buffer + 4, buffer, len);
                len += 4;
                real_buffer[0] = 0;
                real_buffer[1] = data->medium_type;
                real_buffer[2] = data->device_specific;
                real_buffer[3] = data->block_descriptor_length;

                cmd[0] = MODE_SELECT;
                cmd[4] = len;
        }

        ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
                               sshdr, timeout, retries, NULL);
        kfree(real_buffer);
        return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
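
/*
 * A minimal sketch of a caller flipping the write-cache-enable bit in the
 * caching mode page (page 0x08), much as sd does: read the page with
 * scsi_mode_sense(), modify it, then write it back with scsi_mode_select().
 * Illustrative only; my_set_wce() is hypothetical and error handling is
 * reduced to the essentials.
 */
#if 0
static int my_set_wce(struct scsi_device *sdev, int enable)
{
        unsigned char buf[64];
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
        unsigned char *page;
        int res;

        res = scsi_mode_sense(sdev, 0x08 /* DBD */, 0x08, buf, sizeof(buf),
                              5 * HZ, 3, &data, &sshdr);
        if (!scsi_status_is_good(res))
                return -EIO;

        /* the mode page follows the header and any block descriptors */
        page = buf + data.header_length + data.block_descriptor_length;
        if (enable)
                page[2] |= 0x04;        /* WCE bit */
        else
                page[2] &= ~0x04;

        /* device-specific byte (e.g. WP) is reserved in MODE SELECT */
        data.device_specific = 0;

        return scsi_mode_select(sdev, 1, 0, 0x08, page, page[1] + 2,
                                5 * HZ, 3, &data, &sshdr);
}
#endif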

/**
 * scsi_mode_sense - issue a mode sense, falling back from ten to six
 * byte commands if necessary.
 * @sdev: SCSI device to be queried
 * @dbd: set if mode sense will allow block descriptors to be returned
 * @modepage: mode page being requested
 * @buffer: request buffer (may not be smaller than eight bytes)
 * @len: length of request buffer.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @data: returns a structure abstracting the mode header data
 * @sshdr: place to put sense data (or NULL if no sense to be collected).
 *
 * Returns the scsi_execute_req() result: zero if successful, nonzero
 * otherwise.  On success the header length (either 4 or 8, depending
 * on whether a six or ten byte command was issued) is returned in
 * data->header_length.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
                unsigned char *buffer, int len, int timeout, int retries,
                struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
        unsigned char cmd[12];
        int use_10_for_ms;
        int header_length;
        int result;
        struct scsi_sense_hdr my_sshdr;

        memset(data, 0, sizeof(*data));
        memset(&cmd[0], 0, 12);
        cmd[1] = dbd & 0x18;    /* allows DBD and LLBA bits */
        cmd[2] = modepage;

        /* caller might not be interested in sense, but we need it */
        if (!sshdr)
                sshdr = &my_sshdr;

 retry:
        use_10_for_ms = sdev->use_10_for_ms;

        if (use_10_for_ms) {
                if (len < 8)
                        len = 8;

                cmd[0] = MODE_SENSE_10;
                cmd[8] = len;
                header_length = 8;
        } else {
                if (len < 4)
                        len = 4;

                cmd[0] = MODE_SENSE;
                cmd[4] = len;
                header_length = 4;
        }

        memset(buffer, 0, len);

        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
                                  sshdr, timeout, retries, NULL);

        /* This code looks awful: what it's doing is making sure an
         * ILLEGAL REQUEST sense return identifies the actual command
         * byte as the problem.  MODE_SENSE commands can return
         * ILLEGAL REQUEST if the code page isn't supported */

        if (use_10_for_ms && !scsi_status_is_good(result) &&
            (driver_byte(result) & DRIVER_SENSE)) {
                if (scsi_sense_valid(sshdr)) {
                        if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
                            (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
                                /*
                                 * Invalid command operation code
                                 */
                                sdev->use_10_for_ms = 0;
                                goto retry;
                        }
                }
        }

        if (scsi_status_is_good(result)) {
                if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
                             (modepage == 6 || modepage == 8))) {
                        /* Initio breakage? */
                        header_length = 0;
                        data->length = 13;
                        data->medium_type = 0;
                        data->device_specific = 0;
                        data->longlba = 0;
                        data->block_descriptor_length = 0;
                } else if (use_10_for_ms) {
                        data->length = buffer[0]*256 + buffer[1] + 2;
                        data->medium_type = buffer[2];
                        data->device_specific = buffer[3];
                        data->longlba = buffer[4] & 0x01;
                        data->block_descriptor_length = buffer[6]*256
                                + buffer[7];
                } else {
                        data->length = buffer[0] + 1;
                        data->medium_type = buffer[1];
                        data->device_specific = buffer[2];
                        data->block_descriptor_length = buffer[3];
                }
                data->header_length = header_length;
        }

        return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
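
/*
 * Usage sketch (illustrative only; my_read_control_page() is hypothetical):
 * ask for the control mode page (0x0a) and locate the start of the page
 * data using the header and block descriptor lengths that scsi_mode_sense()
 * reports back through @data.
 */
#if 0
static int my_read_control_page(struct scsi_device *sdev)
{
        unsigned char buf[64];
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
        int res;

        res = scsi_mode_sense(sdev, 0, 0x0a, buf, sizeof(buf),
                              5 * HZ, 3, &data, &sshdr);
        if (!scsi_status_is_good(res))
                return -EIO;

        sdev_printk(KERN_INFO, sdev, "control page data at offset %d\n",
                    data.header_length + data.block_descriptor_length);
        return 0;
}
#endif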

/**
 * scsi_test_unit_ready - test if unit is ready
 * @sdev: scsi device to change the state of.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *     returning sense. Make sure that this is cleared before passing
 *     in.
 *
 * Returns zero if successful, or an error code if the TUR failed.  For
 * removable media, UNIT_ATTENTION sets ->changed flag.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
                     struct scsi_sense_hdr *sshdr_external)
{
        char cmd[] = {
                TEST_UNIT_READY, 0, 0, 0, 0, 0,
        };
        struct scsi_sense_hdr *sshdr;
        int result;

        if (!sshdr_external)
                sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
        else
                sshdr = sshdr_external;

        /* try to eat the UNIT_ATTENTION if there are enough retries */
        do {
                result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
                                          timeout, retries, NULL);
                if (sdev->removable && scsi_sense_valid(sshdr) &&
                    sshdr->sense_key == UNIT_ATTENTION)
                        sdev->changed = 1;
        } while (scsi_sense_valid(sshdr) &&
                 sshdr->sense_key == UNIT_ATTENTION && --retries);

        if (!sshdr_external)
                kfree(sshdr);
        return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
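
/*
 * Usage sketch (illustrative only; my_media_present() is hypothetical):
 * poll a removable device for readiness, treating NOT READY sense as
 * "no medium" rather than as a hard error.
 */
#if 0
static int my_media_present(struct scsi_device *sdev)
{
        struct scsi_sense_hdr sshdr = { };
        int res;

        res = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
        if (res == 0)
                return 1;       /* ready: medium present */
        if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY)
                return 0;       /* responding, but no medium */
        return -EIO;            /* some other failure */
}
#endif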

/**
 * scsi_device_set_state - Take the given device through the device state model.
 * @sdev: scsi device to change the state of.
 * @state: state to change to.
 *
 * Returns zero if successful, or -EINVAL if the requested
 * transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
        enum scsi_device_state oldstate = sdev->sdev_state;

        if (state == oldstate)
                return 0;

        switch (state) {
        case SDEV_CREATED:
                switch (oldstate) {
                case SDEV_CREATED_BLOCK:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SDEV_RUNNING:
                switch (oldstate) {
                case SDEV_CREATED:
                case SDEV_OFFLINE:
                case SDEV_QUIESCE:
                case SDEV_BLOCK:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SDEV_QUIESCE:
                switch (oldstate) {
                case SDEV_RUNNING:
                case SDEV_OFFLINE:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SDEV_OFFLINE:
                switch (oldstate) {
                case SDEV_CREATED:
                case SDEV_RUNNING:
                case SDEV_QUIESCE:
                case SDEV_BLOCK:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SDEV_BLOCK:
                switch (oldstate) {
                case SDEV_RUNNING:
                case SDEV_CREATED_BLOCK:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SDEV_CREATED_BLOCK:
                switch (oldstate) {
                case SDEV_CREATED:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SDEV_CANCEL:
                switch (oldstate) {
                case SDEV_CREATED:
                case SDEV_RUNNING:
                case SDEV_QUIESCE:
                case SDEV_OFFLINE:
                case SDEV_BLOCK:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SDEV_DEL:
                switch (oldstate) {
                case SDEV_CREATED:
                case SDEV_RUNNING:
                case SDEV_OFFLINE:
                case SDEV_CANCEL:
                        break;
                default:
                        goto illegal;
                }
                break;

        }
        sdev->sdev_state = state;
        return 0;

 illegal:
        SCSI_LOG_ERROR_RECOVERY(1,
                                sdev_printk(KERN_ERR, sdev,
                                            "Illegal state transition %s->%s\n",
                                            scsi_device_state_name(oldstate),
                                            scsi_device_state_name(state))
                                );
        return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
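
/*
 * Usage sketch (illustrative only; my_offline_device() is hypothetical):
 * take a misbehaving device offline from process context, reporting the
 * case where the transition is not legal from the current state.
 */
#if 0
static void my_offline_device(struct scsi_device *sdev)
{
        if (scsi_device_set_state(sdev, SDEV_OFFLINE))
                sdev_printk(KERN_WARNING, sdev,
                            "cannot go offline from state %s\n",
                            scsi_device_state_name(sdev->sdev_state));
}
#endif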

/**
 * scsi_evt_emit - emit a single SCSI device uevent
 * @sdev: associated SCSI device
 * @evt: event to emit
 *
 * Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
        int idx = 0;
        char *envp[3];

        switch (evt->evt_type) {
        case SDEV_EVT_MEDIA_CHANGE:
                envp[idx++] = "SDEV_MEDIA_CHANGE=1";
                break;

        default:
                /* do nothing */
                break;
        }

        envp[idx++] = NULL;

        kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 * scsi_evt_thread - send a uevent for each scsi event
 * @work: work struct for scsi_device
 *
 * Dispatch queued events to their associated scsi_device kobjects
 * as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
        struct scsi_device *sdev;
        LIST_HEAD(event_list);

        sdev = container_of(work, struct scsi_device, event_work);

        while (1) {
                struct scsi_event *evt;
                struct list_head *this, *tmp;
                unsigned long flags;

                spin_lock_irqsave(&sdev->list_lock, flags);
                list_splice_init(&sdev->event_list, &event_list);
                spin_unlock_irqrestore(&sdev->list_lock, flags);

                if (list_empty(&event_list))
                        break;

                list_for_each_safe(this, tmp, &event_list) {
                        evt = list_entry(this, struct scsi_event, node);
                        list_del(&evt->node);
                        scsi_evt_emit(sdev, evt);
                        kfree(evt);
                }
        }
}

/**
 * sdev_evt_send - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt: event to send
 *
 * Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
        unsigned long flags;

#if 0
        /* FIXME: currently this check eliminates all media change events
         * for polled devices.  Need to update to discriminate between AN
         * and polled events */
        if (!test_bit(evt->evt_type, sdev->supported_events)) {
                kfree(evt);
                return;
        }
#endif

        spin_lock_irqsave(&sdev->list_lock, flags);
        list_add_tail(&evt->node, &sdev->event_list);
        schedule_work(&sdev->event_work);
        spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 * sdev_evt_alloc - allocate a new scsi event
 * @evt_type: type of event to allocate
 * @gfpflags: GFP flags for allocation
 *
 * Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
                                  gfp_t gfpflags)
{
        struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
        if (!evt)
                return NULL;

        evt->evt_type = evt_type;
        INIT_LIST_HEAD(&evt->node);

        /* evt_type-specific initialization, if any */
        switch (evt_type) {
        case SDEV_EVT_MEDIA_CHANGE:
        default:
                /* do nothing */
                break;
        }

        return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 * sdev_evt_send_simple - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt_type: type of event to send
 * @gfpflags: GFP flags for allocation
 *
 * Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
                          enum scsi_device_event evt_type, gfp_t gfpflags)
{
        struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
        if (!evt) {
                sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
                            evt_type);
                return;
        }

        sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
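
/*
 * Usage sketch (illustrative only; my_handle_unit_attention() is
 * hypothetical): an LLD that detects a media change through a unit
 * attention can notify user space with a single call; the midlayer
 * allocates the event and queues it to the device's event work.
 */
#if 0
static void my_handle_unit_attention(struct scsi_device *sdev)
{
        sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}
#endif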

/**
 * scsi_device_quiesce - Block user issued commands.
 * @sdev: scsi device to quiesce.
 *
 * This works by trying to transition to the SDEV_QUIESCE state
 * (which must be a legal transition).  When the device is in this
 * state, only special requests will be accepted, all others will
 * be deferred.  Since special requests may also be requeued requests,
 * a successful return doesn't guarantee the device will be
 * totally quiescent.
 *
 * Must be called with user context, may sleep.
 *
 * Returns zero if successful, or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
        int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
        if (err)
                return err;

        scsi_run_queue(sdev->request_queue);
        while (sdev->device_busy) {
                msleep_interruptible(200);
                scsi_run_queue(sdev->request_queue);
        }
        return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 * scsi_device_resume - Restart user issued commands to a quiesced device.
 * @sdev: scsi device to resume.
 *
 * Moves the device from quiesced back to running and restarts the
 * queues.
 *
 * Must be called with user context, may sleep.
 */
void
scsi_device_resume(struct scsi_device *sdev)
{
        if (scsi_device_set_state(sdev, SDEV_RUNNING))
                return;
        scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
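
/*
 * Typical quiesce/resume pairing (illustrative sketch; my_tune_device() is
 * a hypothetical operation that must not race with user I/O, much like
 * transport domain validation):
 */
#if 0
static int my_tune_device(struct scsi_device *sdev)
{
        int err = scsi_device_quiesce(sdev);

        if (err)
                return err;
        /* ... issue internal commands; user I/O is deferred ... */
        scsi_device_resume(sdev);
        return 0;
}
#endif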

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
        scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
        starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
        scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
        starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Block requests made by SCSI LLDs to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *     This routine transitions the device to the SDEV_BLOCK state
 *     (which must be a legal transition).  When the device is in this
 *     state, all commands are deferred until the scsi lld reenables
 *     the device with scsi_device_unblock or device_block_tmo fires.
 *     This routine assumes the host_lock is held on entry.
 */
int
scsi_internal_device_block(struct scsi_device *sdev)
{
        struct request_queue *q = sdev->request_queue;
        unsigned long flags;
        int err = 0;

        err = scsi_device_set_state(sdev, SDEV_BLOCK);
        if (err) {
                err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

                if (err)
                        return err;
        }

        /*
         * The device has transitioned to SDEV_BLOCK.  Stop the
         * block layer from calling the midlayer with this device's
         * request queue.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev: device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *     This routine transitions the device to the SDEV_RUNNING state
 *     (which must be a legal transition) allowing the midlayer to
 *     goose the queue for this device.  This routine assumes the
 *     host_lock is held upon entry.
 */
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
        struct request_queue *q = sdev->request_queue;
        unsigned long flags;

        /*
         * Try to transition the scsi device to SDEV_RUNNING
         * and goose the device queue if successful.
         */
        if (sdev->sdev_state == SDEV_BLOCK)
                sdev->sdev_state = SDEV_RUNNING;
        else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
                sdev->sdev_state = SDEV_CREATED;
        else if (sdev->sdev_state != SDEV_CANCEL &&
                 sdev->sdev_state != SDEV_OFFLINE)
                return -EINVAL;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
        scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_block);
        return 0;
}

void
scsi_target_block(struct device *dev)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_block);
        else
                device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
        scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_unblock);
        return 0;
}

void
scsi_target_unblock(struct device *dev)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_unblock);
        else
                device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
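
/*
 * Usage sketch (illustrative only; my_link_event() is hypothetical): a
 * transport class can suspend all I/O to a remote port while the link
 * bounces, then release it once the port comes back.
 */
#if 0
static void my_link_event(struct device *target_dev, int link_up)
{
        if (link_up)
                scsi_target_unblock(target_dev);
        else
                scsi_target_block(target_dev);
}
#endif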

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg element
 * @sgl: scatter-gather list
 * @sg_count: number of segments in sg
 * @offset: offset in bytes into sg, on return offset into the mapped area
 * @len: bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
                          size_t *offset, size_t *len)
{
        int i;
        size_t sg_len = 0, len_complete = 0;
        struct scatterlist *sg;
        struct page *page;

        WARN_ON(!irqs_disabled());

        for_each_sg(sgl, sg, sg_count, i) {
                len_complete = sg_len; /* Complete sg-entries */
                sg_len += sg->length;
                if (sg_len > *offset)
                        break;
        }

        if (unlikely(i == sg_count)) {
                printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
                       "elements %d\n",
                       __func__, sg_len, *offset, sg_count);
                WARN_ON(1);
                return NULL;
        }

        /* Offset starting from the beginning of first page in this sg-entry */
        *offset = *offset - len_complete + sg->offset;

        /* Assumption: contiguous pages can be accessed as "page + i" */
        page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
        *offset &= ~PAGE_MASK;

        /* Bytes in this sg-entry from *offset to the end of the page */
        sg_len = PAGE_SIZE - *offset;
        if (*len > sg_len)
                *len = sg_len;

        return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);
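
/*
 * Usage sketch (illustrative only; my_peek_data() is hypothetical): peek at
 * the first bytes of a command's data buffer.  Note that @offset and @len
 * are both inputs and outputs, so the mapped window may be shorter than
 * requested, and the map/unmap pair must not sleep in between.
 */
#if 0
static void my_peek_data(struct scsi_cmnd *cmd)
{
        size_t offset = 0, len = 8;
        unsigned long flags;
        void *vaddr;

        local_irq_save(flags);  /* scsi_kmap_atomic_sg() expects irqs off */
        vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
                                    &offset, &len);
        if (vaddr) {
                /* offset/len now describe the window actually mapped */
                print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET,
                                     vaddr + offset, len);
                scsi_kunmap_atomic_sg(vaddr);
        }
        local_irq_restore(flags);
}
#endif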

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt: virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
        kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);