ibmvscsi.c

/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *          Dave Boutcher (sleddog@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * Emulation of a SCSI host adapter for Virtual I/O devices
 *
 * This driver supports the SCSI adapter implemented by the IBM
 * Power5 firmware. That SCSI adapter is not a physical adapter,
 * but allows Linux SCSI peripheral drivers to directly
 * access devices in another logical partition on the physical system.
 *
 * The virtual adapter(s) are present in the open firmware device
 * tree just like real adapters.
 *
 * One of the capabilities provided on these systems is the ability
 * to DMA between partitions. The architecture states that for VSCSI,
 * the server side is allowed to DMA to and from the client. The client
 * is never trusted to DMA to or from the server directly.
 *
 * Messages are sent between partitions on a "Command/Response Queue"
 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
 * memory. Senders cannot access the buffer directly, but send messages by
 * making a hypervisor call and passing in the 16 bytes. The hypervisor
 * puts the message in the next 16 byte space in round-robin fashion,
 * turns on the high order bit of the message (the valid bit), and
 * generates an interrupt to the receiver (if interrupts are turned on).
 * The receiver just turns off the valid bit when it has copied out
 * the message.
 *
 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
 * (IU) (as defined in the T10 standard available at www.t10.org), gets
 * a DMA address for the message, and sends it to the server as the
 * payload of a CRQ message. The server DMAs the SRP IU and processes it,
 * including doing any additional data transfers. When it is done, it
 * DMAs the SRP response back to the same address as the request came from,
 * and sends a CRQ message back to inform the client that the request has
 * completed.
 *
 * Note that some of the underlying infrastructure is different between
 * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
 * the older iSeries hypervisor models. To support both, some low level
 * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
 * The Makefile should pick one, not two, not zero, of these.
 *
 * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
 * interfaces. It would be really nice to abstract this above an RDMA
 * layer.
 */
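
/*
 * For orientation, a rough sketch of the 16-byte CRQ entry as this driver
 * uses it (inferred from the code below; the authoritative definition is
 * struct viosrp_crq in the viosrp header pulled in via ibmvscsi.h):
 *
 *	valid       - 0x80 = payload, 0xC0 = initialization message,
 *	              0xFF = transport event (see ibmvscsi_handle_crq())
 *	format      - VIOSRP_SRP_FORMAT or VIOSRP_MAD_FORMAT
 *	timeout     - timeout in seconds, set by init_event_struct()
 *	IU_length   - size of the SRP IU in the transfer area
 *	IU_data_ptr - DMA address of the IU; the server echoes this back,
 *	              letting us correlate responses with requests
 */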
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include "ibmvscsi.h"

/* The values below are somewhat arbitrary default values, but
 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
 * Note that there are 3 bits of channel value, 6 bits of id, and
 * 5 bits of LUN.
 */
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 5;
static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;

#define IBMVSCSI_VERSION "1.5.8"

MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSI_VERSION);

module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
/* ------------------------------------------------------------
 * Routines for the event pool and event structs
 */
/**
 * initialize_event_pool: - Allocates and initializes the event pool for a host
 * @pool: event_pool to be initialized
 * @size: Number of events in pool
 * @hostdata: ibmvscsi_host_data who owns the event pool
 *
 * Returns zero on success.
 */
static int initialize_event_pool(struct event_pool *pool,
				 int size, struct ibmvscsi_host_data *hostdata)
{
	int i;

	pool->size = size;
	pool->next = 0;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	pool->iu_storage =
	    dma_alloc_coherent(hostdata->dev,
			       pool->size * sizeof(*pool->iu_storage),
			       &pool->iu_token, 0);
	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i) {
		struct srp_event_struct *evt = &pool->events[i];

		memset(&evt->crq, 0x00, sizeof(evt->crq));
		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		evt->crq.IU_length = sizeof(*evt->xfer_iu);
		evt->crq.IU_data_ptr = pool->iu_token +
			sizeof(*evt->xfer_iu) * i;
		evt->xfer_iu = pool->iu_storage + i;
		evt->hostdata = hostdata;
		evt->ext_list = NULL;
		evt->ext_list_token = 0;
	}
	return 0;
}
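
/*
 * A note on the pool layout (derived from initialize_event_pool() above):
 * events[i] is permanently paired with iu_storage[i], and each event's
 * crq.IU_data_ptr is computed once here. Sending a command therefore never
 * needs a per-request dma_map_single() for the IU itself, only for its
 * data buffers.
 */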
/**
 * release_event_pool: - Frees memory of an event pool of a host
 * @pool: event_pool to be released
 * @hostdata: ibmvscsi_host_data who owns the event pool
 */
static void release_event_pool(struct event_pool *pool,
			       struct ibmvscsi_host_data *hostdata)
{
	int i, in_use = 0;

	for (i = 0; i < pool->size; ++i) {
		if (atomic_read(&pool->events[i].free) != 1)
			++in_use;
		if (pool->events[i].ext_list) {
			dma_free_coherent(hostdata->dev,
					  SG_ALL * sizeof(struct srp_direct_buf),
					  pool->events[i].ext_list,
					  pool->events[i].ext_list_token);
		}
	}
	if (in_use)
		dev_warn(hostdata->dev, "releasing event pool with %d "
			 "events still in use?\n", in_use);
	kfree(pool->events);
	dma_free_coherent(hostdata->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
}
/**
 * valid_event_struct: - Determines if event is valid.
 * @pool: event_pool that contains the event
 * @evt: srp_event_struct to be checked for validity
 *
 * Returns zero if event is invalid, one otherwise.
 */
static int valid_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	int index = evt - pool->events;

	if (index < 0 || index >= pool->size)	/* outside of bounds */
		return 0;
	if (evt != pool->events + index)	/* unaligned */
		return 0;
	return 1;
}
/**
 * free_event_struct: - Changes status of event to "free"
 * @pool: event_pool that contains the event
 * @evt: srp_event_struct to be modified
 */
static void free_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	if (!valid_event_struct(pool, evt)) {
		dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
			"(not in pool %p)\n", evt, pool->events);
		return;
	}
	if (atomic_inc_return(&evt->free) != 1) {
		dev_err(evt->hostdata->dev, "Freeing event_struct %p "
			"which is not in use!\n", evt);
		return;
	}
}
/**
 * get_event_struct: - Gets the next free event in pool
 * @pool: event_pool that contains the events to be searched
 *
 * Returns the next event in "free" state, and NULL if none are free.
 * Note that no synchronization is done here; we assume the host_lock
 * will synchronize things.
 */
static struct srp_event_struct *get_event_struct(struct event_pool *pool)
{
	int i;
	int poolsize = pool->size;
	int offset = pool->next;

	for (i = 0; i < poolsize; i++) {
		offset = (offset + 1) % poolsize;
		if (!atomic_dec_if_positive(&pool->events[offset].free)) {
			pool->next = offset;
			return &pool->events[offset];
		}
	}

	printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
	return NULL;
}
/**
 * init_event_struct: Initialize fields in an event struct that are always
 * required.
 * @evt_struct: The event
 * @done: Routine to call when the event is responded to
 * @format: SRP or MAD format
 * @timeout: timeout value set in the CRQ
 */
static void init_event_struct(struct srp_event_struct *evt_struct,
			      void (*done) (struct srp_event_struct *),
			      u8 format,
			      int timeout)
{
	evt_struct->cmnd = NULL;
	evt_struct->cmnd_done = NULL;
	evt_struct->sync_srp = NULL;
	evt_struct->crq.format = format;
	evt_struct->crq.timeout = timeout;
	evt_struct->done = done;
}
/* ------------------------------------------------------------
 * Routines for receiving SCSI responses from the hosting partition
 */

/**
 * set_srp_direction: Set the fields in the srp related to data
 * direction and number of buffers based on the direction in
 * the scsi_cmnd and the number of buffers
 */
static void set_srp_direction(struct scsi_cmnd *cmd,
			      struct srp_cmd *srp_cmd,
			      int numbuf)
{
	u8 fmt;

	if (numbuf == 0)
		return;

	if (numbuf == 1)
		fmt = SRP_DATA_DESC_DIRECT;
	else {
		fmt = SRP_DATA_DESC_INDIRECT;
		numbuf = min(numbuf, MAX_INDIRECT_BUFS);

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			srp_cmd->data_out_desc_cnt = numbuf;
		else
			srp_cmd->data_in_desc_cnt = numbuf;
	}

	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		srp_cmd->buf_fmt = fmt << 4;
	else
		srp_cmd->buf_fmt = fmt;
}
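
/*
 * Resulting srp_cmd->buf_fmt encoding, for reference (this mirrors how
 * unmap_cmd_data() below decodes it): the data-out format lives in the
 * high nibble and the data-in format in the low nibble. For example, a
 * single-buffer write yields buf_fmt = SRP_DATA_DESC_DIRECT << 4, while
 * a four-buffer read yields buf_fmt = SRP_DATA_DESC_INDIRECT with
 * data_in_desc_cnt = 4.
 */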
static void unmap_sg_list(int num_entries,
			  struct device *dev,
			  struct srp_direct_buf *md)
{
	int i;

	for (i = 0; i < num_entries; ++i)
		dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
}

/**
 * unmap_cmd_data: - Unmap data pointed to in srp_cmd based on the format
 * @cmd: srp_cmd whose additional_data member will be unmapped
 * @evt_struct: srp_event_struct that owns any external indirect list
 * @dev: device for which the memory is mapped
 */
static void unmap_cmd_data(struct srp_cmd *cmd,
			   struct srp_event_struct *evt_struct,
			   struct device *dev)
{
	u8 out_fmt, in_fmt;

	out_fmt = cmd->buf_fmt >> 4;
	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);

	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
		return;
	else if (out_fmt == SRP_DATA_DESC_DIRECT ||
		 in_fmt == SRP_DATA_DESC_DIRECT) {
		struct srp_direct_buf *data =
			(struct srp_direct_buf *) cmd->add_data;
		dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
	} else {
		struct srp_indirect_buf *indirect =
			(struct srp_indirect_buf *) cmd->add_data;
		int num_mapped = indirect->table_desc.len /
			sizeof(struct srp_direct_buf);

		if (num_mapped <= MAX_INDIRECT_BUFS) {
			unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
			return;
		}

		unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
	}
}
static u64 map_sg_list(struct scsi_cmnd *cmd, int nseg,
		       struct srp_direct_buf *md)
{
	int i;
	struct scatterlist *sg;
	u64 total_length = 0;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		struct srp_direct_buf *descr = md + i;
		descr->va = sg_dma_address(sg);
		descr->len = sg_dma_len(sg);
		descr->key = 0;
		total_length += sg_dma_len(sg);
	}
	return total_length;
}
/**
 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
 * @cmd: Scsi_Cmnd with the scatterlist
 * @evt_struct: srp_event_struct that carries the external indirect list
 * @srp_cmd: srp_cmd that contains the memory descriptor
 * @dev: device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_sg_data(struct scsi_cmnd *cmd,
		       struct srp_event_struct *evt_struct,
		       struct srp_cmd *srp_cmd, struct device *dev)
{
	int sg_mapped;
	u64 total_length = 0;
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;
	struct srp_indirect_buf *indirect =
		(struct srp_indirect_buf *) data;

	sg_mapped = scsi_dma_map(cmd);
	if (!sg_mapped)
		return 1;
	else if (sg_mapped < 0)
		return 0;

	set_srp_direction(cmd, srp_cmd, sg_mapped);

	/* special case; we can use a single direct descriptor */
	if (sg_mapped == 1) {
		map_sg_list(cmd, sg_mapped, data);
		return 1;
	}

	indirect->table_desc.va = 0;
	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
	indirect->table_desc.key = 0;

	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		total_length = map_sg_list(cmd, sg_mapped,
					   &indirect->desc_list[0]);
		indirect->len = total_length;
		return 1;
	}

	/* get indirect table */
	if (!evt_struct->ext_list) {
		evt_struct->ext_list = (struct srp_direct_buf *)
			dma_alloc_coherent(dev,
					   SG_ALL * sizeof(struct srp_direct_buf),
					   &evt_struct->ext_list_token, 0);
		if (!evt_struct->ext_list) {
			sdev_printk(KERN_ERR, cmd->device,
				    "Can't allocate memory for indirect table\n");
			return 0;
		}
	}

	total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
	indirect->len = total_length;
	indirect->table_desc.va = evt_struct->ext_list_token;
	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
	memcpy(indirect->desc_list, evt_struct->ext_list,
	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));

	return 1;
}
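
/*
 * In summary, map_sg_data() picks a descriptor format by segment count:
 * one mapped segment uses a single direct descriptor, up to
 * MAX_INDIRECT_BUFS segments use the indirect table embedded in the IU,
 * and anything larger spills into the per-event ext_list, with the first
 * MAX_INDIRECT_BUFS entries mirrored back into the IU.
 */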
/**
 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 * @cmd: struct scsi_cmnd with the memory to be mapped
 * @evt_struct: srp_event_struct being built
 * @srp_cmd: srp_cmd that contains the memory descriptor
 * @dev: dma device for which to map dma memory
 *
 * Called by ibmvscsi_queuecommand() when converting scsi cmds to srp cmds.
 * Returns 1 on success.
 */
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
				struct srp_event_struct *evt_struct,
				struct srp_cmd *srp_cmd, struct device *dev)
{
	switch (cmd->sc_data_direction) {
	case DMA_FROM_DEVICE:
	case DMA_TO_DEVICE:
		break;
	case DMA_NONE:
		return 1;
	case DMA_BIDIRECTIONAL:
		sdev_printk(KERN_ERR, cmd->device,
			    "Can't map DMA_BIDIRECTIONAL to read/write\n");
		return 0;
	default:
		sdev_printk(KERN_ERR, cmd->device,
			    "Unknown data direction 0x%02x; can't map!\n",
			    cmd->sc_data_direction);
		return 0;
	}

	return map_sg_data(cmd, evt_struct, srp_cmd, dev);
}
/**
 * purge_requests: Our virtual adapter just shut down. Purge any sent requests.
 * @hostdata: the adapter
 * @error_code: SCSI host byte to fail the outstanding commands with
 */
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
{
	struct srp_event_struct *tmp_evt, *pos;
	unsigned long flags;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
		list_del(&tmp_evt->list);
		del_timer(&tmp_evt->timer);
		if (tmp_evt->cmnd) {
			tmp_evt->cmnd->result = (error_code << 16);
			unmap_cmd_data(&tmp_evt->iu.srp.cmd,
				       tmp_evt,
				       tmp_evt->hostdata->dev);
			if (tmp_evt->cmnd_done)
				tmp_evt->cmnd_done(tmp_evt->cmnd);
		} else if (tmp_evt->done)
			tmp_evt->done(tmp_evt);
		free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}
/**
 * ibmvscsi_reset_host - Reset the connection to the server
 * @hostdata: struct ibmvscsi_host_data to reset
 */
static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
{
	scsi_block_requests(hostdata->host);
	atomic_set(&hostdata->request_limit, 0);
	purge_requests(hostdata, DID_ERROR);

	if ((ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata)) ||
	    (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0)) ||
	    (vio_enable_interrupts(to_vio_dev(hostdata->dev)))) {
		atomic_set(&hostdata->request_limit, -1);
		dev_err(hostdata->dev, "error after reset\n");
	}

	scsi_unblock_requests(hostdata->host);
}

/**
 * ibmvscsi_timeout - Internal command timeout handler
 * @evt_struct: struct srp_event_struct that timed out
 *
 * Called when an internally generated command times out
 */
static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
		evt_struct->iu.srp.cmd.opcode);

	ibmvscsi_reset_host(hostdata);
}
/* ------------------------------------------------------------
 * Routines for sending and receiving SRPs
 */
/**
 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
 * @evt_struct: evt_struct to be sent
 * @hostdata: ibmvscsi_host_data of host
 * @timeout: timeout in seconds - 0 means do not time the command
 *
 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
 * Note that this routine assumes that host_lock is held for synchronization
 */
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
				   struct ibmvscsi_host_data *hostdata,
				   unsigned long timeout)
{
	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
	int request_status;
	int rc;

	/* If we have exhausted our request limit, just fail this request,
	 * unless it is for a reset or abort.
	 * Note that there are rare cases involving driver generated requests
	 * (such as task management requests) that the mid layer may think we
	 * can handle more requests (can_queue) when we actually can't
	 */
	if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
		request_status =
			atomic_dec_if_positive(&hostdata->request_limit);
		/* If request limit was -1 when we started, it is now even
		 * less than that
		 */
		if (request_status < -1)
			goto send_error;
		/* Otherwise, we may have run out of requests. */
		/* Abort and reset calls should make it through.
		 * Nothing except abort and reset should use the last two
		 * slots unless we had two or less to begin with.
		 */
		else if (request_status < 2 &&
			 evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
			/* In the case that we have less than two requests
			 * available, check the server limit as a combination
			 * of the request limit and the number of requests
			 * in-flight (the size of the send list). If the
			 * server limit is greater than 2, return busy so
			 * that the last two are reserved for reset and abort.
			 */
			int server_limit = request_status;
			struct srp_event_struct *tmp_evt;

			list_for_each_entry(tmp_evt, &hostdata->sent, list) {
				server_limit++;
			}

			if (server_limit > 2)
				goto send_busy;
		}
	}

	/* Copy the IU into the transfer area */
	*evt_struct->xfer_iu = evt_struct->iu;
	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;

	/* Add this to the sent list. We need to do this
	 * before we actually send
	 * in case it comes back REALLY fast
	 */
	list_add_tail(&evt_struct->list, &hostdata->sent);

	init_timer(&evt_struct->timer);
	if (timeout) {
		evt_struct->timer.data = (unsigned long) evt_struct;
		evt_struct->timer.expires = jiffies + (timeout * HZ);
		evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout;
		add_timer(&evt_struct->timer);
	}

	if ((rc =
	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
		list_del(&evt_struct->list);
		del_timer(&evt_struct->timer);

		dev_err(hostdata->dev, "send error %d\n", rc);
		atomic_inc(&hostdata->request_limit);
		goto send_error;
	}

	return 0;

 send_busy:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	free_event_struct(&hostdata->pool, evt_struct);
	atomic_inc(&hostdata->request_limit);
	return SCSI_MLQUEUE_HOST_BUSY;

 send_error:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	if (evt_struct->cmnd != NULL) {
		evt_struct->cmnd->result = DID_ERROR << 16;
		evt_struct->cmnd_done(evt_struct->cmnd);
	} else if (evt_struct->done)
		evt_struct->done(evt_struct);

	free_event_struct(&hostdata->pool, evt_struct);
	return 0;
}
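
/*
 * Tag correlation, in brief: the kernel address of the evt_struct is used
 * as the SRP tag (see the rsp.tag assignment above). The server echoes it
 * back in IU_data_ptr of the response CRQ, and ibmvscsi_handle_crq() casts
 * it straight back to a struct srp_event_struct pointer, checking it with
 * valid_event_struct() before trusting it.
 */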
/**
 * handle_cmd_rsp: - Handle responses from commands
 * @evt_struct: srp_event_struct to be handled
 *
 * Used as a callback when sending scsi cmds.
 * Gets called by ibmvscsi_handle_crq()
 */
static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
{
	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
	struct scsi_cmnd *cmnd = evt_struct->cmnd;

	if (unlikely(rsp->opcode != SRP_RSP)) {
		if (printk_ratelimit())
			dev_warn(evt_struct->hostdata->dev,
				 "bad SRP RSP type %d\n", rsp->opcode);
	}

	if (cmnd) {
		cmnd->result = rsp->status;
		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
			memcpy(cmnd->sense_buffer,
			       rsp->data,
			       rsp->sense_data_len);
		unmap_cmd_data(&evt_struct->iu.srp.cmd,
			       evt_struct,
			       evt_struct->hostdata->dev);

		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
			scsi_set_resid(cmnd, rsp->data_out_res_cnt);
		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
			scsi_set_resid(cmnd, rsp->data_in_res_cnt);
	}

	if (evt_struct->cmnd_done)
		evt_struct->cmnd_done(cmnd);
}
/**
 * lun_from_dev: - Returns the lun of the scsi device
 * @dev: struct scsi_device
 */
static inline u16 lun_from_dev(struct scsi_device *dev)
{
	return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
}
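
/*
 * Bit layout of the value returned above, matching the "3 bits of channel,
 * 6 bits of id, and 5 bits of LUN" note at the top of this file (the 0x2
 * in the top two bits should be the SAM logical-unit addressing method):
 *
 *	bits 15-14 | bits 13-8 | bits 7-5 | bits 4-0
 *	    1 0    |    id     | channel  |   lun
 */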
/**
 * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
 * @cmnd: struct scsi_cmnd to be executed
 * @done: Callback function to be called when cmnd is completed
 */
static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
				 void (*done) (struct scsi_cmnd *))
{
	struct srp_cmd *srp_cmd;
	struct srp_event_struct *evt_struct;
	struct srp_indirect_buf *indirect;
	struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
	u16 lun = lun_from_dev(cmnd->device);
	u8 out_fmt, in_fmt;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Set up the actual SRP IU */
	srp_cmd = &evt_struct->iu.srp.cmd;
	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
	srp_cmd->opcode = SRP_CMD;
	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
	srp_cmd->lun = ((u64) lun) << 48;

	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
		sdev_printk(KERN_ERR, cmnd->device, "couldn't convert cmd to srp_cmd\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	init_event_struct(evt_struct,
			  handle_cmd_rsp,
			  VIOSRP_SRP_FORMAT,
			  cmnd->timeout_per_command/HZ);

	evt_struct->cmnd = cmnd;
	evt_struct->cmnd_done = done;

	/* Fix up dma address of the buffer itself */
	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
	out_fmt = srp_cmd->buf_fmt >> 4;
	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
	    indirect->table_desc.va == 0) {
		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
			offsetof(struct srp_cmd, add_data) +
			offsetof(struct srp_indirect_buf, desc_list);
	}

	return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
}
/* ------------------------------------------------------------
 * Routines for driver initialization
 */

/**
 * adapter_info_rsp: - Handle response to MAD adapter info request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending adapter_info. Gets called
 * by ibmvscsi_handle_crq()
 */
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	dma_unmap_single(hostdata->dev,
			 evt_struct->iu.mad.adapter_info.buffer,
			 evt_struct->iu.mad.adapter_info.common.length,
			 DMA_BIDIRECTIONAL);

	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
		dev_err(hostdata->dev, "error %d getting adapter info\n",
			evt_struct->xfer_iu->mad.adapter_info.common.status);
	} else {
		dev_info(hostdata->dev, "host srp version: %s, "
			 "host partition %s (%d), OS %d, max io %u\n",
			 hostdata->madapter_info.srp_version,
			 hostdata->madapter_info.partition_name,
			 hostdata->madapter_info.partition_number,
			 hostdata->madapter_info.os_type,
			 hostdata->madapter_info.port_max_txu[0]);

		if (hostdata->madapter_info.port_max_txu[0])
			hostdata->host->max_sectors =
				hostdata->madapter_info.port_max_txu[0] >> 9;

		if (hostdata->madapter_info.os_type == 3 &&
		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
			dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
				hostdata->madapter_info.srp_version);
			dev_err(hostdata->dev, "limiting scatterlists to %d\n",
				MAX_INDIRECT_BUFS);
			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
		}
	}
}
/**
 * send_mad_adapter_info: - Sends the mad adapter info request
 * and stores the result so it can be retrieved with
 * sysfs. We COULD consider causing a failure if the
 * returned SRP version doesn't match ours.
 * @hostdata: ibmvscsi_host_data of host
 */
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
	struct viosrp_adapter_info *req;
	struct srp_event_struct *evt_struct;
	unsigned long flags;
	dma_addr_t addr;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		dev_err(hostdata->dev,
			"couldn't allocate an event for ADAPTER_INFO_REQ!\n");
		return;
	}

	init_event_struct(evt_struct,
			  adapter_info_rsp,
			  VIOSRP_MAD_FORMAT,
			  init_timeout);

	req = &evt_struct->iu.mad.adapter_info;
	memset(req, 0x00, sizeof(*req));

	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
	req->common.length = sizeof(hostdata->madapter_info);
	req->buffer = addr = dma_map_single(hostdata->dev,
					    &hostdata->madapter_info,
					    sizeof(hostdata->madapter_info),
					    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(req->buffer)) {
		dev_err(hostdata->dev, "Unable to map request_buffer for adapter_info!\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return;
	}

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
		dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
		dma_unmap_single(hostdata->dev,
				 addr,
				 sizeof(hostdata->madapter_info),
				 DMA_BIDIRECTIONAL);
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}
/**
 * login_rsp: - Handle response to SRP login request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending srp_login. Gets called
 * by ibmvscsi_handle_crq()
 */
static void login_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
	case SRP_LOGIN_RSP:	/* it worked! */
		break;
	case SRP_LOGIN_REJ:	/* refused! */
		dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
			 evt_struct->xfer_iu->srp.login_rej.reason);
		/* Login failed. */
		atomic_set(&hostdata->request_limit, -1);
		return;
	default:
		dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
			evt_struct->xfer_iu->srp.login_rsp.opcode);
		/* Login failed. */
		atomic_set(&hostdata->request_limit, -1);
		return;
	}

	dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");

	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
		dev_err(hostdata->dev, "Invalid request_limit.\n");

	/* Now we know what the real request-limit is.
	 * This value is set rather than added to request_limit because
	 * request_limit could have been set to -1 by this client.
	 */
	atomic_set(&hostdata->request_limit,
		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);

	/* If we had any pending I/Os, kick them */
	scsi_unblock_requests(hostdata->host);

	send_mad_adapter_info(hostdata);
	return;
}
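
/*
 * request_limit states used throughout this file, collected here for
 * reference (an interpretation of the code, not of the architecture):
 * -1 means the connection is dead or login failed, 0 means the transport
 * is blocked during a reset, and a positive value is the window granted
 * at login, decremented per in-flight SRP request in
 * ibmvscsi_send_srp_event() and replenished from req_lim_delta in
 * ibmvscsi_handle_crq().
 */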
/**
 * send_srp_login: - Sends the srp login
 * @hostdata: ibmvscsi_host_data of host
 *
 * Returns zero if successful.
 */
static int send_srp_login(struct ibmvscsi_host_data *hostdata)
{
	int rc;
	unsigned long flags;
	struct srp_login_req *login;
	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);

	if (!evt_struct) {
		dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
		return FAILED;
	}

	init_event_struct(evt_struct,
			  login_rsp,
			  VIOSRP_SRP_FORMAT,
			  init_timeout);

	login = &evt_struct->iu.srp.login_req;
	memset(login, 0x00, sizeof(struct srp_login_req));
	login->opcode = SRP_LOGIN_REQ;
	login->req_it_iu_len = sizeof(union srp_iu);
	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	/* Start out with a request limit of 1, since this is negotiated in
	 * the login request we are just sending
	 */
	atomic_set(&hostdata->request_limit, 1);
	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	dev_info(hostdata->dev, "sent SRP login\n");
	return rc;
}
/**
 * sync_completion: Signal that a synchronous command has completed.
 * Note that after returning from this call, the evt_struct is freed.
 * The caller waiting on this completion shouldn't touch the evt_struct
 * again.
 */
static void sync_completion(struct srp_event_struct *evt_struct)
{
	/* copy the response back */
	if (evt_struct->sync_srp)
		*evt_struct->sync_srp = *evt_struct->xfer_iu;

	complete(&evt_struct->comp);
}
/**
 * ibmvscsi_eh_abort_handler: Abort a command... from scsi host template.
 * Send this over to the server and wait synchronously for the response.
 */
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *found_evt;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);

	/* First, find this command in our sent list so we can figure
	 * out the correct tag
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
		if (tmp_evt->cmnd == cmd) {
			found_evt = tmp_evt;
			break;
		}
	}

	if (!found_evt) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		return SUCCESS;
	}

	evt = get_event_struct(&hostdata->pool);
	if (evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n");
		return FAILED;
	}

	init_event_struct(evt,
			  sync_completion,
			  VIOSRP_SRP_FORMAT,
			  init_timeout);

	tsk_mgmt = &evt->iu.srp.tsk_mgmt;

	/* Set up an abort SRP command */
	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = ((u64) lun) << 48;
	tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
	tsk_mgmt->task_tag = (u64) found_evt;

	sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%lx, tag 0x%lx\n",
		    tsk_mgmt->lun, tsk_mgmt->task_tag);

	evt->sync_srp = &srp_rsp;
	init_completion(&evt->comp);
	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, cmd->device,
			    "failed to send abort() event. rc=%d\n", rsp_rc);
		return FAILED;
	}

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
				    srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device,
				    "abort code %d for task tag 0x%lx\n",
				    rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

	/* Because we dropped the spinlock above, it's possible
	 * the event is no longer in our list. Make sure it didn't
	 * complete while we were aborting.
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
		if (tmp_evt->cmnd == cmd) {
			found_evt = tmp_evt;
			break;
		}
	}

	if (found_evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%lx completed\n",
			    tsk_mgmt->task_tag);
		return SUCCESS;
	}

	sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%lx\n",
		    tsk_mgmt->task_tag);

	cmd->result = (DID_ABORT << 16);
	list_del(&found_evt->list);
	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
		       found_evt->hostdata->dev);
	free_event_struct(&found_evt->hostdata->pool, found_evt);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	atomic_inc(&hostdata->request_limit);
	return SUCCESS;
}
/**
 * ibmvscsi_eh_device_reset_handler: Reset a single LUN... from scsi host
 * template. Send this over to the server and wait synchronously for the
 * response.
 */
static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *pos;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	evt = get_event_struct(&hostdata->pool);
	if (evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n");
		return FAILED;
	}

	init_event_struct(evt,
			  sync_completion,
			  VIOSRP_SRP_FORMAT,
			  init_timeout);

	tsk_mgmt = &evt->iu.srp.tsk_mgmt;

	/* Set up a lun reset SRP command */
	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = ((u64) lun) << 48;
	tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;

	sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
		    tsk_mgmt->lun);

	evt->sync_srp = &srp_rsp;
	init_completion(&evt->comp);
	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, cmd->device,
			    "failed to send reset event. rc=%d\n", rsp_rc);
		return FAILED;
	}

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
				    srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device,
				    "reset code %d for task tag 0x%lx\n",
				    rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

	/* We need to find all commands for this LUN that have not yet been
	 * responded to, and fail them with DID_RESET
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
		if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
			if (tmp_evt->cmnd)
				tmp_evt->cmnd->result = (DID_RESET << 16);
			list_del(&tmp_evt->list);
			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
				       tmp_evt->hostdata->dev);
			free_event_struct(&tmp_evt->hostdata->pool,
					  tmp_evt);
			atomic_inc(&hostdata->request_limit);
			if (tmp_evt->cmnd_done)
				tmp_evt->cmnd_done(tmp_evt->cmnd);
			else if (tmp_evt->done)
				tmp_evt->done(tmp_evt);
		}
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	return SUCCESS;
}
/**
 * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
 * @cmd: struct scsi_cmnd having problems
 */
static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	unsigned long wait_switch = 0;
	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);

	dev_err(hostdata->dev, "Resetting connection due to error recovery\n");

	ibmvscsi_reset_host(hostdata);

	for (wait_switch = jiffies + (init_timeout * HZ);
	     time_before(jiffies, wait_switch) &&
	     atomic_read(&hostdata->request_limit) < 2;) {
		msleep(10);
	}

	if (atomic_read(&hostdata->request_limit) <= 0)
		return FAILED;

	return SUCCESS;
}
/**
 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
 * @crq: Command/Response queue
 * @hostdata: ibmvscsi_host_data of host
 */
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
			 struct ibmvscsi_host_data *hostdata)
{
	long rc;
	unsigned long flags;
	struct srp_event_struct *evt_struct =
		(struct srp_event_struct *)crq->IU_data_ptr;

	switch (crq->valid) {
	case 0xC0:		/* initialization */
		switch (crq->format) {
		case 0x01:	/* Initialization message */
			dev_info(hostdata->dev, "partner initialized\n");
			/* Send back a response */
			if ((rc = ibmvscsi_send_crq(hostdata,
						    0xC002000000000000LL, 0)) == 0) {
				/* Now login */
				send_srp_login(hostdata);
			} else {
				dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
			}
			break;
		case 0x02:	/* Initialization response */
			dev_info(hostdata->dev, "partner initialization complete\n");
			/* Now login */
			send_srp_login(hostdata);
			break;
		default:
			dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
		}
		return;
	case 0xFF:		/* Hypervisor telling us the connection is closed */
		scsi_block_requests(hostdata->host);
		atomic_set(&hostdata->request_limit, 0);
		if (crq->format == 0x06) {
			/* We need to re-setup the interpartition connection */
			dev_info(hostdata->dev, "Re-enabling adapter!\n");
			purge_requests(hostdata, DID_REQUEUE);
			if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
							 hostdata)) ||
			    (ibmvscsi_send_crq(hostdata,
					       0xC001000000000000LL, 0))) {
				atomic_set(&hostdata->request_limit, -1);
				dev_err(hostdata->dev, "error after enable\n");
			}
		} else {
			dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
				crq->format);
			purge_requests(hostdata, DID_ERROR);
			if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
						      hostdata)) ||
			    (ibmvscsi_send_crq(hostdata,
					       0xC001000000000000LL, 0))) {
				atomic_set(&hostdata->request_limit, -1);
				dev_err(hostdata->dev, "error after reset\n");
			}
		}
		scsi_unblock_requests(hostdata->host);
		return;
	case 0x80:		/* real payload */
		break;
	default:
		dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
			crq->valid);
		return;
	}

	/* The only kind of payload CRQs we should get are responses to
	 * things we send. Make sure this response is to something we
	 * actually sent
	 */
	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
		dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
			(void *)crq->IU_data_ptr);
		return;
	}

	if (atomic_read(&evt_struct->free)) {
		dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
			(void *)crq->IU_data_ptr);
		return;
	}

	if (crq->format == VIOSRP_SRP_FORMAT)
		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
			   &hostdata->request_limit);

	del_timer(&evt_struct->timer);

	if (evt_struct->done)
		evt_struct->done(evt_struct);
	else
		dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");

	/*
	 * Lock the host_lock before messing with these structures, since we
	 * are running in a task context
	 */
	spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
	list_del(&evt_struct->list);
	free_event_struct(&evt_struct->hostdata->pool, evt_struct);
	spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
}
/**
 * ibmvscsi_do_host_config: Send the command to the server to get host
 * configuration data. The data is opaque to us.
 */
static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
				   unsigned char *buffer, int length)
{
	struct viosrp_host_config *host_config;
	struct srp_event_struct *evt_struct;
	unsigned long flags;
	dma_addr_t addr;
	int rc;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
		return -1;
	}

	init_event_struct(evt_struct,
			  sync_completion,
			  VIOSRP_MAD_FORMAT,
			  init_timeout);

	host_config = &evt_struct->iu.mad.host_config;

	/* Set up a HOST_CONFIG MAD */
	memset(host_config, 0x00, sizeof(*host_config));
	host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
	host_config->common.length = length;
	host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
						    length,
						    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(host_config->buffer)) {
		dev_err(hostdata->dev, "dma_mapping error getting host config\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return -1;
	}

	init_completion(&evt_struct->comp);
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	if (rc == 0)
		wait_for_completion(&evt_struct->comp);
	dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);

	return rc;
}
/**
 * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
 * @sdev: struct scsi_device device to configure
 *
 * Enable allow_restart for a device if it is a disk. Adjust the
 * queue_depth here also as is required by the documentation for
 * struct scsi_host_template.
 */
static int ibmvscsi_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (sdev->type == TYPE_DISK)
		sdev->allow_restart = 1;
	scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return 0;
}

/**
 * ibmvscsi_change_queue_depth - Change the device's queue depth
 * @sdev: scsi device struct
 * @qdepth: depth to set
 *
 * Return value:
 *	actual depth set
 **/
static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
		qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;

	scsi_adjust_queue_depth(sdev, 0, qdepth);
	return sdev->queue_depth;
}
/* ------------------------------------------------------------
 * sysfs attributes
 */
static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.srp_version);
	return len;
}

static struct class_device_attribute ibmvscsi_host_srp_version = {
	.attr = {
		 .name = "srp_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_srp_version,
};

static ssize_t show_host_partition_name(struct class_device *class_dev,
					char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.partition_name);
	return len;
}

static struct class_device_attribute ibmvscsi_host_partition_name = {
	.attr = {
		 .name = "partition_name",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_name,
};

static ssize_t show_host_partition_number(struct class_device *class_dev,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       hostdata->madapter_info.partition_number);
	return len;
}

static struct class_device_attribute ibmvscsi_host_partition_number = {
	.attr = {
		 .name = "partition_number",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_number,
};

static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       hostdata->madapter_info.mad_version);
	return len;
}

static struct class_device_attribute ibmvscsi_host_mad_version = {
	.attr = {
		 .name = "mad_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_mad_version,
};

static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
	return len;
}

static struct class_device_attribute ibmvscsi_host_os_type = {
	.attr = {
		 .name = "os_type",
		 .mode = S_IRUGO,
		 },
	.show = show_host_os_type,
};

static ssize_t show_host_config(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);

	/* returns null-terminated host config data */
	if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
		return strlen(buf);
	else
		return 0;
}

static struct class_device_attribute ibmvscsi_host_config = {
	.attr = {
		 .name = "config",
		 .mode = S_IRUGO,
		 },
	.show = show_host_config,
};

static struct class_device_attribute *ibmvscsi_attrs[] = {
	&ibmvscsi_host_srp_version,
	&ibmvscsi_host_partition_name,
	&ibmvscsi_host_partition_number,
	&ibmvscsi_host_mad_version,
	&ibmvscsi_host_os_type,
	&ibmvscsi_host_config,
	NULL
};
/* ------------------------------------------------------------
 * SCSI driver registration
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
	.proc_name = "ibmvscsi",
	.queuecommand = ibmvscsi_queuecommand,
	.eh_abort_handler = ibmvscsi_eh_abort_handler,
	.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
	.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
	.slave_configure = ibmvscsi_slave_configure,
	.change_queue_depth = ibmvscsi_change_queue_depth,
	.cmd_per_lun = 16,
	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ibmvscsi_attrs,
};
/**
 * Called by bus code for each adapter
 */
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvscsi_host_data *hostdata;
	struct Scsi_Host *host;
	struct device *dev = &vdev->dev;
	unsigned long wait_switch = 0;
	int rc;

	vdev->dev.driver_data = NULL;

	driver_template.can_queue = max_requests;
	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
	if (!host) {
		dev_err(&vdev->dev, "couldn't allocate host data\n");
		goto scsi_host_alloc_failed;
	}

	hostdata = shost_priv(host);
	memset(hostdata, 0x00, sizeof(*hostdata));
	INIT_LIST_HEAD(&hostdata->sent);
	hostdata->host = host;
	hostdata->dev = dev;
	atomic_set(&hostdata->request_limit, -1);
	hostdata->host->max_sectors = 32 * 8;	/* default max I/O 32 pages */

	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
		goto init_crq_failed;
	}
	if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
		dev_err(&vdev->dev, "couldn't initialize event pool\n");
		goto init_pool_failed;
	}

	host->max_lun = 8;
	host->max_id = max_id;
	host->max_channel = max_channel;

	if (scsi_add_host(hostdata->host, hostdata->dev))
		goto add_host_failed;

	/* Try to send an initialization message. Note that this is allowed
	 * to fail if the other end is not active. In that case we don't
	 * want to scan
	 */
	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
	    || rc == H_RESOURCE) {
		/*
		 * Wait around max init_timeout secs for the adapter to finish
		 * initializing. When we are done initializing, we will have a
		 * valid request_limit. We don't want Linux scanning before
		 * we are ready.
		 */
		for (wait_switch = jiffies + (init_timeout * HZ);
		     time_before(jiffies, wait_switch) &&
		     atomic_read(&hostdata->request_limit) < 2;) {
			msleep(10);
		}

		/* if we now have a valid request_limit, initiate a scan */
		if (atomic_read(&hostdata->request_limit) > 0)
			scsi_scan_host(host);
	}

	vdev->dev.driver_data = hostdata;
	return 0;

      add_host_failed:
	release_event_pool(&hostdata->pool, hostdata);
      init_pool_failed:
	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
      init_crq_failed:
	scsi_host_put(host);
      scsi_host_alloc_failed:
	return -1;
}
static int ibmvscsi_remove(struct vio_dev *vdev)
{
	struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;

	release_event_pool(&hostdata->pool, hostdata);
	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
				   max_requests);

	scsi_remove_host(hostdata->host);
	scsi_host_put(hostdata->host);

	return 0;
}

/**
 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
 * support.
 */
static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
	{"vscsi", "IBM,v-scsi"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);

static struct vio_driver ibmvscsi_driver = {
	.id_table = ibmvscsi_device_table,
	.probe = ibmvscsi_probe,
	.remove = ibmvscsi_remove,
	.driver = {
		.name = "ibmvscsi",
		.owner = THIS_MODULE,
	}
};

int __init ibmvscsi_module_init(void)
{
	return vio_register_driver(&ibmvscsi_driver);
}

void __exit ibmvscsi_module_exit(void)
{
	vio_unregister_driver(&ibmvscsi_driver);
}

module_init(ibmvscsi_module_init);
module_exit(ibmvscsi_module_exit);