
/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *          Dave Boutcher (sleddog@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * Emulation of a SCSI host adapter for Virtual I/O devices
 *
 * This driver supports the SCSI adapter implemented by the IBM
 * Power5 firmware. That SCSI adapter is not a physical adapter,
 * but allows Linux SCSI peripheral drivers to directly
 * access devices in another logical partition on the physical system.
 *
 * The virtual adapter(s) are present in the open firmware device
 * tree just like real adapters.
 *
 * One of the capabilities provided on these systems is the ability
 * to DMA between partitions. The architecture states that for VSCSI,
 * the server side is allowed to DMA to and from the client. The client
 * is never trusted to DMA to or from the server directly.
 *
 * Messages are sent between partitions on a "Command/Response Queue"
 * (CRQ), which is just a buffer of 16-byte entries in the receiver's
 * memory. Senders cannot access the buffer directly, but send messages
 * by making a hypervisor call and passing in the 16 bytes. The
 * hypervisor puts the message in the next 16-byte slot in round-robin
 * fashion, turns on the high-order bit of the message (the valid bit),
 * and generates an interrupt to the receiver (if interrupts are turned
 * on). The receiver just turns off the valid bit when it has copied
 * out the message.
 *
 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
 * (IU) (as defined in the T10 standard available at www.t10.org), gets
 * a DMA address for the message, and sends it to the server as the
 * payload of a CRQ message. The server DMAs the SRP IU and processes it,
 * including doing any additional data transfers. When it is done, it
 * DMAs the SRP response back to the same address the request came from
 * and sends a CRQ message back to inform the client that the request
 * has completed.
 *
 * Note that some of the underlying infrastructure is different between
 * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
 * the older iSeries hypervisor models. To support both, some low-level
 * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
 * The Makefile should pick one, not two, not zero, of these.
 *
 * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
 * interfaces. It would be really nice to abstract this above an RDMA
 * layer.
 */
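
/*
 * Illustrative sketch (not part of the driver): a CRQ entry as used
 * here is 16 bytes, with the valid byte first.  The field names below
 * follow struct viosrp_crq used throughout this file; exact widths and
 * offsets are defined in the header, so treat this only as a mnemonic
 * layout:
 *
 *      u8  valid;        0x80 = payload, 0xC0 = init, 0xFF = transport
 *      u8  format;       VIOSRP_SRP_FORMAT / VIOSRP_MAD_FORMAT, ...
 *      ...               (reserved / timeout bytes)
 *      u16 IU_length;    length of the SRP IU being pointed to
 *      u64 IU_data_ptr;  DMA address of the IU (or the sender's tag)
 *
 * ibmvscsi_send_crq() hands the 16 bytes to firmware as two u64s,
 * which is why ibmvscsi_send_srp_event() below casts &evt_struct->crq
 * to a u64 array before sending.
 */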
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include "ibmvscsi.h"

/* The values below are somewhat arbitrary default values, but
 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
 * Note that there are 3 bits of channel value, 6 bits of id, and
 * 5 bits of LUN.
 */
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 5;
static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;

#define IBMVSCSI_VERSION "1.5.8"

MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSI_VERSION);

module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
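
/*
 * Usage note (illustrative, not part of the driver): these parameters
 * can be given at load time, e.g.
 *
 *      modprobe ibmvscsi max_requests=50 init_timeout=10
 *
 * and, since they are declared S_IWUSR, they should also be writable
 * through /sys/module/ibmvscsi/parameters/ after the module loads.
 */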
/* ------------------------------------------------------------
 * Routines for the event pool and event structs
 */
/**
 * initialize_event_pool: - Allocates and initializes the event pool for a host
 * @pool: event_pool to be initialized
 * @size: Number of events in pool
 * @hostdata: ibmvscsi_host_data who owns the event pool
 *
 * Returns zero on success.
 */
static int initialize_event_pool(struct event_pool *pool,
                                 int size, struct ibmvscsi_host_data *hostdata)
{
        int i;

        pool->size = size;
        pool->next = 0;
        pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
        if (!pool->events)
                return -ENOMEM;

        pool->iu_storage =
                dma_alloc_coherent(hostdata->dev,
                                   pool->size * sizeof(*pool->iu_storage),
                                   &pool->iu_token, 0);
        if (!pool->iu_storage) {
                kfree(pool->events);
                return -ENOMEM;
        }

        for (i = 0; i < pool->size; ++i) {
                struct srp_event_struct *evt = &pool->events[i];

                memset(&evt->crq, 0x00, sizeof(evt->crq));
                atomic_set(&evt->free, 1);
                evt->crq.valid = 0x80;
                evt->crq.IU_length = sizeof(*evt->xfer_iu);
                evt->crq.IU_data_ptr = pool->iu_token +
                        sizeof(*evt->xfer_iu) * i;
                evt->xfer_iu = pool->iu_storage + i;
                evt->hostdata = hostdata;
                evt->ext_list = NULL;
                evt->ext_list_token = 0;
        }
        return 0;
}
/**
 * release_event_pool: - Frees memory of an event pool of a host
 * @pool: event_pool to be released
 * @hostdata: ibmvscsi_host_data who owns the event pool
 */
static void release_event_pool(struct event_pool *pool,
                               struct ibmvscsi_host_data *hostdata)
{
        int i, in_use = 0;

        for (i = 0; i < pool->size; ++i) {
                if (atomic_read(&pool->events[i].free) != 1)
                        ++in_use;
                if (pool->events[i].ext_list) {
                        dma_free_coherent(hostdata->dev,
                                          SG_ALL * sizeof(struct srp_direct_buf),
                                          pool->events[i].ext_list,
                                          pool->events[i].ext_list_token);
                }
        }
        if (in_use)
                dev_warn(hostdata->dev, "releasing event pool with %d "
                         "events still in use?\n", in_use);
        kfree(pool->events);
        dma_free_coherent(hostdata->dev,
                          pool->size * sizeof(*pool->iu_storage),
                          pool->iu_storage, pool->iu_token);
}

/**
 * valid_event_struct: - Determines if event is valid.
 * @pool: event_pool that contains the event
 * @evt: srp_event_struct to be checked for validity
 *
 * Returns zero if event is invalid, one otherwise.
 */
static int valid_event_struct(struct event_pool *pool,
                              struct srp_event_struct *evt)
{
        int index = evt - pool->events;

        if (index < 0 || index >= pool->size)   /* outside of bounds */
                return 0;
        if (evt != pool->events + index)        /* unaligned */
                return 0;
        return 1;
}
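
/*
 * Aside (illustrative): the "unaligned" test above works because, in
 * practice, the pointer subtraction truncates toward the start of the
 * containing array element.  A correlation token pointing into the
 * middle of an entry yields an index for which pool->events + index
 * reconstructs the entry's start, which then fails to compare equal
 * to the original pointer.
 */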
/**
 * free_event_struct: - Changes status of event to "free"
 * @pool: event_pool that contains the event
 * @evt: srp_event_struct to be modified
 */
static void free_event_struct(struct event_pool *pool,
                              struct srp_event_struct *evt)
{
        if (!valid_event_struct(pool, evt)) {
                dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
                        "(not in pool %p)\n", evt, pool->events);
                return;
        }
        if (atomic_inc_return(&evt->free) != 1) {
                dev_err(evt->hostdata->dev, "Freeing event_struct %p "
                        "which is not in use!\n", evt);
                return;
        }
}

/**
 * get_event_struct: - Gets the next free event in pool
 * @pool: event_pool that contains the events to be searched
 *
 * Returns the next event in "free" state, and NULL if none are free.
 * Note that no synchronization is done here; we assume the host_lock
 * will synchronize things.
 */
static struct srp_event_struct *get_event_struct(struct event_pool *pool)
{
        int i;
        int poolsize = pool->size;
        int offset = pool->next;

        for (i = 0; i < poolsize; i++) {
                offset = (offset + 1) % poolsize;
                if (!atomic_dec_if_positive(&pool->events[offset].free)) {
                        pool->next = offset;
                        return &pool->events[offset];
                }
        }
        printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
        return NULL;
}
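
/*
 * Note (illustrative): atomic_dec_if_positive() returns the old value
 * minus one, and only performs the decrement when that result is
 * non-negative.  So a free entry (free == 1) returns 0 and is claimed
 * atomically, while a busy entry (free == 0) returns -1 and the loop
 * moves on.  That is why !atomic_dec_if_positive(...) means "got it".
 */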
/**
 * init_event_struct: Initialize fields in an event struct that are always
 * required.
 * @evt_struct: The event
 * @done: Routine to call when the event is responded to
 * @format: SRP or MAD format
 * @timeout: timeout value set in the CRQ
 */
static void init_event_struct(struct srp_event_struct *evt_struct,
                              void (*done) (struct srp_event_struct *),
                              u8 format,
                              int timeout)
{
        evt_struct->cmnd = NULL;
        evt_struct->cmnd_done = NULL;
        evt_struct->sync_srp = NULL;
        evt_struct->crq.format = format;
        evt_struct->crq.timeout = timeout;
        evt_struct->done = done;
}

/* ------------------------------------------------------------
 * Routines for receiving SCSI responses from the hosting partition
 */
/**
 * set_srp_direction: Set the fields in the srp related to data
 * direction and number of buffers based on the direction in
 * the scsi_cmnd and the number of buffers
 */
static void set_srp_direction(struct scsi_cmnd *cmd,
                              struct srp_cmd *srp_cmd,
                              int numbuf)
{
        u8 fmt;

        if (numbuf == 0)
                return;

        if (numbuf == 1)
                fmt = SRP_DATA_DESC_DIRECT;
        else {
                fmt = SRP_DATA_DESC_INDIRECT;
                numbuf = min(numbuf, MAX_INDIRECT_BUFS);
                if (cmd->sc_data_direction == DMA_TO_DEVICE)
                        srp_cmd->data_out_desc_cnt = numbuf;
                else
                        srp_cmd->data_in_desc_cnt = numbuf;
        }

        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                srp_cmd->buf_fmt = fmt << 4;
        else
                srp_cmd->buf_fmt = fmt;
}
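
/*
 * Illustrative sketch: srp_cmd->buf_fmt packs two 4-bit format codes,
 * the data-out format in the high nibble and the data-in format in
 * the low nibble.  For a single-buffer write, for example:
 *
 *      srp_cmd->buf_fmt = SRP_DATA_DESC_DIRECT << 4;   high nibble = out
 *
 * unmap_cmd_data() below recovers the two codes with "buf_fmt >> 4"
 * and "buf_fmt & 0xf".
 */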
static void unmap_sg_list(int num_entries,
                          struct device *dev,
                          struct srp_direct_buf *md)
{
        int i;

        for (i = 0; i < num_entries; ++i)
                dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
}

/**
 * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
 * @cmd: srp_cmd whose additional_data member will be unmapped
 * @dev: device for which the memory is mapped
 */
static void unmap_cmd_data(struct srp_cmd *cmd,
                           struct srp_event_struct *evt_struct,
                           struct device *dev)
{
        u8 out_fmt, in_fmt;

        out_fmt = cmd->buf_fmt >> 4;
        in_fmt = cmd->buf_fmt & ((1U << 4) - 1);

        if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
                return;
        else if (out_fmt == SRP_DATA_DESC_DIRECT ||
                 in_fmt == SRP_DATA_DESC_DIRECT) {
                struct srp_direct_buf *data =
                        (struct srp_direct_buf *) cmd->add_data;
                dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
        } else {
                struct srp_indirect_buf *indirect =
                        (struct srp_indirect_buf *) cmd->add_data;
                int num_mapped = indirect->table_desc.len /
                        sizeof(struct srp_direct_buf);

                if (num_mapped <= MAX_INDIRECT_BUFS) {
                        unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
                        return;
                }
                unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
        }
}

static int map_sg_list(int num_entries,
                       struct scatterlist *sg,
                       struct srp_direct_buf *md)
{
        int i;
        u64 total_length = 0;

        for (i = 0; i < num_entries; ++i) {
                struct srp_direct_buf *descr = md + i;
                struct scatterlist *sg_entry = &sg[i];

                descr->va = sg_dma_address(sg_entry);
                descr->len = sg_dma_len(sg_entry);
                descr->key = 0;
                total_length += sg_dma_len(sg_entry);
        }
        return total_length;
}
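
/*
 * Note (observation on the code above): map_sg_list() accumulates the
 * segment lengths in a u64 but returns int, so the total would be
 * truncated if a single command ever mapped more than INT_MAX bytes.
 * The callers here only use the value to fill indirect->len.
 */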
/**
 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
 * @cmd: Scsi_Cmnd with the scatterlist
 * @srp_cmd: srp_cmd that contains the memory descriptor
 * @dev: device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_sg_data(struct scsi_cmnd *cmd,
                       struct srp_event_struct *evt_struct,
                       struct srp_cmd *srp_cmd, struct device *dev)
{
        int sg_mapped;
        u64 total_length = 0;
        struct scatterlist *sg = cmd->request_buffer;
        struct srp_direct_buf *data =
                (struct srp_direct_buf *) srp_cmd->add_data;
        struct srp_indirect_buf *indirect =
                (struct srp_indirect_buf *) data;

        sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
        if (sg_mapped == 0)
                return 0;

        set_srp_direction(cmd, srp_cmd, sg_mapped);

        /* special case; we can use a single direct descriptor */
        if (sg_mapped == 1) {
                data->va = sg_dma_address(&sg[0]);
                data->len = sg_dma_len(&sg[0]);
                data->key = 0;
                return 1;
        }

        indirect->table_desc.va = 0;
        indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
        indirect->table_desc.key = 0;

        if (sg_mapped <= MAX_INDIRECT_BUFS) {
                total_length = map_sg_list(sg_mapped, sg,
                                           &indirect->desc_list[0]);
                indirect->len = total_length;
                return 1;
        }

        /* get indirect table */
        if (!evt_struct->ext_list) {
                evt_struct->ext_list = (struct srp_direct_buf *)
                        dma_alloc_coherent(dev,
                                           SG_ALL * sizeof(struct srp_direct_buf),
                                           &evt_struct->ext_list_token, 0);
                if (!evt_struct->ext_list) {
                        sdev_printk(KERN_ERR, cmd->device,
                                    "Can't allocate memory for indirect table\n");
                        return 0;
                }
        }

        total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
        indirect->len = total_length;
        indirect->table_desc.va = evt_struct->ext_list_token;
        indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
        memcpy(indirect->desc_list, evt_struct->ext_list,
               MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
        return 1;
}
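
/*
 * Summary (illustrative) of the three mapping shapes produced above:
 *
 *      1 DMA segment          -> one srp_direct_buf inline in the IU
 *      2..MAX_INDIRECT_BUFS   -> srp_indirect_buf with the descriptor
 *                                list inline in the IU
 *      more than that         -> descriptor list in a separately
 *                                DMA-mapped "ext_list" table, with the
 *                                first MAX_INDIRECT_BUFS entries also
 *                                copied inline
 */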
/**
 * map_single_data: - Maps memory and initializes memory descriptor fields
 * @cmd: struct scsi_cmnd with the memory to be mapped
 * @srp_cmd: srp_cmd that contains the memory descriptor
 * @dev: device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_single_data(struct scsi_cmnd *cmd,
                           struct srp_cmd *srp_cmd, struct device *dev)
{
        struct srp_direct_buf *data =
                (struct srp_direct_buf *) srp_cmd->add_data;

        data->va =
                dma_map_single(dev, cmd->request_buffer,
                               cmd->request_bufflen,
                               DMA_BIDIRECTIONAL);
        if (dma_mapping_error(data->va)) {
                sdev_printk(KERN_ERR, cmd->device,
                            "Unable to map request_buffer for command!\n");
                return 0;
        }
        data->len = cmd->request_bufflen;
        data->key = 0;

        set_srp_direction(cmd, srp_cmd, 1);
        return 1;
}

/**
 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 * @cmd: struct scsi_cmnd with the memory to be mapped
 * @srp_cmd: srp_cmd that contains the memory descriptor
 * @dev: dma device for which to map dma memory
 *
 * Called by ibmvscsi_queuecommand() when converting scsi cmds to srp cmds.
 * Returns 1 on success.
 */
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
                                struct srp_event_struct *evt_struct,
                                struct srp_cmd *srp_cmd, struct device *dev)
{
        switch (cmd->sc_data_direction) {
        case DMA_FROM_DEVICE:
        case DMA_TO_DEVICE:
                break;
        case DMA_NONE:
                return 1;
        case DMA_BIDIRECTIONAL:
                sdev_printk(KERN_ERR, cmd->device,
                            "Can't map DMA_BIDIRECTIONAL to read/write\n");
                return 0;
        default:
                sdev_printk(KERN_ERR, cmd->device,
                            "Unknown data direction 0x%02x; can't map!\n",
                            cmd->sc_data_direction);
                return 0;
        }

        if (!cmd->request_buffer)
                return 1;
        if (cmd->use_sg)
                return map_sg_data(cmd, evt_struct, srp_cmd, dev);
        return map_single_data(cmd, srp_cmd, dev);
}
/**
 * purge_requests: Our virtual adapter just shut down. Purge any sent requests.
 * @hostdata: the adapter
 * @error_code: scsi result to give each purged command
 */
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
{
        struct srp_event_struct *tmp_evt, *pos;
        unsigned long flags;

        spin_lock_irqsave(hostdata->host->host_lock, flags);
        list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
                list_del(&tmp_evt->list);
                del_timer(&tmp_evt->timer);
                if (tmp_evt->cmnd) {
                        tmp_evt->cmnd->result = (error_code << 16);
                        unmap_cmd_data(&tmp_evt->iu.srp.cmd,
                                       tmp_evt,
                                       tmp_evt->hostdata->dev);
                        if (tmp_evt->cmnd_done)
                                tmp_evt->cmnd_done(tmp_evt->cmnd);
                } else if (tmp_evt->done)
                        tmp_evt->done(tmp_evt);
                free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
        }
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

/**
 * ibmvscsi_reset_host - Reset the connection to the server
 * @hostdata: struct ibmvscsi_host_data to reset
 */
static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
{
        scsi_block_requests(hostdata->host);
        atomic_set(&hostdata->request_limit, 0);
        purge_requests(hostdata, DID_ERROR);
        if ((ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata)) ||
            (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0)) ||
            (vio_enable_interrupts(to_vio_dev(hostdata->dev)))) {
                atomic_set(&hostdata->request_limit, -1);
                dev_err(hostdata->dev, "error after reset\n");
        }
        scsi_unblock_requests(hostdata->host);
}

/**
 * ibmvscsi_timeout - Internal command timeout handler
 * @evt_struct: struct srp_event_struct that timed out
 *
 * Called when an internally generated command times out
 */
static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
{
        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

        dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
                evt_struct->iu.srp.cmd.opcode);

        ibmvscsi_reset_host(hostdata);
}
/* ------------------------------------------------------------
 * Routines for sending and receiving SRPs
 */
/**
 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
 * @evt_struct: evt_struct to be sent
 * @hostdata: ibmvscsi_host_data of host
 * @timeout: timeout in seconds - 0 means do not time the command
 *
 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
 * Note that this routine assumes that host_lock is held for synchronization
 */
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
                                   struct ibmvscsi_host_data *hostdata,
                                   unsigned long timeout)
{
        u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
        int request_status;
        int rc;

        /* If we have exhausted our request limit, just fail this request,
         * unless it is for a reset or abort.
         * Note that there are rare cases involving driver generated requests
         * (such as task management requests) where the mid layer may think
         * we can handle more requests (can_queue) when we actually can't.
         */
        if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
                request_status =
                        atomic_dec_if_positive(&hostdata->request_limit);
                /* If request limit was -1 when we started, it is now even
                 * less than that
                 */
                if (request_status < -1)
                        goto send_error;
                /* Otherwise, we may have run out of requests. */
                /* Abort and reset calls should make it through.
                 * Nothing except abort and reset should use the last two
                 * slots unless we had two or less to begin with.
                 */
                else if (request_status < 2 &&
                         evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
                        /* In the case that we have less than two requests
                         * available, check the server limit as a combination
                         * of the request limit and the number of requests
                         * in-flight (the size of the send list). If the
                         * server limit is greater than 2, return busy so
                         * that the last two are reserved for reset and abort.
                         */
                        int server_limit = request_status;
                        struct srp_event_struct *tmp_evt;

                        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
                                server_limit++;
                        }

                        if (server_limit > 2)
                                goto send_busy;
                }
        }

        /* Copy the IU into the transfer area */
        *evt_struct->xfer_iu = evt_struct->iu;
        evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;

        /* Add this to the sent list. We need to do this
         * before we actually send
         * in case it comes back REALLY fast
         */
        list_add_tail(&evt_struct->list, &hostdata->sent);

        init_timer(&evt_struct->timer);
        if (timeout) {
                evt_struct->timer.data = (unsigned long) evt_struct;
                evt_struct->timer.expires = jiffies + (timeout * HZ);
                evt_struct->timer.function =
                        (void (*)(unsigned long))ibmvscsi_timeout;
                add_timer(&evt_struct->timer);
        }

        if ((rc =
             ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
                list_del(&evt_struct->list);
                del_timer(&evt_struct->timer);

                dev_err(hostdata->dev, "send error %d\n", rc);
                atomic_inc(&hostdata->request_limit);
                goto send_error;
        }

        return 0;

 send_busy:
        unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

        free_event_struct(&hostdata->pool, evt_struct);
        atomic_inc(&hostdata->request_limit);
        return SCSI_MLQUEUE_HOST_BUSY;

 send_error:
        unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

        if (evt_struct->cmnd != NULL) {
                evt_struct->cmnd->result = DID_ERROR << 16;
                evt_struct->cmnd_done(evt_struct->cmnd);
        } else if (evt_struct->done)
                evt_struct->done(evt_struct);

        free_event_struct(&hostdata->pool, evt_struct);
        return 0;
}
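
/*
 * Aside (illustrative): the SRP tag set above is literally the kernel
 * pointer to the evt_struct, cast to u64.  When the response arrives,
 * ibmvscsi_handle_crq() casts crq->IU_data_ptr back to a
 * struct srp_event_struct *, then defends against a corrupt or stale
 * token with valid_event_struct() before touching it.
 */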
/**
 * handle_cmd_rsp: - Handle responses from commands
 * @evt_struct: srp_event_struct to be handled
 *
 * Used as a callback when sending scsi cmds.
 * Gets called by ibmvscsi_handle_crq()
 */
static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
{
        struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
        struct scsi_cmnd *cmnd = evt_struct->cmnd;

        if (unlikely(rsp->opcode != SRP_RSP)) {
                if (printk_ratelimit())
                        dev_warn(evt_struct->hostdata->dev,
                                 "bad SRP RSP type %d\n", rsp->opcode);
        }

        if (cmnd) {
                cmnd->result = rsp->status;
                if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
                        memcpy(cmnd->sense_buffer,
                               rsp->data,
                               rsp->sense_data_len);
                unmap_cmd_data(&evt_struct->iu.srp.cmd,
                               evt_struct,
                               evt_struct->hostdata->dev);

                if (rsp->flags & SRP_RSP_FLAG_DOOVER)
                        cmnd->resid = rsp->data_out_res_cnt;
                else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
                        cmnd->resid = rsp->data_in_res_cnt;
        }

        if (evt_struct->cmnd_done)
                evt_struct->cmnd_done(cmnd);
}

/**
 * lun_from_dev: - Returns the lun of the scsi device
 * @dev: struct scsi_device
 */
static inline u16 lun_from_dev(struct scsi_device *dev)
{
        return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
}
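
/*
 * Worked example (illustrative) of the 16-bit LUN encoding above,
 * matching the "3 bits of channel, 6 bits of id, 5 bits of LUN" note
 * at the top of the file: for id 2, channel 1, lun 3,
 *
 *      (0x2 << 14) | (2 << 8) | (1 << 5) | 3 = 0x8223
 *
 * The 0x2 in the top two bits selects the SCSI-3 "logical unit
 * addressing" method; callers then shift the whole value into the
 * top 16 bits of the 8-byte SRP LUN field ("<< 48").
 */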
/**
 * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
 * @cmnd: struct scsi_cmnd to be executed
 * @done: Callback function to be called when cmnd is completed
 */
static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
                                 void (*done) (struct scsi_cmnd *))
{
        struct srp_cmd *srp_cmd;
        struct srp_event_struct *evt_struct;
        struct srp_indirect_buf *indirect;
        struct ibmvscsi_host_data *hostdata =
                (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
        u16 lun = lun_from_dev(cmnd->device);
        u8 out_fmt, in_fmt;

        evt_struct = get_event_struct(&hostdata->pool);
        if (!evt_struct)
                return SCSI_MLQUEUE_HOST_BUSY;

        /* Set up the actual SRP IU */
        srp_cmd = &evt_struct->iu.srp.cmd;
        memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
        srp_cmd->opcode = SRP_CMD;
        memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
        srp_cmd->lun = ((u64) lun) << 48;

        if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
                sdev_printk(KERN_ERR, cmnd->device,
                            "couldn't convert cmd to srp_cmd\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        init_event_struct(evt_struct,
                          handle_cmd_rsp,
                          VIOSRP_SRP_FORMAT,
                          cmnd->timeout_per_command/HZ);

        evt_struct->cmnd = cmnd;
        evt_struct->cmnd_done = done;

        /* Fix up dma address of the buffer itself */
        indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
        out_fmt = srp_cmd->buf_fmt >> 4;
        in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
        if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
             out_fmt == SRP_DATA_DESC_INDIRECT) &&
            indirect->table_desc.va == 0) {
                indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
                        offsetof(struct srp_cmd, add_data) +
                        offsetof(struct srp_indirect_buf, desc_list);
        }

        return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
}
/* ------------------------------------------------------------
 * Routines for driver initialization
 */
/**
 * adapter_info_rsp: - Handle response to MAD adapter info request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending adapter_info. Gets called
 * by ibmvscsi_handle_crq()
 */
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

        dma_unmap_single(hostdata->dev,
                         evt_struct->iu.mad.adapter_info.buffer,
                         evt_struct->iu.mad.adapter_info.common.length,
                         DMA_BIDIRECTIONAL);

        if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
                dev_err(hostdata->dev, "error %d getting adapter info\n",
                        evt_struct->xfer_iu->mad.adapter_info.common.status);
        } else {
                dev_info(hostdata->dev, "host srp version: %s, "
                         "host partition %s (%d), OS %d, max io %u\n",
                         hostdata->madapter_info.srp_version,
                         hostdata->madapter_info.partition_name,
                         hostdata->madapter_info.partition_number,
                         hostdata->madapter_info.os_type,
                         hostdata->madapter_info.port_max_txu[0]);

                if (hostdata->madapter_info.port_max_txu[0])
                        hostdata->host->max_sectors =
                                hostdata->madapter_info.port_max_txu[0] >> 9;

                if (hostdata->madapter_info.os_type == 3 &&
                    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
                        dev_err(hostdata->dev,
                                "host (Ver. %s) doesn't support large transfers\n",
                                hostdata->madapter_info.srp_version);
                        dev_err(hostdata->dev, "limiting scatterlists to %d\n",
                                MAX_INDIRECT_BUFS);
                        hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
                }
        }
}
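
/*
 * Worked example (illustrative): port_max_txu[0] is the server's
 * maximum transfer size in bytes, while max_sectors counts 512-byte
 * sectors, hence the ">> 9" above.  A server advertising 1 MB
 * (1048576 bytes) yields max_sectors = 1048576 >> 9 = 2048.
 */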
/**
 * send_mad_adapter_info: - Sends the mad adapter info request
 *      and stores the result so it can be retrieved with
 *      sysfs. We COULD consider causing a failure if the
 *      returned SRP version doesn't match ours.
 * @hostdata: ibmvscsi_host_data of host
 */
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
        struct viosrp_adapter_info *req;
        struct srp_event_struct *evt_struct;
        unsigned long flags;
        dma_addr_t addr;

        evt_struct = get_event_struct(&hostdata->pool);
        if (!evt_struct) {
                dev_err(hostdata->dev,
                        "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
                return;
        }

        init_event_struct(evt_struct,
                          adapter_info_rsp,
                          VIOSRP_MAD_FORMAT,
                          init_timeout * HZ);

        req = &evt_struct->iu.mad.adapter_info;
        memset(req, 0x00, sizeof(*req));

        req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
        req->common.length = sizeof(hostdata->madapter_info);
        req->buffer = addr = dma_map_single(hostdata->dev,
                                            &hostdata->madapter_info,
                                            sizeof(hostdata->madapter_info),
                                            DMA_BIDIRECTIONAL);

        if (dma_mapping_error(req->buffer)) {
                dev_err(hostdata->dev,
                        "Unable to map request_buffer for adapter_info!\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return;
        }

        spin_lock_irqsave(hostdata->host->host_lock, flags);
        if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
                dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
                dma_unmap_single(hostdata->dev,
                                 addr,
                                 sizeof(hostdata->madapter_info),
                                 DMA_BIDIRECTIONAL);
        }
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}
/**
 * login_rsp: - Handle response to SRP login request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending srp_login. Gets called
 * by ibmvscsi_handle_crq()
 */
static void login_rsp(struct srp_event_struct *evt_struct)
{
        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

        switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
        case SRP_LOGIN_RSP:     /* it worked! */
                break;
        case SRP_LOGIN_REJ:     /* refused! */
                dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
                         evt_struct->xfer_iu->srp.login_rej.reason);
                /* Login failed. */
                atomic_set(&hostdata->request_limit, -1);
                return;
        default:
                dev_err(hostdata->dev,
                        "Invalid login response typecode 0x%02x!\n",
                        evt_struct->xfer_iu->srp.login_rsp.opcode);
                /* Login failed. */
                atomic_set(&hostdata->request_limit, -1);
                return;
        }

        dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");

        if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
                dev_err(hostdata->dev, "Invalid request_limit.\n");

        /* Now we know what the real request-limit is.
         * This value is set rather than added to request_limit because
         * request_limit could have been set to -1 by this client.
         */
        atomic_set(&hostdata->request_limit,
                   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);

        /* If we had any pending I/Os, kick them */
        scsi_unblock_requests(hostdata->host);

        send_mad_adapter_info(hostdata);
        return;
}
/**
 * send_srp_login: - Sends the srp login
 * @hostdata: ibmvscsi_host_data of host
 *
 * Returns zero if successful.
 */
static int send_srp_login(struct ibmvscsi_host_data *hostdata)
{
        int rc;
        unsigned long flags;
        struct srp_login_req *login;
        struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);

        if (!evt_struct) {
                dev_err(hostdata->dev,
                        "couldn't allocate an event for login req!\n");
                return FAILED;
        }

        init_event_struct(evt_struct,
                          login_rsp,
                          VIOSRP_SRP_FORMAT,
                          init_timeout * HZ);

        login = &evt_struct->iu.srp.login_req;
        memset(login, 0x00, sizeof(struct srp_login_req));
        login->opcode = SRP_LOGIN_REQ;
        login->req_it_iu_len = sizeof(union srp_iu);
        login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

        spin_lock_irqsave(hostdata->host->host_lock, flags);
        /* Start out with a request limit of 1, since this is negotiated in
         * the login request we are just sending
         */
        atomic_set(&hostdata->request_limit, 1);

        rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        dev_info(hostdata->dev, "sent SRP login\n");
        return rc;
}

/**
 * sync_completion: Signal that a synchronous command has completed
 * Note that after returning from this call, the evt_struct is freed.
 * The caller waiting on this completion shouldn't touch the evt_struct
 * again.
 */
static void sync_completion(struct srp_event_struct *evt_struct)
{
        /* copy the response back */
        if (evt_struct->sync_srp)
                *evt_struct->sync_srp = *evt_struct->xfer_iu;

        complete(&evt_struct->comp);
}
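
/*
 * Pattern note (illustrative): the synchronous callers below all
 * follow the same shape --
 *
 *      evt->sync_srp = &srp_rsp;
 *      init_completion(&evt->comp);
 *      rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, ...);
 *      ...
 *      wait_for_completion(&evt->comp);
 *
 * sync_completion() snapshots the response IU into srp_rsp before
 * completing, precisely because ibmvscsi_handle_crq() frees the
 * evt_struct right after calling done().
 */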
/**
 * ibmvscsi_eh_abort_handler: Abort a command... from the scsi host
 * template: send this over to the server and wait synchronously for
 * the response
 */
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
{
        struct ibmvscsi_host_data *hostdata =
                (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
        struct srp_tsk_mgmt *tsk_mgmt;
        struct srp_event_struct *evt;
        struct srp_event_struct *tmp_evt, *found_evt;
        union viosrp_iu srp_rsp;
        int rsp_rc;
        unsigned long flags;
        u16 lun = lun_from_dev(cmd->device);

        /* First, find this command in our sent list so we can figure
         * out the correct tag
         */
        spin_lock_irqsave(hostdata->host->host_lock, flags);
        found_evt = NULL;
        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
                if (tmp_evt->cmnd == cmd) {
                        found_evt = tmp_evt;
                        break;
                }
        }

        if (!found_evt) {
                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                return FAILED;
        }

        evt = get_event_struct(&hostdata->pool);
        if (evt == NULL) {
                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                sdev_printk(KERN_ERR, cmd->device,
                            "failed to allocate abort event\n");
                return FAILED;
        }

        init_event_struct(evt,
                          sync_completion,
                          VIOSRP_SRP_FORMAT,
                          init_timeout * HZ);

        tsk_mgmt = &evt->iu.srp.tsk_mgmt;

        /* Set up an abort SRP command */
        memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
        tsk_mgmt->opcode = SRP_TSK_MGMT;
        tsk_mgmt->lun = ((u64) lun) << 48;
        tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
        tsk_mgmt->task_tag = (u64) found_evt;

        sdev_printk(KERN_INFO, cmd->device,
                    "aborting command. lun 0x%lx, tag 0x%lx\n",
                    tsk_mgmt->lun, tsk_mgmt->task_tag);

        evt->sync_srp = &srp_rsp;
        init_completion(&evt->comp);
        rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        if (rsp_rc != 0) {
                sdev_printk(KERN_ERR, cmd->device,
                            "failed to send abort() event. rc=%d\n", rsp_rc);
                return FAILED;
        }

        wait_for_completion(&evt->comp);

        /* make sure we got a good response */
        if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
                if (printk_ratelimit())
                        sdev_printk(KERN_WARNING, cmd->device,
                                    "abort bad SRP RSP type %d\n",
                                    srp_rsp.srp.rsp.opcode);
                return FAILED;
        }

        if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
                rsp_rc = *((int *)srp_rsp.srp.rsp.data);
        else
                rsp_rc = srp_rsp.srp.rsp.status;

        if (rsp_rc) {
                if (printk_ratelimit())
                        sdev_printk(KERN_WARNING, cmd->device,
                                    "abort code %d for task tag 0x%lx\n",
                                    rsp_rc, tsk_mgmt->task_tag);
                return FAILED;
        }

        /* Because we dropped the spinlock above, it's possible
         * the event is no longer in our list. Make sure it didn't
         * complete while we were aborting
         */
        spin_lock_irqsave(hostdata->host->host_lock, flags);
        found_evt = NULL;
        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
                if (tmp_evt->cmnd == cmd) {
                        found_evt = tmp_evt;
                        break;
                }
        }

        if (found_evt == NULL) {
                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                sdev_printk(KERN_INFO, cmd->device,
                            "aborted task tag 0x%lx completed\n",
                            tsk_mgmt->task_tag);
                return SUCCESS;
        }

        sdev_printk(KERN_INFO, cmd->device,
                    "successfully aborted task tag 0x%lx\n",
                    tsk_mgmt->task_tag);

        cmd->result = (DID_ABORT << 16);
        list_del(&found_evt->list);
        unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
                       found_evt->hostdata->dev);
        free_event_struct(&found_evt->hostdata->pool, found_evt);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        atomic_inc(&hostdata->request_limit);
        return SUCCESS;
}
/**
 * ibmvscsi_eh_device_reset_handler: Reset a single LUN... from the scsi
 * host template: send this over to the server and wait synchronously
 * for the response
 */
static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
        struct ibmvscsi_host_data *hostdata =
                (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
        struct srp_tsk_mgmt *tsk_mgmt;
        struct srp_event_struct *evt;
        struct srp_event_struct *tmp_evt, *pos;
        union viosrp_iu srp_rsp;
        int rsp_rc;
        unsigned long flags;
        u16 lun = lun_from_dev(cmd->device);

        spin_lock_irqsave(hostdata->host->host_lock, flags);
        evt = get_event_struct(&hostdata->pool);
        if (evt == NULL) {
                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                sdev_printk(KERN_ERR, cmd->device,
                            "failed to allocate reset event\n");
                return FAILED;
        }

        init_event_struct(evt,
                          sync_completion,
                          VIOSRP_SRP_FORMAT,
                          init_timeout * HZ);

        tsk_mgmt = &evt->iu.srp.tsk_mgmt;

        /* Set up a lun reset SRP command */
        memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
        tsk_mgmt->opcode = SRP_TSK_MGMT;
        tsk_mgmt->lun = ((u64) lun) << 48;
        tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;

        sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
                    tsk_mgmt->lun);

        evt->sync_srp = &srp_rsp;
        init_completion(&evt->comp);
        rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        if (rsp_rc != 0) {
                sdev_printk(KERN_ERR, cmd->device,
                            "failed to send reset event. rc=%d\n", rsp_rc);
                return FAILED;
        }

        wait_for_completion(&evt->comp);

        /* make sure we got a good response */
        if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
                if (printk_ratelimit())
                        sdev_printk(KERN_WARNING, cmd->device,
                                    "reset bad SRP RSP type %d\n",
                                    srp_rsp.srp.rsp.opcode);
                return FAILED;
        }

        if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
                rsp_rc = *((int *)srp_rsp.srp.rsp.data);
        else
                rsp_rc = srp_rsp.srp.rsp.status;

        if (rsp_rc) {
                if (printk_ratelimit())
                        sdev_printk(KERN_WARNING, cmd->device,
                                    "reset code %d for task tag 0x%lx\n",
                                    rsp_rc, tsk_mgmt->task_tag);
                return FAILED;
        }

        /* We need to find all commands for this LUN that have not yet been
         * responded to, and fail them with DID_RESET
         */
        spin_lock_irqsave(hostdata->host->host_lock, flags);
        list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
                if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
                        if (tmp_evt->cmnd)
                                tmp_evt->cmnd->result = (DID_RESET << 16);
                        list_del(&tmp_evt->list);
                        unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
                                       tmp_evt->hostdata->dev);
                        free_event_struct(&tmp_evt->hostdata->pool,
                                          tmp_evt);
                        atomic_inc(&hostdata->request_limit);
                        if (tmp_evt->cmnd_done)
                                tmp_evt->cmnd_done(tmp_evt->cmnd);
                        else if (tmp_evt->done)
                                tmp_evt->done(tmp_evt);
                }
        }
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        return SUCCESS;
}

/**
 * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
 * @cmd: struct scsi_cmnd having problems
 */
static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
        unsigned long wait_switch = 0;
        struct ibmvscsi_host_data *hostdata =
                (struct ibmvscsi_host_data *)cmd->device->host->hostdata;

        dev_err(hostdata->dev, "Resetting connection due to error recovery\n");

        ibmvscsi_reset_host(hostdata);

        for (wait_switch = jiffies + (init_timeout * HZ);
             time_before(jiffies, wait_switch) &&
             atomic_read(&hostdata->request_limit) < 2;) {
                msleep(10);
        }

        if (atomic_read(&hostdata->request_limit) <= 0)
                return FAILED;

        return SUCCESS;
}
/**
 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
 * @crq: Command/Response queue
 * @hostdata: ibmvscsi_host_data of host
 */
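/*
 * Dispatch summary (illustrative) for the switch on crq->valid below:
 *
 *      0xC0  initialization: format 0x01 = partner's init message
 *            (send a response, then login), 0x02 = partner's response
 *            to our init (login)
 *      0xFF  connection closed: format 0x06 = re-enable the queue and
 *            requeue outstanding I/O, anything else = reset the queue
 *            and fail outstanding I/O
 *      0x80  a real payload: IU_data_ptr carries our correlation tag
 */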
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                         struct ibmvscsi_host_data *hostdata)
{
        long rc;
        unsigned long flags;
        struct srp_event_struct *evt_struct =
                (struct srp_event_struct *)crq->IU_data_ptr;

        switch (crq->valid) {
        case 0xC0:              /* initialization */
                switch (crq->format) {
                case 0x01:      /* Initialization message */
                        dev_info(hostdata->dev, "partner initialized\n");
                        /* Send back a response */
                        if ((rc = ibmvscsi_send_crq(hostdata,
                                                    0xC002000000000000LL, 0)) == 0) {
                                /* Now login */
                                send_srp_login(hostdata);
                        } else {
                                dev_err(hostdata->dev,
                                        "Unable to send init rsp. rc=%ld\n", rc);
                        }
                        break;
                case 0x02:      /* Initialization response */
                        dev_info(hostdata->dev,
                                 "partner initialization complete\n");
                        /* Now login */
                        send_srp_login(hostdata);
                        break;
                default:
                        dev_err(hostdata->dev,
                                "unknown crq message type: %d\n", crq->format);
                }
                return;
        case 0xFF:      /* Hypervisor telling us the connection is closed */
                scsi_block_requests(hostdata->host);
                atomic_set(&hostdata->request_limit, 0);
                if (crq->format == 0x06) {
                        /* We need to re-setup the interpartition connection */
                        dev_info(hostdata->dev, "Re-enabling adapter!\n");
                        purge_requests(hostdata, DID_REQUEUE);
                        if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
                                                         hostdata)) ||
                            (ibmvscsi_send_crq(hostdata,
                                               0xC001000000000000LL, 0))) {
                                atomic_set(&hostdata->request_limit, -1);
                                dev_err(hostdata->dev, "error after enable\n");
                        }
                } else {
                        dev_err(hostdata->dev,
                                "Virtual adapter failed rc %d!\n",
                                crq->format);
                        purge_requests(hostdata, DID_ERROR);
                        if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
                                                      hostdata)) ||
                            (ibmvscsi_send_crq(hostdata,
                                               0xC001000000000000LL, 0))) {
                                atomic_set(&hostdata->request_limit, -1);
                                dev_err(hostdata->dev, "error after reset\n");
                        }
                }
                scsi_unblock_requests(hostdata->host);
                return;
        case 0x80:              /* real payload */
                break;
        default:
                dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
                        crq->valid);
                return;
        }

        /* The only kind of payload CRQs we should get are responses to
         * things we send. Make sure this response is to something we
         * actually sent
         */
        if (!valid_event_struct(&hostdata->pool, evt_struct)) {
                dev_err(hostdata->dev,
                        "returned correlation_token 0x%p is invalid!\n",
                        (void *)crq->IU_data_ptr);
                return;
        }

        if (atomic_read(&evt_struct->free)) {
                dev_err(hostdata->dev,
                        "received duplicate correlation_token 0x%p!\n",
                        (void *)crq->IU_data_ptr);
                return;
        }

        if (crq->format == VIOSRP_SRP_FORMAT)
                atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
                           &hostdata->request_limit);

        del_timer(&evt_struct->timer);

        if (evt_struct->done)
                evt_struct->done(evt_struct);
        else
                dev_err(hostdata->dev,
                        "returned done() is NULL; not running it!\n");

        /*
         * Lock the host_lock before messing with these structures, since we
         * are running in a task context
         */
        spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
        list_del(&evt_struct->list);
        free_event_struct(&evt_struct->hostdata->pool, evt_struct);
        spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
}
/**
 * ibmvscsi_do_host_config: Send the command to the server to get host
 * configuration data. The data is opaque to us.
 */
static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
                                   unsigned char *buffer, int length)
{
        struct viosrp_host_config *host_config;
        struct srp_event_struct *evt_struct;
        unsigned long flags;
        dma_addr_t addr;
        int rc;

        evt_struct = get_event_struct(&hostdata->pool);
        if (!evt_struct) {
                dev_err(hostdata->dev,
                        "couldn't allocate event for HOST_CONFIG!\n");
                return -1;
        }

        init_event_struct(evt_struct,
                          sync_completion,
                          VIOSRP_MAD_FORMAT,
                          init_timeout * HZ);

        host_config = &evt_struct->iu.mad.host_config;

        /* Set up a HOST_CONFIG MAD request */
        memset(host_config, 0x00, sizeof(*host_config));
        host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
        host_config->common.length = length;
        host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
                                                    length,
                                                    DMA_BIDIRECTIONAL);

        if (dma_mapping_error(host_config->buffer)) {
                dev_err(hostdata->dev,
                        "dma_mapping error getting host config\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return -1;
        }

        init_completion(&evt_struct->comp);
        spin_lock_irqsave(hostdata->host->host_lock, flags);
        rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        if (rc == 0)
                wait_for_completion(&evt_struct->comp);
        dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);

        return rc;
}
/**
 * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
 * @sdev: struct scsi_device device to configure
 *
 * Enable allow_restart for a device if it is a disk. Adjust the
 * queue_depth here also, as is required by the documentation for
 * struct scsi_host_template.
 */
static int ibmvscsi_slave_configure(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        unsigned long lock_flags = 0;

        spin_lock_irqsave(shost->host_lock, lock_flags);
        if (sdev->type == TYPE_DISK)
                sdev->allow_restart = 1;
        scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
        spin_unlock_irqrestore(shost->host_lock, lock_flags);
        return 0;
}

/**
 * ibmvscsi_change_queue_depth - Change the device's queue depth
 * @sdev: scsi device struct
 * @qdepth: depth to set
 *
 * Return value:
 *      actual depth set
 **/
static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
        if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
                qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;

        scsi_adjust_queue_depth(sdev, 0, qdepth);
        return sdev->queue_depth;
}
  1271. /* ------------------------------------------------------------
  1272. * sysfs attributes
  1273. */
  1274. static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
  1275. {
  1276. struct Scsi_Host *shost = class_to_shost(class_dev);
  1277. struct ibmvscsi_host_data *hostdata =
  1278. (struct ibmvscsi_host_data *)shost->hostdata;
  1279. int len;
  1280. len = snprintf(buf, PAGE_SIZE, "%s\n",
  1281. hostdata->madapter_info.srp_version);
  1282. return len;
  1283. }
  1284. static struct class_device_attribute ibmvscsi_host_srp_version = {
  1285. .attr = {
  1286. .name = "srp_version",
  1287. .mode = S_IRUGO,
  1288. },
  1289. .show = show_host_srp_version,
  1290. };

static ssize_t show_host_partition_name(struct class_device *class_dev,
                                        char *buf)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)shost->hostdata;
        int len;

        len = snprintf(buf, PAGE_SIZE, "%s\n",
                       hostdata->madapter_info.partition_name);
        return len;
}

static struct class_device_attribute ibmvscsi_host_partition_name = {
        .attr = {
                 .name = "partition_name",
                 .mode = S_IRUGO,
                 },
        .show = show_host_partition_name,
};

static ssize_t show_host_partition_number(struct class_device *class_dev,
                                          char *buf)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)shost->hostdata;
        int len;

        len = snprintf(buf, PAGE_SIZE, "%d\n",
                       hostdata->madapter_info.partition_number);
        return len;
}

static struct class_device_attribute ibmvscsi_host_partition_number = {
        .attr = {
                 .name = "partition_number",
                 .mode = S_IRUGO,
                 },
        .show = show_host_partition_number,
};

static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)shost->hostdata;
        int len;

        len = snprintf(buf, PAGE_SIZE, "%d\n",
                       hostdata->madapter_info.mad_version);
        return len;
}

static struct class_device_attribute ibmvscsi_host_mad_version = {
        .attr = {
                 .name = "mad_version",
                 .mode = S_IRUGO,
                 },
        .show = show_host_mad_version,
};

static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)shost->hostdata;
        int len;

        len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
        return len;
}

static struct class_device_attribute ibmvscsi_host_os_type = {
        .attr = {
                 .name = "os_type",
                 .mode = S_IRUGO,
                 },
        .show = show_host_os_type,
};

static ssize_t show_host_config(struct class_device *class_dev, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)shost->hostdata;

        /* returns null-terminated host config data */
        if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
                return strlen(buf);
        else
                return 0;
}

static struct class_device_attribute ibmvscsi_host_config = {
        .attr = {
                 .name = "config",
                 .mode = S_IRUGO,
                 },
        .show = show_host_config,
};

static struct class_device_attribute *ibmvscsi_attrs[] = {
        &ibmvscsi_host_srp_version,
        &ibmvscsi_host_partition_name,
        &ibmvscsi_host_partition_number,
        &ibmvscsi_host_mad_version,
        &ibmvscsi_host_os_type,
        &ibmvscsi_host_config,
        NULL
};
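
/*
 * The attribute list must be NULL-terminated: scsi_add_host() walks it via
 * the template's shost_attrs field and creates one sysfs file per entry.
 */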

/* ------------------------------------------------------------
 * SCSI driver registration
 */
static struct scsi_host_template driver_template = {
        .module = THIS_MODULE,
        .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
        .proc_name = "ibmvscsi",
        .queuecommand = ibmvscsi_queuecommand,
        .eh_abort_handler = ibmvscsi_eh_abort_handler,
        .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
        .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
        .slave_configure = ibmvscsi_slave_configure,
        .change_queue_depth = ibmvscsi_change_queue_depth,
        .cmd_per_lun = 16,
        .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
        .this_id = -1,
        .sg_tablesize = SG_ALL,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = ibmvscsi_attrs,
};
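
/*
 * Note that .can_queue here is only a compiled-in default;
 * ibmvscsi_probe() below overwrites it with the max_requests module
 * parameter before scsi_host_alloc() copies the template into the new
 * Scsi_Host.
 */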

/**
 * ibmvscsi_probe - Called by the vio bus code for each adapter
 */
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
        struct ibmvscsi_host_data *hostdata;
        struct Scsi_Host *host;
        struct device *dev = &vdev->dev;
        unsigned long wait_switch = 0;
        int rc;

        vdev->dev.driver_data = NULL;

        driver_template.can_queue = max_requests;
        host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
        if (!host) {
                dev_err(&vdev->dev, "couldn't allocate host data\n");
                goto scsi_host_alloc_failed;
        }

        hostdata = (struct ibmvscsi_host_data *)host->hostdata;
        memset(hostdata, 0x00, sizeof(*hostdata));
        INIT_LIST_HEAD(&hostdata->sent);
        hostdata->host = host;
        hostdata->dev = dev;
        atomic_set(&hostdata->request_limit, -1);
        hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */

        rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
        if (rc != 0 && rc != H_RESOURCE) {
                dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
                goto init_crq_failed;
        }
        if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
                dev_err(&vdev->dev, "couldn't initialize event pool\n");
                goto init_pool_failed;
        }

        host->max_lun = 8;
        host->max_id = max_id;
        host->max_channel = max_channel;

        if (scsi_add_host(hostdata->host, hostdata->dev))
                goto add_host_failed;

        /* Try to send an initialization message. Note that this is allowed
         * to fail if the other end is not active. In that case we don't
         * want to scan.
         */
        if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
            || rc == H_RESOURCE) {
                /*
                 * Wait around max init_timeout secs for the adapter to finish
                 * initializing. When we are done initializing, we will have a
                 * valid request_limit. We don't want Linux scanning before
                 * we are ready.
                 */
                for (wait_switch = jiffies + (init_timeout * HZ);
                     time_before(jiffies, wait_switch) &&
                     atomic_read(&hostdata->request_limit) < 2;) {
                        msleep(10);
                }

                /* if we now have a valid request_limit, initiate a scan */
                if (atomic_read(&hostdata->request_limit) > 0)
                        scsi_scan_host(host);
        }

        vdev->dev.driver_data = hostdata;
        return 0;

      add_host_failed:
        release_event_pool(&hostdata->pool, hostdata);
      init_pool_failed:
        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
      init_crq_failed:
        scsi_host_put(host);
      scsi_host_alloc_failed:
        return -1;
}
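
/*
 * The tail of ibmvscsi_probe() uses the usual kernel goto-ladder idiom:
 * each label undoes only what was successfully set up before the failure,
 * in reverse order. A minimal sketch of the shape (names hypothetical):
 *
 *      if (setup_a())
 *              goto a_failed;
 *      if (setup_b())
 *              goto b_failed;
 *      return 0;
 * b_failed:
 *      teardown_a();
 * a_failed:
 *      return -1;
 */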

static int ibmvscsi_remove(struct vio_dev *vdev)
{
        struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;

        release_event_pool(&hostdata->pool, hostdata);
        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
                                   max_requests);
        scsi_remove_host(hostdata->host);
        scsi_host_put(hostdata->host);

        return 0;
}

/**
 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
 * support.
 */
static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
        { "vscsi", "IBM,v-scsi" },
        { "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
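
/*
 * MODULE_DEVICE_TABLE() exports the id table as a modalias so userspace
 * can autoload the module when the firmware device tree presents a
 * matching node; the vio bus matches on device_type and compatible. A
 * matching node would look roughly like this (unit address hypothetical):
 *
 *      v-scsi@30000003 {
 *              device_type = "vscsi";
 *              compatible = "IBM,v-scsi";
 *      };
 */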

static struct vio_driver ibmvscsi_driver = {
        .id_table = ibmvscsi_device_table,
        .probe = ibmvscsi_probe,
        .remove = ibmvscsi_remove,
        .driver = {
                .name = "ibmvscsi",
                .owner = THIS_MODULE,
        }
};

int __init ibmvscsi_module_init(void)
{
        return vio_register_driver(&ibmvscsi_driver);
}

void __exit ibmvscsi_module_exit(void)
{
        vio_unregister_driver(&ibmvscsi_driver);
}

module_init(ibmvscsi_module_init);
module_exit(ibmvscsi_module_exit);
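
/*
 * Module load and unload reduce to vio driver (un)registration; probe and
 * remove then run once per matching adapter. Assuming the module
 * parameters used above (max_requests, init_timeout, max_id, max_channel)
 * are declared with module_param() earlier in the file, load-time tuning
 * looks like (values hypothetical):
 *
 *      modprobe ibmvscsi max_requests=50 init_timeout=10
 */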