/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *          Dave Boutcher (sleddog@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * Emulation of a SCSI host adapter for Virtual I/O devices
 *
 * This driver supports the SCSI adapter implemented by the IBM
 * Power5 firmware.  That SCSI adapter is not a physical adapter,
 * but allows Linux SCSI peripheral drivers to directly
 * access devices in another logical partition on the physical system.
 *
 * The virtual adapter(s) are present in the open firmware device
 * tree just like real adapters.
 *
 * One of the capabilities provided on these systems is the ability
 * to DMA between partitions.  The architecture states that for VSCSI,
 * the server side is allowed to DMA to and from the client.  The client
 * is never trusted to DMA to or from the server directly.
 *
 * Messages are sent between partitions on a "Command/Response Queue"
 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
 * memory.  Senders cannot access the buffer directly, but send messages
 * by making a hypervisor call and passing in the 16 bytes.  The hypervisor
 * puts the message in the next 16 byte space in round-robin fashion,
 * turns on the high order bit of the message (the valid bit), and
 * generates an interrupt to the receiver (if interrupts are turned on.)
 * The receiver just turns off the valid bit when they have copied out
 * the message.
 *
 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
 * (IU) (as defined in the T10 standard available at www.t10.org), gets
 * a DMA address for the message, and sends it to the server as the
 * payload of a CRQ message.  The server DMAs the SRP IU and processes it,
 * including doing any additional data transfers.  When it is done, it
 * DMAs the SRP response back to the same address as the request came from,
 * and sends a CRQ message back to inform the client that the request has
 * completed.
 *
 * Note that some of the underlying infrastructure is different between
 * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
 * the older iSeries hypervisor models.  To support both, some low level
 * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
 * The Makefile should pick one, not two, not zero, of these.
 *
 * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
 * interfaces.  It would be really nice to abstract this above an RDMA
 * layer.
 */
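
/*
 * Rough sketch of the client-side request lifecycle implemented below
 * (for orientation only; see ibmvscsi_queuecommand() for the real flow):
 *
 *   evt = get_event_struct(&hostdata->pool);
 *   ... fill in evt->iu.srp.cmd ...
 *   map_data_for_srp_cmd(cmnd, evt, &evt->iu.srp.cmd, hostdata->dev);
 *   init_event_struct(evt, handle_cmd_rsp, VIOSRP_SRP_FORMAT, timeout);
 *   ibmvscsi_send_srp_event(evt, hostdata, 0);
 *      ... firmware interrupt on completion ...
 *   ibmvscsi_handle_crq() -> evt->done() -> free_event_struct()
 */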
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/firmware.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_srp.h>
#include "ibmvscsi.h"
/* The values below are somewhat arbitrary default values, but
 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
 * Note that there are 3 bits of channel value, 6 bits of id, and
 * 5 bits of LUN.
 */
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 5;
static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;

static struct scsi_transport_template *ibmvscsi_transport_template;

#define IBMVSCSI_VERSION "1.5.8"

static struct ibmvscsi_ops *ibmvscsi_ops;

MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSI_VERSION);

module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");

/* ------------------------------------------------------------
 * Routines for the event pool and event structs
 */
/**
 * initialize_event_pool: - Allocates and initializes the event pool for a host
 * @pool: event_pool to be initialized
 * @size: Number of events in pool
 * @hostdata: ibmvscsi_host_data who owns the event pool
 *
 * Returns zero on success.
 */
static int initialize_event_pool(struct event_pool *pool,
				 int size, struct ibmvscsi_host_data *hostdata)
{
	int i;

	pool->size = size;
	pool->next = 0;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	pool->iu_storage =
	    dma_alloc_coherent(hostdata->dev,
			       pool->size * sizeof(*pool->iu_storage),
			       &pool->iu_token, 0);
	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i) {
		struct srp_event_struct *evt = &pool->events[i];
		memset(&evt->crq, 0x00, sizeof(evt->crq));
		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		evt->crq.IU_length = sizeof(*evt->xfer_iu);
		evt->crq.IU_data_ptr = pool->iu_token +
			sizeof(*evt->xfer_iu) * i;
		evt->xfer_iu = pool->iu_storage + i;
		evt->hostdata = hostdata;
		evt->ext_list = NULL;
		evt->ext_list_token = 0;
	}

	return 0;
}
/**
 * release_event_pool: - Frees memory of an event pool of a host
 * @pool: event_pool to be released
 * @hostdata: ibmvscsi_host_data who owns the event pool
 */
static void release_event_pool(struct event_pool *pool,
			       struct ibmvscsi_host_data *hostdata)
{
	int i, in_use = 0;

	for (i = 0; i < pool->size; ++i) {
		if (atomic_read(&pool->events[i].free) != 1)
			++in_use;
		if (pool->events[i].ext_list) {
			dma_free_coherent(hostdata->dev,
				  SG_ALL * sizeof(struct srp_direct_buf),
				  pool->events[i].ext_list,
				  pool->events[i].ext_list_token);
		}
	}
	if (in_use)
		dev_warn(hostdata->dev, "releasing event pool with %d "
			 "events still in use?\n", in_use);
	kfree(pool->events);
	dma_free_coherent(hostdata->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
}
/**
 * valid_event_struct: - Determines if event is valid.
 * @pool: event_pool that contains the event
 * @evt: srp_event_struct to be checked for validity
 *
 * Returns zero if event is invalid, one otherwise.
 */
static int valid_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	int index = evt - pool->events;
	if (index < 0 || index >= pool->size)	/* outside of bounds */
		return 0;
	if (evt != pool->events + index)	/* unaligned */
		return 0;
	return 1;
}
/**
 * free_event_struct: - Changes status of event to "free"
 * @pool: event_pool that contains the event
 * @evt: srp_event_struct to be modified
 */
static void free_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	if (!valid_event_struct(pool, evt)) {
		dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
			"(not in pool %p)\n", evt, pool->events);
		return;
	}
	if (atomic_inc_return(&evt->free) != 1) {
		dev_err(evt->hostdata->dev, "Freeing event_struct %p "
			"which is not in use!\n", evt);
		return;
	}
}

/**
 * get_event_struct: - Gets the next free event in pool
 * @pool: event_pool that contains the events to be searched
 *
 * Returns the next event in "free" state, and NULL if none are free.
 * Note that no synchronization is done here, we assume the host_lock
 * will synchronize things.
 */
static struct srp_event_struct *get_event_struct(struct event_pool *pool)
{
	int i;
	int poolsize = pool->size;
	int offset = pool->next;

	for (i = 0; i < poolsize; i++) {
		offset = (offset + 1) % poolsize;
		if (!atomic_dec_if_positive(&pool->events[offset].free)) {
			pool->next = offset;
			return &pool->events[offset];
		}
	}

	printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
	return NULL;
}
/**
 * init_event_struct: Initialize fields in an event struct that are always
 *                    required.
 * @evt_struct: The event
 * @done: Routine to call when the event is responded to
 * @format: SRP or MAD format
 * @timeout: timeout value set in the CRQ
 */
static void init_event_struct(struct srp_event_struct *evt_struct,
			      void (*done) (struct srp_event_struct *),
			      u8 format,
			      int timeout)
{
	evt_struct->cmnd = NULL;
	evt_struct->cmnd_done = NULL;
	evt_struct->sync_srp = NULL;
	evt_struct->crq.format = format;
	evt_struct->crq.timeout = timeout;
	evt_struct->done = done;
}

/* ------------------------------------------------------------
 * Routines for receiving SCSI responses from the hosting partition
 */

/**
 * set_srp_direction: Set the fields in the srp related to data
 *     direction and number of buffers based on the direction in
 *     the scsi_cmnd and the number of buffers
 */
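/*
 * Descriptive note: srp_cmd->buf_fmt packs two 4-bit descriptor format
 * fields -- the data-out format in the high nibble and the data-in format
 * in the low nibble -- which is why writes shift "fmt" left by 4 here and
 * why unmap_cmd_data() splits the byte the same way.
 */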
static void set_srp_direction(struct scsi_cmnd *cmd,
			      struct srp_cmd *srp_cmd,
			      int numbuf)
{
	u8 fmt;

	if (numbuf == 0)
		return;

	if (numbuf == 1)
		fmt = SRP_DATA_DESC_DIRECT;
	else {
		fmt = SRP_DATA_DESC_INDIRECT;
		numbuf = min(numbuf, MAX_INDIRECT_BUFS);

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			srp_cmd->data_out_desc_cnt = numbuf;
		else
			srp_cmd->data_in_desc_cnt = numbuf;
	}

	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		srp_cmd->buf_fmt = fmt << 4;
	else
		srp_cmd->buf_fmt = fmt;
}

static void unmap_sg_list(int num_entries,
			  struct device *dev,
			  struct srp_direct_buf *md)
{
	int i;

	for (i = 0; i < num_entries; ++i)
		dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
}

/**
 * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
 * @cmd: srp_cmd whose additional_data member will be unmapped
 * @dev: device for which the memory is mapped
 */
static void unmap_cmd_data(struct srp_cmd *cmd,
			   struct srp_event_struct *evt_struct,
			   struct device *dev)
{
	u8 out_fmt, in_fmt;

	out_fmt = cmd->buf_fmt >> 4;
	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);

	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
		return;
	else if (out_fmt == SRP_DATA_DESC_DIRECT ||
		 in_fmt == SRP_DATA_DESC_DIRECT) {
		struct srp_direct_buf *data =
			(struct srp_direct_buf *) cmd->add_data;
		dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
	} else {
		struct srp_indirect_buf *indirect =
			(struct srp_indirect_buf *) cmd->add_data;
		int num_mapped = indirect->table_desc.len /
			sizeof(struct srp_direct_buf);

		if (num_mapped <= MAX_INDIRECT_BUFS) {
			unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
			return;
		}

		unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
	}
}
static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
		       struct srp_direct_buf *md)
{
	int i;
	struct scatterlist *sg;
	u64 total_length = 0;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		struct srp_direct_buf *descr = md + i;
		descr->va = sg_dma_address(sg);
		descr->len = sg_dma_len(sg);
		descr->key = 0;
		total_length += sg_dma_len(sg);
	}
	return total_length;
}

/**
 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
 * @cmd: Scsi_Cmnd with the scatterlist
 * @srp_cmd: srp_cmd that contains the memory descriptor
 * @dev: device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
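/*
 * Descriptive sketch of the cases handled below: a single mapped segment
 * uses one srp_direct_buf inside the SRP command itself; up to
 * MAX_INDIRECT_BUFS segments use the indirect descriptor list embedded in
 * the command; anything larger spills into the externally allocated
 * evt_struct->ext_list table, whose DMA token becomes table_desc.va.
 */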
static int map_sg_data(struct scsi_cmnd *cmd,
		       struct srp_event_struct *evt_struct,
		       struct srp_cmd *srp_cmd, struct device *dev)
{
	int sg_mapped;
	u64 total_length = 0;
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;
	struct srp_indirect_buf *indirect =
		(struct srp_indirect_buf *) data;

	sg_mapped = scsi_dma_map(cmd);
	if (!sg_mapped)
		return 1;
	else if (sg_mapped < 0)
		return 0;

	set_srp_direction(cmd, srp_cmd, sg_mapped);

	/* special case; we can use a single direct descriptor */
	if (sg_mapped == 1) {
		map_sg_list(cmd, sg_mapped, data);
		return 1;
	}

	indirect->table_desc.va = 0;
	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
	indirect->table_desc.key = 0;

	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		total_length = map_sg_list(cmd, sg_mapped,
					   &indirect->desc_list[0]);
		indirect->len = total_length;
		return 1;
	}

	/* get indirect table */
	if (!evt_struct->ext_list) {
		evt_struct->ext_list = (struct srp_direct_buf *)
			dma_alloc_coherent(dev,
					   SG_ALL * sizeof(struct srp_direct_buf),
					   &evt_struct->ext_list_token, 0);
		if (!evt_struct->ext_list) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				sdev_printk(KERN_ERR, cmd->device,
					    "Can't allocate memory "
					    "for indirect table\n");
			return 0;
		}
	}

	total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);

	indirect->len = total_length;
	indirect->table_desc.va = evt_struct->ext_list_token;
	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
	memcpy(indirect->desc_list, evt_struct->ext_list,
	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
	return 1;
}
/**
 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 * @cmd: struct scsi_cmnd with the memory to be mapped
 * @srp_cmd: srp_cmd that contains the memory descriptor
 * @dev: dma device for which to map dma memory
 *
 * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds
 * Returns 1 on success.
 */
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
				struct srp_event_struct *evt_struct,
				struct srp_cmd *srp_cmd, struct device *dev)
{
	switch (cmd->sc_data_direction) {
	case DMA_FROM_DEVICE:
	case DMA_TO_DEVICE:
		break;
	case DMA_NONE:
		return 1;
	case DMA_BIDIRECTIONAL:
		sdev_printk(KERN_ERR, cmd->device,
			    "Can't map DMA_BIDIRECTIONAL to read/write\n");
		return 0;
	default:
		sdev_printk(KERN_ERR, cmd->device,
			    "Unknown data direction 0x%02x; can't map!\n",
			    cmd->sc_data_direction);
		return 0;
	}

	return map_sg_data(cmd, evt_struct, srp_cmd, dev);
}
/**
 * purge_requests: Our virtual adapter just shut down. purge any sent requests
 * @hostdata: the adapter
 */
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
{
	struct srp_event_struct *tmp_evt, *pos;
	unsigned long flags;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
		list_del(&tmp_evt->list);
		del_timer(&tmp_evt->timer);
		if (tmp_evt->cmnd) {
			tmp_evt->cmnd->result = (error_code << 16);
			unmap_cmd_data(&tmp_evt->iu.srp.cmd,
				       tmp_evt,
				       tmp_evt->hostdata->dev);
			if (tmp_evt->cmnd_done)
				tmp_evt->cmnd_done(tmp_evt->cmnd);
		} else if (tmp_evt->done)
			tmp_evt->done(tmp_evt);
		free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}
/**
 * ibmvscsi_reset_host - Reset the connection to the server
 * @hostdata: struct ibmvscsi_host_data to reset
 */
static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
{
	scsi_block_requests(hostdata->host);
	atomic_set(&hostdata->request_limit, 0);

	purge_requests(hostdata, DID_ERROR);
	if ((ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata)) ||
	    (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0)) ||
	    (vio_enable_interrupts(to_vio_dev(hostdata->dev)))) {
		atomic_set(&hostdata->request_limit, -1);
		dev_err(hostdata->dev, "error after reset\n");
	}

	scsi_unblock_requests(hostdata->host);
}

/**
 * ibmvscsi_timeout - Internal command timeout handler
 * @evt_struct: struct srp_event_struct that timed out
 *
 * Called when an internally generated command times out
 */
static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
		evt_struct->iu.srp.cmd.opcode);

	ibmvscsi_reset_host(hostdata);
}

/* ------------------------------------------------------------
 * Routines for sending and receiving SRPs
 */
/**
 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
 * @evt_struct: evt_struct to be sent
 * @hostdata: ibmvscsi_host_data of host
 * @timeout: timeout in seconds - 0 means do not time command
 *
 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
 * Note that this routine assumes that host_lock is held for synchronization
 */
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
				   struct ibmvscsi_host_data *hostdata,
				   unsigned long timeout)
{
	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
	int request_status = 0;
	int rc;

	/* If we have exhausted our request limit, just fail this request,
	 * unless it is for a reset or abort.
	 * Note that there are rare cases involving driver generated requests
	 * (such as task management requests) that the mid layer may think we
	 * can handle more requests (can_queue) when we actually can't
	 */
	if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
		request_status =
			atomic_dec_if_positive(&hostdata->request_limit);
		/* If request limit was -1 when we started, it is now even
		 * less than that
		 */
		if (request_status < -1)
			goto send_error;
		/* Otherwise, we may have run out of requests. */
		/* If request limit was 0 when we started the adapter is in the
		 * process of performing a login with the server adapter, or
		 * we may have run out of requests.
		 */
		else if (request_status == -1 &&
			 evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
			goto send_busy;
		/* Abort and reset calls should make it through.
		 * Nothing except abort and reset should use the last two
		 * slots unless we had two or less to begin with.
		 */
		else if (request_status < 2 &&
			 evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
			/* In the case that we have less than two requests
			 * available, check the server limit as a combination
			 * of the request limit and the number of requests
			 * in-flight (the size of the send list).  If the
			 * server limit is greater than 2, return busy so
			 * that the last two are reserved for reset and abort.
			 */
			int server_limit = request_status;
			struct srp_event_struct *tmp_evt;

			list_for_each_entry(tmp_evt, &hostdata->sent, list) {
				server_limit++;
			}

			if (server_limit > 2)
				goto send_busy;
		}
	}

	/* Copy the IU into the transfer area */
	*evt_struct->xfer_iu = evt_struct->iu;
	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;

	/* Add this to the sent list.  We need to do this
	 * before we actually send
	 * in case it comes back REALLY fast
	 */
	list_add_tail(&evt_struct->list, &hostdata->sent);

	init_timer(&evt_struct->timer);
	if (timeout) {
		evt_struct->timer.data = (unsigned long) evt_struct;
		evt_struct->timer.expires = jiffies + (timeout * HZ);
		evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout;
		add_timer(&evt_struct->timer);
	}

	if ((rc =
	     ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
		list_del(&evt_struct->list);
		del_timer(&evt_struct->timer);

		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
		 * Firmware will send a CRQ with a transport event (0xFF) to
		 * tell this client what has happened to the transport.  This
		 * will be handled in ibmvscsi_handle_crq()
		 */
		if (rc == H_CLOSED) {
			dev_warn(hostdata->dev, "send warning. "
				 "Receive queue closed, will retry.\n");
			goto send_busy;
		}
		dev_err(hostdata->dev, "send error %d\n", rc);
		atomic_inc(&hostdata->request_limit);
		goto send_error;
	}

	return 0;

 send_busy:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	free_event_struct(&hostdata->pool, evt_struct);
	if (request_status != -1)
		atomic_inc(&hostdata->request_limit);
	return SCSI_MLQUEUE_HOST_BUSY;

 send_error:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	if (evt_struct->cmnd != NULL) {
		evt_struct->cmnd->result = DID_ERROR << 16;
		evt_struct->cmnd_done(evt_struct->cmnd);
	} else if (evt_struct->done)
		evt_struct->done(evt_struct);

	free_event_struct(&hostdata->pool, evt_struct);
	return 0;
}
/**
 * handle_cmd_rsp: - Handle responses from commands
 * @evt_struct: srp_event_struct to be handled
 *
 * Used as a callback when sending scsi cmds.
 * Gets called by ibmvscsi_handle_crq()
 */
static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
{
	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
	struct scsi_cmnd *cmnd = evt_struct->cmnd;

	if (unlikely(rsp->opcode != SRP_RSP)) {
		if (printk_ratelimit())
			dev_warn(evt_struct->hostdata->dev,
				 "bad SRP RSP type %d\n", rsp->opcode);
	}

	if (cmnd) {
		cmnd->result |= rsp->status;
		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
			memcpy(cmnd->sense_buffer,
			       rsp->data,
			       rsp->sense_data_len);
		unmap_cmd_data(&evt_struct->iu.srp.cmd,
			       evt_struct,
			       evt_struct->hostdata->dev);

		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
			scsi_set_resid(cmnd, rsp->data_out_res_cnt);
		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
			scsi_set_resid(cmnd, rsp->data_in_res_cnt);
	}

	if (evt_struct->cmnd_done)
		evt_struct->cmnd_done(cmnd);
}
/**
 * lun_from_dev: - Returns the lun of the scsi device
 * @dev: struct scsi_device
 */
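/*
 * Descriptive note: this packs the SCSI address into a 16-bit SRP-style
 * single-level LUN.  The 0x2 in the top two bits selects the addressing
 * method, followed by 6 bits of target id, 3 bits of channel and 5 bits
 * of LUN, matching the field widths described near the module parameters
 * above.
 */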
static inline u16 lun_from_dev(struct scsi_device *dev)
{
	return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
}

/**
 * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
 * @cmd: struct scsi_cmnd to be executed
 * @done: Callback function to be called when cmd is completed
 */
static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
				 void (*done) (struct scsi_cmnd *))
{
	struct srp_cmd *srp_cmd;
	struct srp_event_struct *evt_struct;
	struct srp_indirect_buf *indirect;
	struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
	u16 lun = lun_from_dev(cmnd->device);
	u8 out_fmt, in_fmt;

	cmnd->result = (DID_OK << 16);
	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Set up the actual SRP IU */
	srp_cmd = &evt_struct->iu.srp.cmd;
	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
	srp_cmd->opcode = SRP_CMD;
	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
	srp_cmd->lun = ((u64) lun) << 48;

	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			sdev_printk(KERN_ERR, cmnd->device,
				    "couldn't convert cmd to srp_cmd\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	init_event_struct(evt_struct,
			  handle_cmd_rsp,
			  VIOSRP_SRP_FORMAT,
			  cmnd->request->timeout/HZ);

	evt_struct->cmnd = cmnd;
	evt_struct->cmnd_done = done;

	/* Fix up dma address of the buffer itself */
	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
	out_fmt = srp_cmd->buf_fmt >> 4;
	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
	    indirect->table_desc.va == 0) {
		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
			offsetof(struct srp_cmd, add_data) +
			offsetof(struct srp_indirect_buf, desc_list);
	}

	return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
}
/* ------------------------------------------------------------
 * Routines for driver initialization
 */

/**
 * adapter_info_rsp: - Handle response to MAD adapter info request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending adapter_info.  Gets called
 * by ibmvscsi_handle_crq()
 */
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	dma_unmap_single(hostdata->dev,
			 evt_struct->iu.mad.adapter_info.buffer,
			 evt_struct->iu.mad.adapter_info.common.length,
			 DMA_BIDIRECTIONAL);

	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
		dev_err(hostdata->dev, "error %d getting adapter info\n",
			evt_struct->xfer_iu->mad.adapter_info.common.status);
	} else {
		dev_info(hostdata->dev, "host srp version: %s, "
			 "host partition %s (%d), OS %d, max io %u\n",
			 hostdata->madapter_info.srp_version,
			 hostdata->madapter_info.partition_name,
			 hostdata->madapter_info.partition_number,
			 hostdata->madapter_info.os_type,
			 hostdata->madapter_info.port_max_txu[0]);

		if (hostdata->madapter_info.port_max_txu[0])
			hostdata->host->max_sectors =
				hostdata->madapter_info.port_max_txu[0] >> 9;

		if (hostdata->madapter_info.os_type == 3 &&
		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
			dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
				hostdata->madapter_info.srp_version);
			dev_err(hostdata->dev, "limiting scatterlists to %d\n",
				MAX_INDIRECT_BUFS);
			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
		}
	}
}

/**
 * send_mad_adapter_info: - Sends the mad adapter info request
 *      and stores the result so it can be retrieved with
 *      sysfs.  We COULD consider causing a failure if the
 *      returned SRP version doesn't match ours.
 * @hostdata: ibmvscsi_host_data of host
 */
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
	struct viosrp_adapter_info *req;
	struct srp_event_struct *evt_struct;
	unsigned long flags;
	dma_addr_t addr;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		dev_err(hostdata->dev,
			"couldn't allocate an event for ADAPTER_INFO_REQ!\n");
		return;
	}

	init_event_struct(evt_struct,
			  adapter_info_rsp,
			  VIOSRP_MAD_FORMAT,
			  init_timeout);

	req = &evt_struct->iu.mad.adapter_info;
	memset(req, 0x00, sizeof(*req));

	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
	req->common.length = sizeof(hostdata->madapter_info);
	req->buffer = addr = dma_map_single(hostdata->dev,
					    &hostdata->madapter_info,
					    sizeof(hostdata->madapter_info),
					    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(hostdata->dev, req->buffer)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(hostdata->dev,
				"Unable to map request_buffer for "
				"adapter_info!\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return;
	}

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
		dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
		dma_unmap_single(hostdata->dev,
				 addr,
				 sizeof(hostdata->madapter_info),
				 DMA_BIDIRECTIONAL);
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}
/**
 * login_rsp: - Handle response to SRP login request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending srp_login.  Gets called
 * by ibmvscsi_handle_crq()
 */
static void login_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
	case SRP_LOGIN_RSP:	/* it worked! */
		break;
	case SRP_LOGIN_REJ:	/* refused! */
		dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
			 evt_struct->xfer_iu->srp.login_rej.reason);
		/* Login failed.  */
		atomic_set(&hostdata->request_limit, -1);
		return;
	default:
		dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
			evt_struct->xfer_iu->srp.login_rsp.opcode);
		/* Login failed.  */
		atomic_set(&hostdata->request_limit, -1);
		return;
	}

	dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");

	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
		dev_err(hostdata->dev, "Invalid request_limit.\n");

	/* Now we know what the real request-limit is.
	 * This value is set rather than added to request_limit because
	 * request_limit could have been set to -1 by this client.
	 */
	atomic_set(&hostdata->request_limit,
		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);

	/* If we had any pending I/Os, kick them */
	scsi_unblock_requests(hostdata->host);

	send_mad_adapter_info(hostdata);
	return;
}
/**
 * send_srp_login: - Sends the srp login
 * @hostdata: ibmvscsi_host_data of host
 *
 * Returns zero if successful.
 */
static int send_srp_login(struct ibmvscsi_host_data *hostdata)
{
	int rc;
	unsigned long flags;
	struct srp_login_req *login;
	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);

	if (!evt_struct) {
		dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
		return FAILED;
	}

	init_event_struct(evt_struct,
			  login_rsp,
			  VIOSRP_SRP_FORMAT,
			  init_timeout);

	login = &evt_struct->iu.srp.login_req;
	memset(login, 0x00, sizeof(struct srp_login_req));
	login->opcode = SRP_LOGIN_REQ;
	login->req_it_iu_len = sizeof(union srp_iu);
	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	/* Start out with a request limit of 0, since this is negotiated in
	 * the login request we are just sending and login requests always
	 * get sent by the driver regardless of request_limit.
	 */
	atomic_set(&hostdata->request_limit, 0);

	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	dev_info(hostdata->dev, "sent SRP login\n");
	return rc;
}
/**
 * sync_completion: Signal that a synchronous command has completed
 * Note that after returning from this call, the evt_struct is freed.
 * the caller waiting on this completion shouldn't touch the evt_struct
 * again.
 */
static void sync_completion(struct srp_event_struct *evt_struct)
{
	/* copy the response back */
	if (evt_struct->sync_srp)
		*evt_struct->sync_srp = *evt_struct->xfer_iu;

	complete(&evt_struct->comp);
}

/**
 * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
 * send this over to the server and wait synchronously for the response
 */
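/*
 * Descriptive sketch of the flow below: find the outstanding event for
 * this scsi_cmnd, build an SRP_TSK_MGMT IU with SRP_TSK_ABORT_TASK for
 * its tag, and keep retrying the send (dropping host_lock and sleeping
 * briefly) while the adapter reports SCSI_MLQUEUE_HOST_BUSY, for up to
 * roughly init_timeout seconds.  After the synchronous response, the
 * sent list is re-checked in case the command completed on its own while
 * the lock was dropped.
 */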
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *found_evt;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);
	unsigned long wait_switch = 0;

	/* First, find this command in our sent list so we can figure
	 * out the correct tag
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	wait_switch = jiffies + (init_timeout * HZ);
	do {
		found_evt = NULL;
		list_for_each_entry(tmp_evt, &hostdata->sent, list) {
			if (tmp_evt->cmnd == cmd) {
				found_evt = tmp_evt;
				break;
			}
		}

		if (!found_evt) {
			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
			return SUCCESS;
		}

		evt = get_event_struct(&hostdata->pool);
		if (evt == NULL) {
			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
			sdev_printk(KERN_ERR, cmd->device,
				    "failed to allocate abort event\n");
			return FAILED;
		}

		init_event_struct(evt,
				  sync_completion,
				  VIOSRP_SRP_FORMAT,
				  init_timeout);

		tsk_mgmt = &evt->iu.srp.tsk_mgmt;

		/* Set up an abort SRP command */
		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
		tsk_mgmt->opcode = SRP_TSK_MGMT;
		tsk_mgmt->lun = ((u64) lun) << 48;
		tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
		tsk_mgmt->task_tag = (u64) found_evt;

		evt->sync_srp = &srp_rsp;

		init_completion(&evt->comp);
		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);

		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
			break;

		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		msleep(10);
		spin_lock_irqsave(hostdata->host->host_lock, flags);
	} while (time_before(jiffies, wait_switch));

	spin_unlock_irqrestore(hostdata->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, cmd->device,
			    "failed to send abort() event. rc=%d\n", rsp_rc);
		return FAILED;
	}

	sdev_printk(KERN_INFO, cmd->device,
		    "aborting command. lun 0x%llx, tag 0x%llx\n",
		    (((u64) lun) << 48), (u64) found_evt);

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
				    srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device,
				    "abort code %d for task tag 0x%llx\n",
				    rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

	/* Because we dropped the spinlock above, it's possible
	 * The event is no longer in our list.  Make sure it didn't
	 * complete while we were aborting
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
		if (tmp_evt->cmnd == cmd) {
			found_evt = tmp_evt;
			break;
		}
	}

	if (found_evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n",
			    tsk_mgmt->task_tag);
		return SUCCESS;
	}

	sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n",
		    tsk_mgmt->task_tag);

	cmd->result = (DID_ABORT << 16);
	list_del(&found_evt->list);
	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
		       found_evt->hostdata->dev);
	free_event_struct(&found_evt->hostdata->pool, found_evt);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	atomic_inc(&hostdata->request_limit);
	return SUCCESS;
}
/**
 * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
 * template send this over to the server and wait synchronously for the
 * response
 */
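/*
 * Same retry pattern as the abort handler above, but this issues
 * SRP_TSK_LUN_RESET and then fails any commands for the LUN still on the
 * sent list with DID_RESET.
 */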
static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *pos;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);
	unsigned long wait_switch = 0;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	wait_switch = jiffies + (init_timeout * HZ);
	do {
		evt = get_event_struct(&hostdata->pool);
		if (evt == NULL) {
			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
			sdev_printk(KERN_ERR, cmd->device,
				    "failed to allocate reset event\n");
			return FAILED;
		}

		init_event_struct(evt,
				  sync_completion,
				  VIOSRP_SRP_FORMAT,
				  init_timeout);

		tsk_mgmt = &evt->iu.srp.tsk_mgmt;

		/* Set up a lun reset SRP command */
		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
		tsk_mgmt->opcode = SRP_TSK_MGMT;
		tsk_mgmt->lun = ((u64) lun) << 48;
		tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;

		evt->sync_srp = &srp_rsp;

		init_completion(&evt->comp);
		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);

		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
			break;

		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		msleep(10);
		spin_lock_irqsave(hostdata->host->host_lock, flags);
	} while (time_before(jiffies, wait_switch));

	spin_unlock_irqrestore(hostdata->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, cmd->device,
			    "failed to send reset event. rc=%d\n", rsp_rc);
		return FAILED;
	}

	sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
		    (((u64) lun) << 48));

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
				    srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device,
				    "reset code %d for task tag 0x%llx\n",
				    rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

	/* We need to find all commands for this LUN that have not yet been
	 * responded to, and fail them with DID_RESET
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
		if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
			if (tmp_evt->cmnd)
				tmp_evt->cmnd->result = (DID_RESET << 16);
			list_del(&tmp_evt->list);
			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
				       tmp_evt->hostdata->dev);
			free_event_struct(&tmp_evt->hostdata->pool,
					  tmp_evt);
			atomic_inc(&hostdata->request_limit);
			if (tmp_evt->cmnd_done)
				tmp_evt->cmnd_done(tmp_evt->cmnd);
			else if (tmp_evt->done)
				tmp_evt->done(tmp_evt);
		}
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	return SUCCESS;
}
/**
 * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
 * @cmd: struct scsi_cmnd having problems
 */
static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	unsigned long wait_switch = 0;
	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);

	dev_err(hostdata->dev, "Resetting connection due to error recovery\n");

	ibmvscsi_reset_host(hostdata);

	for (wait_switch = jiffies + (init_timeout * HZ);
	     time_before(jiffies, wait_switch) &&
		     atomic_read(&hostdata->request_limit) < 2;) {
		msleep(10);
	}

	if (atomic_read(&hostdata->request_limit) <= 0)
		return FAILED;

	return SUCCESS;
}
/**
 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
 * @crq: Command/Response queue
 * @hostdata: ibmvscsi_host_data of host
 */
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
			 struct ibmvscsi_host_data *hostdata)
{
	long rc;
	unsigned long flags;
	struct srp_event_struct *evt_struct =
		(struct srp_event_struct *)crq->IU_data_ptr;

	switch (crq->valid) {
	case 0xC0:		/* initialization */
		switch (crq->format) {
		case 0x01:	/* Initialization message */
			dev_info(hostdata->dev, "partner initialized\n");
			/* Send back a response */
			if ((rc = ibmvscsi_ops->send_crq(hostdata,
							 0xC002000000000000LL, 0)) == 0) {
				/* Now login */
				send_srp_login(hostdata);
			} else {
				dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
			}
			break;
		case 0x02:	/* Initialization response */
			dev_info(hostdata->dev, "partner initialization complete\n");

			/* Now login */
			send_srp_login(hostdata);
			break;
		default:
			dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
		}
		return;
	case 0xFF:	/* Hypervisor telling us the connection is closed */
		scsi_block_requests(hostdata->host);
		atomic_set(&hostdata->request_limit, 0);
		if (crq->format == 0x06) {
			/* We need to re-setup the interpartition connection */
			dev_info(hostdata->dev, "Re-enabling adapter!\n");
			purge_requests(hostdata, DID_REQUEUE);
			if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
							      hostdata)) ||
			    (ibmvscsi_ops->send_crq(hostdata,
						    0xC001000000000000LL, 0))) {
				atomic_set(&hostdata->request_limit, -1);
				dev_err(hostdata->dev, "error after enable\n");
			}
		} else {
			dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
				crq->format);

			purge_requests(hostdata, DID_ERROR);
			if ((ibmvscsi_ops->reset_crq_queue(&hostdata->queue,
							   hostdata)) ||
			    (ibmvscsi_ops->send_crq(hostdata,
						    0xC001000000000000LL, 0))) {
				atomic_set(&hostdata->request_limit, -1);
				dev_err(hostdata->dev, "error after reset\n");
			}
		}
		scsi_unblock_requests(hostdata->host);
		return;
	case 0x80:		/* real payload */
		break;
	default:
		dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
			crq->valid);
		return;
	}

	/* The only kind of payload CRQs we should get are responses to
	 * things we send.  Make sure this response is to something we
	 * actually sent
	 */
	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
		dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
			(void *)crq->IU_data_ptr);
		return;
	}

	if (atomic_read(&evt_struct->free)) {
		dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
			(void *)crq->IU_data_ptr);
		return;
	}

	if (crq->format == VIOSRP_SRP_FORMAT)
		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
			   &hostdata->request_limit);

	del_timer(&evt_struct->timer);

	if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd)
		evt_struct->cmnd->result = DID_ERROR << 16;
	if (evt_struct->done)
		evt_struct->done(evt_struct);
	else
		dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");

	/*
	 * Lock the host_lock before messing with these structures, since we
	 * are running in a task context
	 */
	spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
	list_del(&evt_struct->list);
	free_event_struct(&evt_struct->hostdata->pool, evt_struct);
	spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
}
/**
 * ibmvscsi_do_host_config: Send the command to the server to get host
 * configuration data.  The data is opaque to us.
 */
static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
				   unsigned char *buffer, int length)
{
	struct viosrp_host_config *host_config;
	struct srp_event_struct *evt_struct;
	unsigned long flags;
	dma_addr_t addr;
	int rc;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
		return -1;
	}

	init_event_struct(evt_struct,
			  sync_completion,
			  VIOSRP_MAD_FORMAT,
			  init_timeout);

	host_config = &evt_struct->iu.mad.host_config;

	/* Set up a host config MAD request */
	memset(host_config, 0x00, sizeof(*host_config));
	host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
	host_config->common.length = length;
	host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
						    length,
						    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(hostdata->dev,
				"dma_mapping error getting host config\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return -1;
	}

	init_completion(&evt_struct->comp);
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	if (rc == 0)
		wait_for_completion(&evt_struct->comp);
	dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);

	return rc;
}

/**
 * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
 * @sdev:	struct scsi_device device to configure
 *
 * Enable allow_restart for a device if it is a disk.  Adjust the
 * queue_depth here also as is required by the documentation for
 * struct scsi_host_template.
 */
static int ibmvscsi_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (sdev->type == TYPE_DISK) {
		sdev->allow_restart = 1;
		blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
	}
	scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return 0;
}

/**
 * ibmvscsi_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 *	actual depth set
 **/
static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
		qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;

	scsi_adjust_queue_depth(sdev, 0, qdepth);
	return sdev->queue_depth;
}
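
/*
 * Depths requested above IBMVSCSI_MAX_CMDS_PER_LUN are silently clamped;
 * the midlayer learns the depth actually applied from the return value of
 * ibmvscsi_change_queue_depth() above.
 */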

/* ------------------------------------------------------------
 * sysfs attributes
 */
static ssize_t show_host_srp_version(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.srp_version);
	return len;
}

static struct device_attribute ibmvscsi_host_srp_version = {
	.attr = {
		 .name = "srp_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_srp_version,
};

static ssize_t show_host_partition_name(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.partition_name);
	return len;
}

static struct device_attribute ibmvscsi_host_partition_name = {
	.attr = {
		 .name = "partition_name",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_name,
};

static ssize_t show_host_partition_number(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       hostdata->madapter_info.partition_number);
	return len;
}

static struct device_attribute ibmvscsi_host_partition_number = {
	.attr = {
		 .name = "partition_number",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_number,
};

static ssize_t show_host_mad_version(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       hostdata->madapter_info.mad_version);
	return len;
}

static struct device_attribute ibmvscsi_host_mad_version = {
	.attr = {
		 .name = "mad_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_mad_version,
};

static ssize_t show_host_os_type(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
	return len;
}

static struct device_attribute ibmvscsi_host_os_type = {
	.attr = {
		 .name = "os_type",
		 .mode = S_IRUGO,
		 },
	.show = show_host_os_type,
};

static ssize_t show_host_config(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);

	/* returns null-terminated host config data */
	if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
		return strlen(buf);
	else
		return 0;
}

static struct device_attribute ibmvscsi_host_config = {
	.attr = {
		 .name = "config",
		 .mode = S_IRUGO,
		 },
	.show = show_host_config,
};

static struct device_attribute *ibmvscsi_attrs[] = {
	&ibmvscsi_host_srp_version,
	&ibmvscsi_host_partition_name,
	&ibmvscsi_host_partition_number,
	&ibmvscsi_host_mad_version,
	&ibmvscsi_host_os_type,
	&ibmvscsi_host_config,
	NULL
};
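
/*
 * ibmvscsi_attrs must remain NULL-terminated; it is hooked up through
 * .shost_attrs in the host template below, so each attribute appears under
 * /sys/class/scsi_host/hostN/ for every adapter instance.
 */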

/* ------------------------------------------------------------
 * SCSI driver registration
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
	.proc_name = "ibmvscsi",
	.queuecommand = ibmvscsi_queuecommand,
	.eh_abort_handler = ibmvscsi_eh_abort_handler,
	.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
	.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
	.slave_configure = ibmvscsi_slave_configure,
	.change_queue_depth = ibmvscsi_change_queue_depth,
	.cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ibmvscsi_attrs,
};
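
/*
 * Note that .can_queue starts out as IBMVSCSI_MAX_REQUESTS_DEFAULT but is
 * overwritten with max_requests in ibmvscsi_module_init() before the
 * driver is registered.
 */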

/**
 * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
{
	/* iu_storage data allocated in initialize_event_pool */
	unsigned long desired_io = max_events * sizeof(union viosrp_iu);

	/* add io space for sg data */
	desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
		       IBMVSCSI_CMDS_PER_LUN_DEFAULT);

	return desired_io;
}
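
/*
 * Rough sizing sketch (the defaults are assumed here; they live in the
 * header rather than in this file): with IBMVSCSI_MAX_SECTORS_DEFAULT = 256
 * and IBMVSCSI_CMDS_PER_LUN_DEFAULT = 16, the scatter/gather term above is
 * 256 * 512 * 16 = 2 MiB, on top of one union viosrp_iu per event.
 */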

/**
 * ibmvscsi_probe - Called by the vio bus code for each adapter
 */
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvscsi_host_data *hostdata;
	struct Scsi_Host *host;
	struct device *dev = &vdev->dev;
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;
	unsigned long wait_switch = 0;
	int rc;

	vdev->dev.driver_data = NULL;

	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
	if (!host) {
		dev_err(&vdev->dev, "couldn't allocate host data\n");
		goto scsi_host_alloc_failed;
	}

	host->transportt = ibmvscsi_transport_template;
	hostdata = shost_priv(host);
	memset(hostdata, 0x00, sizeof(*hostdata));
	INIT_LIST_HEAD(&hostdata->sent);
	hostdata->host = host;
	hostdata->dev = dev;
	atomic_set(&hostdata->request_limit, -1);
	hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;

	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
		goto init_crq_failed;
	}
	if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
		dev_err(&vdev->dev, "couldn't initialize event pool\n");
		goto init_pool_failed;
	}

	host->max_lun = 8;
	host->max_id = max_id;
	host->max_channel = max_channel;

	if (scsi_add_host(hostdata->host, hostdata->dev))
		goto add_host_failed;

	/* we don't have a proper target_port_id so let's use the fake one */
	memcpy(ids.port_id, hostdata->madapter_info.partition_name,
	       sizeof(ids.port_id));
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(host, &ids);
	if (IS_ERR(rport))
		goto add_srp_port_failed;

	/* Try to send an initialization message.  Note that this is allowed
	 * to fail if the other end is not active.  In that case we don't
	 * want to scan.
	 */
	if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0
	    || rc == H_RESOURCE) {
		/*
		 * Wait around max init_timeout secs for the adapter to finish
		 * initializing. When we are done initializing, we will have a
		 * valid request_limit.  We don't want Linux scanning before
		 * we are ready.
		 */
		for (wait_switch = jiffies + (init_timeout * HZ);
		     time_before(jiffies, wait_switch) &&
		     atomic_read(&hostdata->request_limit) < 2;) {
			msleep(10);
		}

		/* if we now have a valid request_limit, initiate a scan */
		if (atomic_read(&hostdata->request_limit) > 0)
			scsi_scan_host(host);
	}

	vdev->dev.driver_data = hostdata;
	return 0;

add_srp_port_failed:
	scsi_remove_host(hostdata->host);
add_host_failed:
	release_event_pool(&hostdata->pool, hostdata);
init_pool_failed:
	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
init_crq_failed:
	scsi_host_put(host);
scsi_host_alloc_failed:
	return -1;
}
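
/*
 * ibmvscsi_remove - called by the vio bus code when an adapter goes away;
 * releases the event pool and CRQ and unregisters the SRP rport and SCSI
 * host that were set up in ibmvscsi_probe().
 */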
static int ibmvscsi_remove(struct vio_dev *vdev)
{
	struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;

	release_event_pool(&hostdata->pool, hostdata);
	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
					max_events);

	srp_remove_host(hostdata->host);
	scsi_remove_host(hostdata->host);
	scsi_host_put(hostdata->host);

	return 0;
}

/**
 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
 * support.
 */
static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
	{"vscsi", "IBM,v-scsi"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);

static struct vio_driver ibmvscsi_driver = {
	.id_table = ibmvscsi_device_table,
	.probe = ibmvscsi_probe,
	.remove = ibmvscsi_remove,
	.get_desired_dma = ibmvscsi_get_desired_dma,
	.driver = {
		.name = "ibmvscsi",
		.owner = THIS_MODULE,
	}
};
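
/*
 * The SRP transport function template is left empty: in this file the
 * transport class is only used for rport bookkeeping (srp_rport_add() and
 * srp_remove_host() above), so no callbacks are provided.
 */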
static struct srp_function_template ibmvscsi_transport_functions = {
};

int __init ibmvscsi_module_init(void)
{
	int ret;

	/* Ensure we have two requests to do error recovery */
	driver_template.can_queue = max_requests;
	max_events = max_requests + 2;

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		ibmvscsi_ops = &iseriesvscsi_ops;
	else if (firmware_has_feature(FW_FEATURE_VIO))
		ibmvscsi_ops = &rpavscsi_ops;
	else
		return -ENODEV;

	ibmvscsi_transport_template =
		srp_attach_transport(&ibmvscsi_transport_functions);
	if (!ibmvscsi_transport_template)
		return -ENOMEM;

	ret = vio_register_driver(&ibmvscsi_driver);
	if (ret)
		srp_release_transport(ibmvscsi_transport_template);
	return ret;
}
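
/*
 * Module lifetime note: srp_attach_transport() must succeed before
 * vio_register_driver() runs, since ibmvscsi_probe() dereferences
 * ibmvscsi_transport_template as soon as an adapter is bound; the exit
 * path below unwinds in the opposite order.
 */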

void __exit ibmvscsi_module_exit(void)
{
	vio_unregister_driver(&ibmvscsi_driver);
	srp_release_transport(ibmvscsi_transport_template);
}

module_init(ibmvscsi_module_init);
module_exit(ibmvscsi_module_exit);