ibmvscsi.c

  1. /* ------------------------------------------------------------
  2. * ibmvscsi.c
  3. * (C) Copyright IBM Corporation 1994, 2004
  4. * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
  5. * Santiago Leon (santil@us.ibm.com)
  6. * Dave Boutcher (sleddog@us.ibm.com)
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  21. * USA
  22. *
  23. * ------------------------------------------------------------
  24. * Emulation of a SCSI host adapter for Virtual I/O devices
  25. *
  26. * This driver supports the SCSI adapter implemented by the IBM
  27. * Power5 firmware. That SCSI adapter is not a physical adapter,
  28. * but allows Linux SCSI peripheral drivers to directly
  29. * access devices in another logical partition on the physical system.
  30. *
  31. * The virtual adapter(s) are present in the open firmware device
  32. * tree just like real adapters.
  33. *
  34. * One of the capabilities provided on these systems is the ability
  35. * to DMA between partitions. The architecture states that for VSCSI,
  36. * the server side is allowed to DMA to and from the client. The client
  37. * is never trusted to DMA to or from the server directly.
  38. *
  39. * Messages are sent between partitions on a "Command/Response Queue"
  40. * (CRQ), which is just a buffer of 16 byte entries in the receiver's memory.
  41. * Senders cannot access the buffer directly, but send messages by
  42. * making a hypervisor call and passing in the 16 bytes. The hypervisor
  43. * puts the message in the next 16 byte space in round-robin fashion,
  44. * turns on the high order bit of the message (the valid bit), and
  45. * generates an interrupt to the receiver (if interrupts are turned on.)
  46. * The receiver just turns off the valid bit when they have copied out
  47. * the message.
  48. *
  49. * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
  50. * (IU) (as defined in the T10 standard available at www.t10.org), gets
  51. * a DMA address for the message, and sends it to the server as the
  52. * payload of a CRQ message. The server DMAs the SRP IU and processes it,
  53. * including doing any additional data transfers. When it is done, it
  54. * DMAs the SRP response back to the same address as the request came from,
  55. * and sends a CRQ message back to inform the client that the request has
  56. * completed.
  57. *
  58. * Note that some of the underlying infrastructure is different between
  59. * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
  60. * the older iSeries hypervisor models. To support both, some low level
  61. * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
  62. * The Makefile should pick one, not two, not zero, of these.
  63. *
  64. * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
  65. * interfaces. It would be really nice to abstract this above an RDMA
  66. * layer.
  67. */
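/* Editorial summary of the request/response flow as implemented below:
 * each outstanding request is tracked by an srp_event_struct from the
 * per-host event pool. The SRP IU is copied into that event's DMA-able
 * iu_storage slot (evt->xfer_iu), the CRQ entry carries the slot's DMA
 * address in IU_data_ptr, and the event pointer itself is used as the
 * SRP tag. The server's response CRQ carries that tag back in its
 * IU_data_ptr field, from which ibmvscsi_handle_crq() recovers the event
 * and runs its done() callback.
 */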
  68. #include <linux/module.h>
  69. #include <linux/moduleparam.h>
  70. #include <linux/dma-mapping.h>
  71. #include <linux/delay.h>
  72. #include <asm/vio.h>
  73. #include <scsi/scsi.h>
  74. #include <scsi/scsi_cmnd.h>
  75. #include <scsi/scsi_host.h>
  76. #include <scsi/scsi_device.h>
  77. #include "ibmvscsi.h"
  78. /* The values below are somewhat arbitrary default values, but
  79. * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
  80. * Note that there are 3 bits of channel value, 6 bits of id, and
  81. * 5 bits of LUN.
  82. */
  83. static int max_id = 64;
  84. static int max_channel = 3;
  85. static int init_timeout = 5;
  86. static int max_requests = 50;
  87. #define IBMVSCSI_VERSION "1.5.8"
  88. MODULE_DESCRIPTION("IBM Virtual SCSI");
  89. MODULE_AUTHOR("Dave Boutcher");
  90. MODULE_LICENSE("GPL");
  91. MODULE_VERSION(IBMVSCSI_VERSION);
  92. module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
  93. MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
  94. module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
  95. MODULE_PARM_DESC(max_channel, "Largest channel value");
  96. module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
  97. MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
  98. module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
  99. MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
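/* Illustrative note (not from the original source): these defaults can be
 * overridden on the module load command line (e.g. max_requests=100
 * init_timeout=10) and, since the parameters are S_IRUGO | S_IWUSR, they
 * can also be read or changed later through the module's sysfs parameter
 * files by root.
 */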
  100. /* ------------------------------------------------------------
  101. * Routines for the event pool and event structs
  102. */
  103. /**
  104. * initialize_event_pool: - Allocates and initializes the event pool for a host
  105. * @pool: event_pool to be initialized
  106. * @size: Number of events in pool
  107. * @hostdata: ibmvscsi_host_data who owns the event pool
  108. *
  109. * Returns zero on success.
  110. */
  111. static int initialize_event_pool(struct event_pool *pool,
  112. int size, struct ibmvscsi_host_data *hostdata)
  113. {
  114. int i;
  115. pool->size = size;
  116. pool->next = 0;
  117. pool->events = kmalloc(pool->size * sizeof(*pool->events), GFP_KERNEL);
  118. if (!pool->events)
  119. return -ENOMEM;
  120. memset(pool->events, 0x00, pool->size * sizeof(*pool->events));
  121. pool->iu_storage =
  122. dma_alloc_coherent(hostdata->dev,
  123. pool->size * sizeof(*pool->iu_storage),
  124. &pool->iu_token, 0);
  125. if (!pool->iu_storage) {
  126. kfree(pool->events);
  127. return -ENOMEM;
  128. }
  129. for (i = 0; i < pool->size; ++i) {
  130. struct srp_event_struct *evt = &pool->events[i];
  131. memset(&evt->crq, 0x00, sizeof(evt->crq));
  132. atomic_set(&evt->free, 1);
  133. evt->crq.valid = 0x80;
  134. evt->crq.IU_length = sizeof(*evt->xfer_iu);
  135. evt->crq.IU_data_ptr = pool->iu_token +
  136. sizeof(*evt->xfer_iu) * i;
  137. evt->xfer_iu = pool->iu_storage + i;
  138. evt->hostdata = hostdata;
  139. evt->ext_list = NULL;
  140. evt->ext_list_token = 0;
  141. }
  142. return 0;
  143. }
  144. /**
  145. * release_event_pool: - Frees memory of an event pool of a host
  146. * @pool: event_pool to be released
  147. * @hostdata: ibmvscsi_host_data who owns the event pool
  148. *
  149. * Frees both the event array and the IU storage area.
  150. */
  151. static void release_event_pool(struct event_pool *pool,
  152. struct ibmvscsi_host_data *hostdata)
  153. {
  154. int i, in_use = 0;
  155. for (i = 0; i < pool->size; ++i) {
  156. if (atomic_read(&pool->events[i].free) != 1)
  157. ++in_use;
  158. if (pool->events[i].ext_list) {
  159. dma_free_coherent(hostdata->dev,
  160. SG_ALL * sizeof(struct memory_descriptor),
  161. pool->events[i].ext_list,
  162. pool->events[i].ext_list_token);
  163. }
  164. }
  165. if (in_use)
  166. printk(KERN_WARNING
  167. "ibmvscsi: releasing event pool with %d "
  168. "events still in use?\n", in_use);
  169. kfree(pool->events);
  170. dma_free_coherent(hostdata->dev,
  171. pool->size * sizeof(*pool->iu_storage),
  172. pool->iu_storage, pool->iu_token);
  173. }
  174. /**
  175. * valid_event_struct: - Determines if event is valid.
  176. * @pool: event_pool that contains the event
  177. * @evt: srp_event_struct to be checked for validity
  178. *
  179. * Returns zero if event is invalid, one otherwise.
  180. */
  181. static int valid_event_struct(struct event_pool *pool,
  182. struct srp_event_struct *evt)
  183. {
  184. int index = evt - pool->events;
  185. if (index < 0 || index >= pool->size) /* outside of bounds */
  186. return 0;
  187. if (evt != pool->events + index) /* unaligned */
  188. return 0;
  189. return 1;
  190. }
  191. /**
  192. * free_event_struct: - Changes status of event to "free"
  193. * @pool: event_pool that contains the event
  194. * @evt: srp_event_struct to be modified
  195. *
  196. */
  197. static void free_event_struct(struct event_pool *pool,
  198. struct srp_event_struct *evt)
  199. {
  200. if (!valid_event_struct(pool, evt)) {
  201. printk(KERN_ERR
  202. "ibmvscsi: Freeing invalid event_struct %p "
  203. "(not in pool %p)\n", evt, pool->events);
  204. return;
  205. }
  206. if (atomic_inc_return(&evt->free) != 1) {
  207. printk(KERN_ERR
  208. "ibmvscsi: Freeing event_struct %p "
  209. "which is not in use!\n", evt);
  210. return;
  211. }
  212. }
  213. /**
  214. * get_event_struct: - Gets the next free event in pool
  215. * @pool: event_pool that contains the events to be searched
  216. *
  217. * Returns the next event in "free" state, and NULL if none are free.
  218. * Note that no synchronization is done here, we assume the host_lock
  219. * will synchronize things.
  220. */
  221. static struct srp_event_struct *get_event_struct(struct event_pool *pool)
  222. {
  223. int i;
  224. int poolsize = pool->size;
  225. int offset = pool->next;
  226. for (i = 0; i < poolsize; i++) {
  227. offset = (offset + 1) % poolsize;
  228. if (!atomic_dec_if_positive(&pool->events[offset].free)) {
  229. pool->next = offset;
  230. return &pool->events[offset];
  231. }
  232. }
  233. printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
  234. return NULL;
  235. }
  236. /**
  237. * init_event_struct: Initialize fields in an event struct that are always
  238. * required.
  239. * @evt_struct: The event
  240. * @done: Routine to call when the event is responded to
  241. * @format: SRP or MAD format
  242. * @timeout: timeout value set in the CRQ
  243. */
  244. static void init_event_struct(struct srp_event_struct *evt_struct,
  245. void (*done) (struct srp_event_struct *),
  246. u8 format,
  247. int timeout)
  248. {
  249. evt_struct->cmnd = NULL;
  250. evt_struct->cmnd_done = NULL;
  251. evt_struct->sync_srp = NULL;
  252. evt_struct->crq.format = format;
  253. evt_struct->crq.timeout = timeout;
  254. evt_struct->done = done;
  255. }
  256. /* ------------------------------------------------------------
  257. * Routines for receiving SCSI responses from the hosting partition
  258. */
  259. /**
  260. * set_srp_direction: Set the fields in the srp related to data
  261. * direction and number of buffers based on the direction in
  262. * the scsi_cmnd and the number of buffers
  263. */
  264. static void set_srp_direction(struct scsi_cmnd *cmd,
  265. struct srp_cmd *srp_cmd,
  266. int numbuf)
  267. {
  268. if (numbuf == 0)
  269. return;
  270. if (numbuf == 1) {
  271. if (cmd->sc_data_direction == DMA_TO_DEVICE)
  272. srp_cmd->data_out_format = SRP_DIRECT_BUFFER;
  273. else
  274. srp_cmd->data_in_format = SRP_DIRECT_BUFFER;
  275. } else {
  276. if (cmd->sc_data_direction == DMA_TO_DEVICE) {
  277. srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
  278. srp_cmd->data_out_count =
  279. numbuf < MAX_INDIRECT_BUFS ?
  280. numbuf: MAX_INDIRECT_BUFS;
  281. } else {
  282. srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
  283. srp_cmd->data_in_count =
  284. numbuf < MAX_INDIRECT_BUFS ?
  285. numbuf: MAX_INDIRECT_BUFS;
  286. }
  287. }
  288. }
  289. static void unmap_sg_list(int num_entries,
  290. struct device *dev,
  291. struct memory_descriptor *md)
  292. {
  293. int i;
  294. for (i = 0; i < num_entries; ++i) {
  295. dma_unmap_single(dev,
  296. md[i].virtual_address,
  297. md[i].length, DMA_BIDIRECTIONAL);
  298. }
  299. }
  300. /**
  301. * unmap_cmd_data: - Unmaps the data pointed to by srp_cmd, based on its format
  302. * @cmd: srp_cmd whose additional_data member will be unmapped
  303. * @dev: device for which the memory is mapped
  304. *
  305. */
  306. static void unmap_cmd_data(struct srp_cmd *cmd,
  307. struct srp_event_struct *evt_struct,
  308. struct device *dev)
  309. {
  310. if ((cmd->data_out_format == SRP_NO_BUFFER) &&
  311. (cmd->data_in_format == SRP_NO_BUFFER))
  312. return;
  313. else if ((cmd->data_out_format == SRP_DIRECT_BUFFER) ||
  314. (cmd->data_in_format == SRP_DIRECT_BUFFER)) {
  315. struct memory_descriptor *data =
  316. (struct memory_descriptor *)cmd->additional_data;
  317. dma_unmap_single(dev, data->virtual_address, data->length,
  318. DMA_BIDIRECTIONAL);
  319. } else {
  320. struct indirect_descriptor *indirect =
  321. (struct indirect_descriptor *)cmd->additional_data;
  322. int num_mapped = indirect->head.length /
  323. sizeof(indirect->list[0]);
  324. if (num_mapped <= MAX_INDIRECT_BUFS) {
  325. unmap_sg_list(num_mapped, dev, &indirect->list[0]);
  326. return;
  327. }
  328. unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
  329. }
  330. }
  331. static u64 map_sg_list(int num_entries,
  332. struct scatterlist *sg,
  333. struct memory_descriptor *md)
  334. {
  335. int i;
  336. u64 total_length = 0;
  337. for (i = 0; i < num_entries; ++i) {
  338. struct memory_descriptor *descr = md + i;
  339. struct scatterlist *sg_entry = &sg[i];
  340. descr->virtual_address = sg_dma_address(sg_entry);
  341. descr->length = sg_dma_len(sg_entry);
  342. descr->memory_handle = 0;
  343. total_length += sg_dma_len(sg_entry);
  344. }
  345. return total_length;
  346. }
  347. /**
  348. * map_sg_data: - Maps DMA for a scatterlist and initializes descriptor fields
  349. * @cmd: Scsi_Cmnd with the scatterlist
  350. * @srp_cmd: srp_cmd that contains the memory descriptor
  351. * @dev: device for which to map dma memory
  352. *
  353. * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
  354. * Returns 1 on success.
  355. */
  356. static int map_sg_data(struct scsi_cmnd *cmd,
  357. struct srp_event_struct *evt_struct,
  358. struct srp_cmd *srp_cmd, struct device *dev)
  359. {
  360. int sg_mapped;
  361. u64 total_length = 0;
  362. struct scatterlist *sg = cmd->request_buffer;
  363. struct memory_descriptor *data =
  364. (struct memory_descriptor *)srp_cmd->additional_data;
  365. struct indirect_descriptor *indirect =
  366. (struct indirect_descriptor *)data;
  367. sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
  368. if (sg_mapped == 0)
  369. return 0;
  370. set_srp_direction(cmd, srp_cmd, sg_mapped);
  371. /* special case; we can use a single direct descriptor */
  372. if (sg_mapped == 1) {
  373. data->virtual_address = sg_dma_address(&sg[0]);
  374. data->length = sg_dma_len(&sg[0]);
  375. data->memory_handle = 0;
  376. return 1;
  377. }
  378. if (sg_mapped > SG_ALL) {
  379. printk(KERN_ERR
  380. "ibmvscsi: More than %d mapped sg entries, got %d\n",
  381. SG_ALL, sg_mapped);
  382. return 0;
  383. }
  384. indirect->head.virtual_address = 0;
  385. indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
  386. indirect->head.memory_handle = 0;
  387. if (sg_mapped <= MAX_INDIRECT_BUFS) {
  388. total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
  389. indirect->total_length = total_length;
  390. return 1;
  391. }
  392. /* get indirect table */
  393. if (!evt_struct->ext_list) {
  394. evt_struct->ext_list =(struct memory_descriptor*)
  395. dma_alloc_coherent(dev,
  396. SG_ALL * sizeof(struct memory_descriptor),
  397. &evt_struct->ext_list_token, 0);
  398. if (!evt_struct->ext_list) {
  399. printk(KERN_ERR
  400. "ibmvscsi: Can't allocate memory for indirect table\n");
  401. return 0;
  402. }
  403. }
  404. total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
  405. indirect->total_length = total_length;
  406. indirect->head.virtual_address = evt_struct->ext_list_token;
  407. indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
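/* Also mirror the first MAX_INDIRECT_BUFS descriptors into the inline
 * list that follows the indirect descriptor header. */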
  408. memcpy(indirect->list, evt_struct->ext_list,
  409. MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
  410. return 1;
  411. }
  412. /**
  413. * map_single_data: - Maps memory and initializes memory descriptor fields
  414. * @cmd: struct scsi_cmnd with the memory to be mapped
  415. * @srp_cmd: srp_cmd that contains the memory descriptor
  416. * @dev: device for which to map dma memory
  417. *
  418. * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
  419. * Returns 1 on success.
  420. */
  421. static int map_single_data(struct scsi_cmnd *cmd,
  422. struct srp_cmd *srp_cmd, struct device *dev)
  423. {
  424. struct memory_descriptor *data =
  425. (struct memory_descriptor *)srp_cmd->additional_data;
  426. data->virtual_address =
  427. dma_map_single(dev, cmd->request_buffer,
  428. cmd->request_bufflen,
  429. DMA_BIDIRECTIONAL);
  430. if (dma_mapping_error(data->virtual_address)) {
  431. printk(KERN_ERR
  432. "ibmvscsi: Unable to map request_buffer for command!\n");
  433. return 0;
  434. }
  435. data->length = cmd->request_bufflen;
  436. data->memory_handle = 0;
  437. set_srp_direction(cmd, srp_cmd, 1);
  438. return 1;
  439. }
  440. /**
  441. * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
  442. * @cmd: struct scsi_cmnd with the memory to be mapped
  443. * @srp_cmd: srp_cmd that contains the memory descriptor
  444. * @dev: dma device for which to map dma memory
  445. *
  446. * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds
  447. * Returns 1 on success.
  448. */
  449. static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
  450. struct srp_event_struct *evt_struct,
  451. struct srp_cmd *srp_cmd, struct device *dev)
  452. {
  453. switch (cmd->sc_data_direction) {
  454. case DMA_FROM_DEVICE:
  455. case DMA_TO_DEVICE:
  456. break;
  457. case DMA_NONE:
  458. return 1;
  459. case DMA_BIDIRECTIONAL:
  460. printk(KERN_ERR
  461. "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
  462. return 0;
  463. default:
  464. printk(KERN_ERR
  465. "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
  466. cmd->sc_data_direction);
  467. return 0;
  468. }
  469. if (!cmd->request_buffer)
  470. return 1;
  471. if (cmd->use_sg)
  472. return map_sg_data(cmd, evt_struct, srp_cmd, dev);
  473. return map_single_data(cmd, srp_cmd, dev);
  474. }
  475. /* ------------------------------------------------------------
  476. * Routines for sending and receiving SRPs
  477. */
  478. /**
  479. * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
  480. * @evt_struct: evt_struct to be sent
  481. * @hostdata: ibmvscsi_host_data of host
  482. *
  483. * Returns zero on success, or SCSI_MLQUEUE_HOST_BUSY on failure.
  484. * Note that this routine assumes that host_lock is held for synchronization
  485. */
  486. static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
  487. struct ibmvscsi_host_data *hostdata)
  488. {
  489. u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
  490. int rc;
  491. /* If we have exhausted our request limit, just fail this request.
  492. * Note that there are rare cases involving driver generated requests
  493. * (such as task management requests) that the mid layer may think we
  494. * can handle more requests (can_queue) when we actually can't
  495. */
  496. if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
  497. (atomic_dec_if_positive(&hostdata->request_limit) < 0))
  498. goto send_error;
  499. /* Copy the IU into the transfer area */
  500. *evt_struct->xfer_iu = evt_struct->iu;
  501. evt_struct->xfer_iu->srp.generic.tag = (u64)evt_struct;
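/* The event pointer doubles as the SRP tag; the server echoes it back in
 * the response CRQ's IU_data_ptr, from which ibmvscsi_handle_crq() finds
 * this event again. */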
  502. /* Add this to the sent list. We need to do this
  503. * before we actually send
  504. * in case it comes back REALLY fast
  505. */
  506. list_add_tail(&evt_struct->list, &hostdata->sent);
  507. if ((rc =
  508. ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
  509. list_del(&evt_struct->list);
  510. printk(KERN_ERR "ibmvscsi: send error %d\n",
  511. rc);
  512. goto send_error;
  513. }
  514. return 0;
  515. send_error:
  516. unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
  517. free_event_struct(&hostdata->pool, evt_struct);
  518. return SCSI_MLQUEUE_HOST_BUSY;
  519. }
  520. /**
  521. * handle_cmd_rsp: - Handle responses from commands
  522. * @evt_struct: srp_event_struct to be handled
  523. *
  524. * Used as a callback when sending scsi cmds.
  525. * Gets called by ibmvscsi_handle_crq()
  526. */
  527. static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
  528. {
  529. struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
  530. struct scsi_cmnd *cmnd = evt_struct->cmnd;
  531. if (unlikely(rsp->type != SRP_RSP_TYPE)) {
  532. if (printk_ratelimit())
  533. printk(KERN_WARNING
  534. "ibmvscsi: bad SRP RSP type %d\n",
  535. rsp->type);
  536. }
  537. if (cmnd) {
  538. cmnd->result = rsp->status;
  539. if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
  540. memcpy(cmnd->sense_buffer,
  541. rsp->sense_and_response_data,
  542. rsp->sense_data_list_length);
  543. unmap_cmd_data(&evt_struct->iu.srp.cmd,
  544. evt_struct,
  545. evt_struct->hostdata->dev);
  546. if (rsp->doover)
  547. cmnd->resid = rsp->data_out_residual_count;
  548. else if (rsp->diover)
  549. cmnd->resid = rsp->data_in_residual_count;
  550. }
  551. if (evt_struct->cmnd_done)
  552. evt_struct->cmnd_done(cmnd);
  553. }
  554. /**
  555. * lun_from_dev: - Returns the lun of the scsi device
  556. * @dev: struct scsi_device
  557. *
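* The 16-bit value packs the fields noted at the top of this file: a fixed
* 0x2 in the two high bits (the SCSI addressing-method bits), then 6 bits
* of id, 3 bits of channel, and 5 bits of LUN; callers shift it into the
* SRP LUN with "<< 48".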
  558. */
  559. static inline u16 lun_from_dev(struct scsi_device *dev)
  560. {
  561. return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
  562. }
  563. /**
  564. * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
  565. * @cmd: struct scsi_cmnd to be executed
  566. * @done: Callback function to be called when cmd is completed
  567. */
  568. static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
  569. void (*done) (struct scsi_cmnd *))
  570. {
  571. struct srp_cmd *srp_cmd;
  572. struct srp_event_struct *evt_struct;
  573. struct indirect_descriptor *indirect;
  574. struct ibmvscsi_host_data *hostdata =
  575. (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
  576. u16 lun = lun_from_dev(cmnd->device);
  577. evt_struct = get_event_struct(&hostdata->pool);
  578. if (!evt_struct)
  579. return SCSI_MLQUEUE_HOST_BUSY;
  580. /* Set up the actual SRP IU */
  581. srp_cmd = &evt_struct->iu.srp.cmd;
  582. memset(srp_cmd, 0x00, sizeof(*srp_cmd));
  583. srp_cmd->type = SRP_CMD_TYPE;
  584. memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
  585. srp_cmd->lun = ((u64) lun) << 48;
  586. if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
  587. printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
  588. free_event_struct(&hostdata->pool, evt_struct);
  589. return SCSI_MLQUEUE_HOST_BUSY;
  590. }
  591. init_event_struct(evt_struct,
  592. handle_cmd_rsp,
  593. VIOSRP_SRP_FORMAT,
  594. cmnd->timeout_per_command/HZ);
  595. evt_struct->cmnd = cmnd;
  596. evt_struct->cmnd_done = done;
  597. /* Fix up dma address of the buffer itself */
  598. indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
  599. if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
  600. (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
  601. (indirect->head.virtual_address == 0)) {
  602. indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
  603. offsetof(struct srp_cmd, additional_data) +
  604. offsetof(struct indirect_descriptor, list);
  605. }
  606. return ibmvscsi_send_srp_event(evt_struct, hostdata);
  607. }
  608. /* ------------------------------------------------------------
  609. * Routines for driver initialization
  610. */
  611. /**
  612. * adapter_info_rsp: - Handle response to MAD adapter info request
  613. * @evt_struct: srp_event_struct with the response
  614. *
  615. * Used as a "done" callback when sending adapter_info. Gets called
  616. * by ibmvscsi_handle_crq()
  617. */
  618. static void adapter_info_rsp(struct srp_event_struct *evt_struct)
  619. {
  620. struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
  621. dma_unmap_single(hostdata->dev,
  622. evt_struct->iu.mad.adapter_info.buffer,
  623. evt_struct->iu.mad.adapter_info.common.length,
  624. DMA_BIDIRECTIONAL);
  625. if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
  626. printk("ibmvscsi: error %d getting adapter info\n",
  627. evt_struct->xfer_iu->mad.adapter_info.common.status);
  628. } else {
  629. printk("ibmvscsi: host srp version: %s, "
  630. "host partition %s (%d), OS %d, max io %u\n",
  631. hostdata->madapter_info.srp_version,
  632. hostdata->madapter_info.partition_name,
  633. hostdata->madapter_info.partition_number,
  634. hostdata->madapter_info.os_type,
  635. hostdata->madapter_info.port_max_txu[0]);
  636. if (hostdata->madapter_info.port_max_txu[0])
  637. hostdata->host->max_sectors =
  638. hostdata->madapter_info.port_max_txu[0] >> 9;
  639. if (hostdata->madapter_info.os_type == 3 &&
  640. strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
  641. printk("ibmvscsi: host (Ver. %s) doesn't support large "
  642. "transfers\n",
  643. hostdata->madapter_info.srp_version);
  644. printk("ibmvscsi: limiting scatterlists to %d\n",
  645. MAX_INDIRECT_BUFS);
  646. hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
  647. }
  648. }
  649. }
  650. /**
  651. * send_mad_adapter_info: - Sends the mad adapter info request
  652. * and stores the result so it can be retrieved with
  653. * sysfs. We COULD consider causing a failure if the
  654. * returned SRP version doesn't match ours.
  655. * @hostdata: ibmvscsi_host_data of host
  656. *
  657. * Does not return a value; errors are logged.
  658. */
  659. static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
  660. {
  661. struct viosrp_adapter_info *req;
  662. struct srp_event_struct *evt_struct;
  663. evt_struct = get_event_struct(&hostdata->pool);
  664. if (!evt_struct) {
  665. printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
  666. "for ADAPTER_INFO_REQ!\n");
  667. return;
  668. }
  669. init_event_struct(evt_struct,
  670. adapter_info_rsp,
  671. VIOSRP_MAD_FORMAT,
  672. init_timeout * HZ);
  673. req = &evt_struct->iu.mad.adapter_info;
  674. memset(req, 0x00, sizeof(*req));
  675. req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
  676. req->common.length = sizeof(hostdata->madapter_info);
  677. req->buffer = dma_map_single(hostdata->dev,
  678. &hostdata->madapter_info,
  679. sizeof(hostdata->madapter_info),
  680. DMA_BIDIRECTIONAL);
  681. if (dma_mapping_error(req->buffer)) {
  682. printk(KERN_ERR
  683. "ibmvscsi: Unable to map request_buffer "
  684. "for adapter_info!\n");
  685. free_event_struct(&hostdata->pool, evt_struct);
  686. return;
  687. }
  688. if (ibmvscsi_send_srp_event(evt_struct, hostdata))
  689. printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
  690. }
  691. /**
  692. * login_rsp: - Handle response to SRP login request
  693. * @evt_struct: srp_event_struct with the response
  694. *
  695. * Used as a "done" callback when sending srp_login. Gets called
  696. * by ibmvscsi_handle_crq()
  697. */
  698. static void login_rsp(struct srp_event_struct *evt_struct)
  699. {
  700. struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
  701. switch (evt_struct->xfer_iu->srp.generic.type) {
  702. case SRP_LOGIN_RSP_TYPE: /* it worked! */
  703. break;
  704. case SRP_LOGIN_REJ_TYPE: /* refused! */
  705. printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
  706. evt_struct->xfer_iu->srp.login_rej.reason);
  707. /* Login failed. */
  708. atomic_set(&hostdata->request_limit, -1);
  709. return;
  710. default:
  711. printk(KERN_ERR
  712. "ibmvscsi: Invalid login response typecode 0x%02x!\n",
  713. evt_struct->xfer_iu->srp.generic.type);
  714. /* Login failed. */
  715. atomic_set(&hostdata->request_limit, -1);
  716. return;
  717. }
  718. printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
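/* Two request slots are held back below (the clamp to max_requests - 2 and
 * can_queue = delta - 2), presumably so driver-generated events (task
 * management, MADs) still have room; see the note in
 * ibmvscsi_send_srp_event(). */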
  719. if (evt_struct->xfer_iu->srp.login_rsp.request_limit_delta >
  720. (max_requests - 2))
  721. evt_struct->xfer_iu->srp.login_rsp.request_limit_delta =
  722. max_requests - 2;
  723. /* Now we know what the real request-limit is */
  724. atomic_set(&hostdata->request_limit,
  725. evt_struct->xfer_iu->srp.login_rsp.request_limit_delta);
  726. hostdata->host->can_queue =
  727. evt_struct->xfer_iu->srp.login_rsp.request_limit_delta - 2;
  728. if (hostdata->host->can_queue < 1) {
  729. printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
  730. return;
  731. }
  732. /* If we had any pending I/Os, kick them */
  733. scsi_unblock_requests(hostdata->host);
  734. send_mad_adapter_info(hostdata);
  735. return;
  736. }
  737. /**
  738. * send_srp_login: - Sends the srp login
  739. * @hostdata: ibmvscsi_host_data of host
  740. *
  741. * Returns zero if successful.
  742. */
  743. static int send_srp_login(struct ibmvscsi_host_data *hostdata)
  744. {
  745. int rc;
  746. unsigned long flags;
  747. struct srp_login_req *login;
  748. struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
  749. if (!evt_struct) {
  750. printk(KERN_ERR
  751. "ibmvscsi: couldn't allocate an event for login req!\n");
  752. return FAILED;
  753. }
  754. init_event_struct(evt_struct,
  755. login_rsp,
  756. VIOSRP_SRP_FORMAT,
  757. init_timeout * HZ);
  758. login = &evt_struct->iu.srp.login_req;
  759. memset(login, 0x00, sizeof(struct srp_login_req));
  760. login->type = SRP_LOGIN_REQ_TYPE;
  761. login->max_requested_initiator_to_target_iulen = sizeof(union srp_iu);
  762. login->required_buffer_formats = 0x0006;
  763. /* Start out with a request limit of 1, since this is negotiated in
  764. * the login request we are just sending
  765. */
  766. atomic_set(&hostdata->request_limit, 1);
  767. spin_lock_irqsave(hostdata->host->host_lock, flags);
  768. rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
  769. spin_unlock_irqrestore(hostdata->host->host_lock, flags);
  770. return rc;
  771. }
  772. /**
  773. * sync_completion: Signal that a synchronous command has completed
  774. * Note that after returning from this call, the evt_struct is freed.
  775. * the caller waiting on this completion shouldn't touch the evt_struct
  776. * again.
  777. */
  778. static void sync_completion(struct srp_event_struct *evt_struct)
  779. {
  780. /* copy the response back */
  781. if (evt_struct->sync_srp)
  782. *evt_struct->sync_srp = *evt_struct->xfer_iu;
  783. complete(&evt_struct->comp);
  784. }
  785. /**
  786. * ibmvscsi_eh_abort_handler: Abort a command (scsi host template entry point):
  787. * send an abort request to the server and wait synchronously for the response
  788. */
  789. static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
  790. {
  791. struct ibmvscsi_host_data *hostdata =
  792. (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
  793. struct srp_tsk_mgmt *tsk_mgmt;
  794. struct srp_event_struct *evt;
  795. struct srp_event_struct *tmp_evt, *found_evt;
  796. union viosrp_iu srp_rsp;
  797. int rsp_rc;
  798. unsigned long flags;
  799. u16 lun = lun_from_dev(cmd->device);
  800. /* First, find this command in our sent list so we can figure
  801. * out the correct tag
  802. */
  803. spin_lock_irqsave(hostdata->host->host_lock, flags);
  804. found_evt = NULL;
  805. list_for_each_entry(tmp_evt, &hostdata->sent, list) {
  806. if (tmp_evt->cmnd == cmd) {
  807. found_evt = tmp_evt;
  808. break;
  809. }
  810. }
  811. if (!found_evt) {
  812. spin_unlock_irqrestore(hostdata->host->host_lock, flags);
  813. return FAILED;
  814. }
  815. evt = get_event_struct(&hostdata->pool);
  816. if (evt == NULL) {
  817. spin_unlock_irqrestore(hostdata->host->host_lock, flags);
  818. printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
  819. return FAILED;
  820. }
  821. init_event_struct(evt,
  822. sync_completion,
  823. VIOSRP_SRP_FORMAT,
  824. init_timeout * HZ);
  825. tsk_mgmt = &evt->iu.srp.tsk_mgmt;
  826. /* Set up an abort SRP command */
  827. memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
  828. tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
  829. tsk_mgmt->lun = ((u64) lun) << 48;
  830. tsk_mgmt->task_mgmt_flags = 0x01; /* ABORT TASK */
  831. tsk_mgmt->managed_task_tag = (u64) found_evt;
  832. printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
  833. tsk_mgmt->lun, tsk_mgmt->managed_task_tag);
  834. evt->sync_srp = &srp_rsp;
  835. init_completion(&evt->comp);
  836. rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
  837. spin_unlock_irqrestore(hostdata->host->host_lock, flags);
  838. if (rsp_rc != 0) {
  839. printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
  840. return FAILED;
  841. }
  842. wait_for_completion(&evt->comp);
  843. /* make sure we got a good response */
  844. if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
  845. if (printk_ratelimit())
  846. printk(KERN_WARNING
  847. "ibmvscsi: abort bad SRP RSP type %d\n",
  848. srp_rsp.srp.generic.type);
  849. return FAILED;
  850. }
  851. if (srp_rsp.srp.rsp.rspvalid)
  852. rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
  853. else
  854. rsp_rc = srp_rsp.srp.rsp.status;
  855. if (rsp_rc) {
  856. if (printk_ratelimit())
  857. printk(KERN_WARNING
  858. "ibmvscsi: abort code %d for task tag 0x%lx\n",
  859. rsp_rc,
  860. tsk_mgmt->managed_task_tag);
  861. return FAILED;
  862. }
  863. /* Because we dropped the spinlock above, it's possible
  864. * The event is no longer in our list. Make sure it didn't
  865. * complete while we were aborting
  866. */
  867. spin_lock_irqsave(hostdata->host->host_lock, flags);
  868. found_evt = NULL;
  869. list_for_each_entry(tmp_evt, &hostdata->sent, list) {
  870. if (tmp_evt->cmnd == cmd) {
  871. found_evt = tmp_evt;
  872. break;
  873. }
  874. }
  875. if (found_evt == NULL) {
  876. spin_unlock_irqrestore(hostdata->host->host_lock, flags);
  877. printk(KERN_INFO
  878. "ibmvscsi: aborted task tag 0x%lx completed\n",
  879. tsk_mgmt->managed_task_tag);
  880. return SUCCESS;
  881. }
  882. printk(KERN_INFO
  883. "ibmvscsi: successfully aborted task tag 0x%lx\n",
  884. tsk_mgmt->managed_task_tag);
  885. cmd->result = (DID_ABORT << 16);
  886. list_del(&found_evt->list);
  887. unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
  888. found_evt->hostdata->dev);
  889. free_event_struct(&found_evt->hostdata->pool, found_evt);
  890. spin_unlock_irqrestore(hostdata->host->host_lock, flags);
  891. atomic_inc(&hostdata->request_limit);
  892. return SUCCESS;
  893. }
  894. /**
  895. * ibmvscsi_eh_device_reset_handler: Reset a single LUN (scsi host template
  896. * entry point): send a LUN reset to the server and wait synchronously for the
  897. * response
  898. */
  899. static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
  900. {
  901. struct ibmvscsi_host_data *hostdata =
  902. (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
  903. struct srp_tsk_mgmt *tsk_mgmt;
  904. struct srp_event_struct *evt;
  905. struct srp_event_struct *tmp_evt, *pos;
  906. union viosrp_iu srp_rsp;
  907. int rsp_rc;
  908. unsigned long flags;
  909. u16 lun = lun_from_dev(cmd->device);
  910. spin_lock_irqsave(hostdata->host->host_lock, flags);
  911. evt = get_event_struct(&hostdata->pool);
  912. if (evt == NULL) {
  913. spin_unlock_irqrestore(hostdata->host->host_lock, flags);
  914. printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
  915. return FAILED;
  916. }
  917. init_event_struct(evt,
  918. sync_completion,
  919. VIOSRP_SRP_FORMAT,
  920. init_timeout * HZ);
  921. tsk_mgmt = &evt->iu.srp.tsk_mgmt;
  922. /* Set up a lun reset SRP command */
  923. memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
  924. tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
  925. tsk_mgmt->lun = ((u64) lun) << 48;
  926. tsk_mgmt->task_mgmt_flags = 0x08; /* LUN RESET */
  927. printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
  928. tsk_mgmt->lun);
  929. evt->sync_srp = &srp_rsp;
  930. init_completion(&evt->comp);
  931. rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
  932. spin_unlock_irqrestore(hostdata->host->host_lock, flags);
  933. if (rsp_rc != 0) {
  934. printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
  935. return FAILED;
  936. }
  937. wait_for_completion(&evt->comp);
  938. /* make sure we got a good response */
  939. if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
  940. if (printk_ratelimit())
  941. printk(KERN_WARNING
  942. "ibmvscsi: reset bad SRP RSP type %d\n",
  943. srp_rsp.srp.generic.type);
  944. return FAILED;
  945. }
  946. if (srp_rsp.srp.rsp.rspvalid)
  947. rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
  948. else
  949. rsp_rc = srp_rsp.srp.rsp.status;
  950. if (rsp_rc) {
  951. if (printk_ratelimit())
  952. printk(KERN_WARNING
  953. "ibmvscsi: reset code %d for task tag 0x%lx\n",
  954. rsp_rc,
  955. tsk_mgmt->managed_task_tag);
  956. return FAILED;
  957. }
  958. /* We need to find all commands for this LUN that have not yet been
  959. * responded to, and fail them with DID_RESET
  960. */
  961. spin_lock_irqsave(hostdata->host->host_lock, flags);
  962. list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
  963. if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
  964. if (tmp_evt->cmnd)
  965. tmp_evt->cmnd->result = (DID_RESET << 16);
  966. list_del(&tmp_evt->list);
  967. unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
  968. tmp_evt->hostdata->dev);
  969. free_event_struct(&tmp_evt->hostdata->pool,
  970. tmp_evt);
  971. atomic_inc(&hostdata->request_limit);
  972. if (tmp_evt->cmnd_done)
  973. tmp_evt->cmnd_done(tmp_evt->cmnd);
  974. else if (tmp_evt->done)
  975. tmp_evt->done(tmp_evt);
  976. }
  977. }
  978. spin_unlock_irqrestore(hostdata->host->host_lock, flags);
  979. return SUCCESS;
  980. }
  981. /**
  982. * purge_requests: Our virtual adapter just shut down. purge any sent requests
  983. * @hostdata: the adapter
  984. */
  985. static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
  986. {
  987. struct srp_event_struct *tmp_evt, *pos;
  988. unsigned long flags;
  989. spin_lock_irqsave(hostdata->host->host_lock, flags);
  990. list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
  991. list_del(&tmp_evt->list);
  992. if (tmp_evt->cmnd) {
  993. tmp_evt->cmnd->result = (error_code << 16);
  994. unmap_cmd_data(&tmp_evt->iu.srp.cmd,
  995. tmp_evt,
  996. tmp_evt->hostdata->dev);
  997. if (tmp_evt->cmnd_done)
  998. tmp_evt->cmnd_done(tmp_evt->cmnd);
  999. } else {
  1000. if (tmp_evt->done) {
  1001. tmp_evt->done(tmp_evt);
  1002. }
  1003. }
  1004. free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
  1005. }
  1006. spin_unlock_irqrestore(hostdata->host->host_lock, flags);
  1007. }
  1008. /**
  1009. * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
  1010. * @crq: Command/Response queue
  1011. * @hostdata: ibmvscsi_host_data of host
  1012. *
  1013. */
  1014. void ibmvscsi_handle_crq(struct viosrp_crq *crq,
  1015. struct ibmvscsi_host_data *hostdata)
  1016. {
  1017. unsigned long flags;
  1018. struct srp_event_struct *evt_struct =
  1019. (struct srp_event_struct *)crq->IU_data_ptr;
  1020. switch (crq->valid) {
  1021. case 0xC0: /* initialization */
  1022. switch (crq->format) {
  1023. case 0x01: /* Initialization message */
  1024. printk(KERN_INFO "ibmvscsi: partner initialized\n");
  1025. /* Send back a response */
  1026. if (ibmvscsi_send_crq(hostdata,
  1027. 0xC002000000000000LL, 0) == 0) {
  1028. /* Now login */
  1029. send_srp_login(hostdata);
  1030. } else {
  1031. printk(KERN_ERR
  1032. "ibmvscsi: Unable to send init rsp\n");
  1033. }
  1034. break;
  1035. case 0x02: /* Initialization response */
  1036. printk(KERN_INFO
  1037. "ibmvscsi: partner initialization complete\n");
  1038. /* Now login */
  1039. send_srp_login(hostdata);
  1040. break;
  1041. default:
  1042. printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
  1043. }
  1044. return;
  1045. case 0xFF: /* Hypervisor telling us the connection is closed */
  1046. scsi_block_requests(hostdata->host);
  1047. if (crq->format == 0x06) {
  1048. /* We need to re-setup the interpartition connection */
  1049. printk(KERN_INFO
  1050. "ibmvscsi: Re-enabling adapter!\n");
  1051. purge_requests(hostdata, DID_REQUEUE);
  1052. if (ibmvscsi_reenable_crq_queue(&hostdata->queue,
  1053. hostdata) == 0)
  1054. if (ibmvscsi_send_crq(hostdata,
  1055. 0xC001000000000000LL, 0))
  1056. printk(KERN_ERR
  1057. "ibmvscsi: transmit error after"
  1058. " enable\n");
  1059. } else {
  1060. printk(KERN_INFO
  1061. "ibmvscsi: Virtual adapter failed rc %d!\n",
  1062. crq->format);
  1063. atomic_set(&hostdata->request_limit, -1);
  1064. purge_requests(hostdata, DID_ERROR);
  1065. ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
  1066. }
  1067. scsi_unblock_requests(hostdata->host);
  1068. return;
  1069. case 0x80: /* real payload */
  1070. break;
  1071. default:
  1072. printk(KERN_ERR
  1073. "ibmvscsi: got an invalid message type 0x%02x\n",
  1074. crq->valid);
  1075. return;
  1076. }
  1077. /* The only kind of payload CRQs we should get are responses to
  1078. * things we send. Make sure this response is to something we
  1079. * actually sent
  1080. */
  1081. if (!valid_event_struct(&hostdata->pool, evt_struct)) {
  1082. printk(KERN_ERR
  1083. "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
  1084. (void *)crq->IU_data_ptr);
  1085. return;
  1086. }
  1087. if (atomic_read(&evt_struct->free)) {
  1088. printk(KERN_ERR
  1089. "ibmvscsi: received duplicate correlation_token 0x%p!\n",
  1090. (void *)crq->IU_data_ptr);
  1091. return;
  1092. }
  1093. if (crq->format == VIOSRP_SRP_FORMAT)
  1094. atomic_add(evt_struct->xfer_iu->srp.rsp.request_limit_delta,
  1095. &hostdata->request_limit);
  1096. if (evt_struct->done)
  1097. evt_struct->done(evt_struct);
  1098. else
  1099. printk(KERN_ERR
  1100. "ibmvscsi: returned done() is NULL; not running it!\n");
  1101. /*
  1102. * Lock the host_lock before messing with these structures, since we
  1103. * are running in a task context
  1104. */
  1105. spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
  1106. list_del(&evt_struct->list);
  1107. free_event_struct(&evt_struct->hostdata->pool, evt_struct);
  1108. spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
  1109. }
  1110. /**
  1111. * ibmvscsi_do_host_config: Send the command to the server to get host
  1112. * configuration data. The data is opaque to us.
  1113. */
  1114. static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
  1115. unsigned char *buffer, int length)
  1116. {
  1117. struct viosrp_host_config *host_config;
  1118. struct srp_event_struct *evt_struct;
  1119. int rc;
  1120. evt_struct = get_event_struct(&hostdata->pool);
  1121. if (!evt_struct) {
  1122. printk(KERN_ERR
  1123. "ibmvscsi: couldn't allocate event for HOST_CONFIG!\n");
  1124. return -1;
  1125. }
  1126. init_event_struct(evt_struct,
  1127. sync_completion,
  1128. VIOSRP_MAD_FORMAT,
  1129. init_timeout * HZ);
  1130. host_config = &evt_struct->iu.mad.host_config;
  1131. /* Set up a HOST_CONFIG MAD request */
  1132. memset(host_config, 0x00, sizeof(*host_config));
  1133. host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
  1134. host_config->common.length = length;
  1135. host_config->buffer = dma_map_single(hostdata->dev, buffer, length,
  1136. DMA_BIDIRECTIONAL);
  1137. if (dma_mapping_error(host_config->buffer)) {
  1138. printk(KERN_ERR
  1139. "ibmvscsi: dma_mapping error " "getting host config\n");
  1140. free_event_struct(&hostdata->pool, evt_struct);
  1141. return -1;
  1142. }
  1143. init_completion(&evt_struct->comp);
  1144. rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
  1145. if (rc == 0) {
  1146. wait_for_completion(&evt_struct->comp);
  1147. dma_unmap_single(hostdata->dev, host_config->buffer,
  1148. length, DMA_BIDIRECTIONAL);
  1149. }
  1150. return rc;
  1151. }
  1152. /* ------------------------------------------------------------
  1153. * sysfs attributes
  1154. */
  1155. static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
  1156. {
  1157. struct Scsi_Host *shost = class_to_shost(class_dev);
  1158. struct ibmvscsi_host_data *hostdata =
  1159. (struct ibmvscsi_host_data *)shost->hostdata;
  1160. int len;
  1161. len = snprintf(buf, PAGE_SIZE, "%s\n",
  1162. hostdata->madapter_info.srp_version);
  1163. return len;
  1164. }
  1165. static struct class_device_attribute ibmvscsi_host_srp_version = {
  1166. .attr = {
  1167. .name = "srp_version",
  1168. .mode = S_IRUGO,
  1169. },
  1170. .show = show_host_srp_version,
  1171. };
  1172. static ssize_t show_host_partition_name(struct class_device *class_dev,
  1173. char *buf)
  1174. {
  1175. struct Scsi_Host *shost = class_to_shost(class_dev);
  1176. struct ibmvscsi_host_data *hostdata =
  1177. (struct ibmvscsi_host_data *)shost->hostdata;
  1178. int len;
  1179. len = snprintf(buf, PAGE_SIZE, "%s\n",
  1180. hostdata->madapter_info.partition_name);
  1181. return len;
  1182. }
  1183. static struct class_device_attribute ibmvscsi_host_partition_name = {
  1184. .attr = {
  1185. .name = "partition_name",
  1186. .mode = S_IRUGO,
  1187. },
  1188. .show = show_host_partition_name,
  1189. };
  1190. static ssize_t show_host_partition_number(struct class_device *class_dev,
  1191. char *buf)
  1192. {
  1193. struct Scsi_Host *shost = class_to_shost(class_dev);
  1194. struct ibmvscsi_host_data *hostdata =
  1195. (struct ibmvscsi_host_data *)shost->hostdata;
  1196. int len;
  1197. len = snprintf(buf, PAGE_SIZE, "%d\n",
  1198. hostdata->madapter_info.partition_number);
  1199. return len;
  1200. }
  1201. static struct class_device_attribute ibmvscsi_host_partition_number = {
  1202. .attr = {
  1203. .name = "partition_number",
  1204. .mode = S_IRUGO,
  1205. },
  1206. .show = show_host_partition_number,
  1207. };
  1208. static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
  1209. {
  1210. struct Scsi_Host *shost = class_to_shost(class_dev);
  1211. struct ibmvscsi_host_data *hostdata =
  1212. (struct ibmvscsi_host_data *)shost->hostdata;
  1213. int len;
  1214. len = snprintf(buf, PAGE_SIZE, "%d\n",
  1215. hostdata->madapter_info.mad_version);
  1216. return len;
  1217. }
  1218. static struct class_device_attribute ibmvscsi_host_mad_version = {
  1219. .attr = {
  1220. .name = "mad_version",
  1221. .mode = S_IRUGO,
  1222. },
  1223. .show = show_host_mad_version,
  1224. };
  1225. static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
  1226. {
  1227. struct Scsi_Host *shost = class_to_shost(class_dev);
  1228. struct ibmvscsi_host_data *hostdata =
  1229. (struct ibmvscsi_host_data *)shost->hostdata;
  1230. int len;
  1231. len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
  1232. return len;
  1233. }
  1234. static struct class_device_attribute ibmvscsi_host_os_type = {
  1235. .attr = {
  1236. .name = "os_type",
  1237. .mode = S_IRUGO,
  1238. },
  1239. .show = show_host_os_type,
  1240. };
  1241. static ssize_t show_host_config(struct class_device *class_dev, char *buf)
  1242. {
  1243. struct Scsi_Host *shost = class_to_shost(class_dev);
  1244. struct ibmvscsi_host_data *hostdata =
  1245. (struct ibmvscsi_host_data *)shost->hostdata;
  1246. /* returns null-terminated host config data */
  1247. if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
  1248. return strlen(buf);
  1249. else
  1250. return 0;
  1251. }
  1252. static struct class_device_attribute ibmvscsi_host_config = {
  1253. .attr = {
  1254. .name = "config",
  1255. .mode = S_IRUGO,
  1256. },
  1257. .show = show_host_config,
  1258. };
  1259. static struct class_device_attribute *ibmvscsi_attrs[] = {
  1260. &ibmvscsi_host_srp_version,
  1261. &ibmvscsi_host_partition_name,
  1262. &ibmvscsi_host_partition_number,
  1263. &ibmvscsi_host_mad_version,
  1264. &ibmvscsi_host_os_type,
  1265. &ibmvscsi_host_config,
  1266. NULL
  1267. };
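/* Illustrative note: once the host is registered, these attributes appear
 * as read-only files under /sys/class/scsi_host/host<N>/, e.g. srp_version,
 * partition_name, partition_number, mad_version, os_type and config. */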
  1268. /* ------------------------------------------------------------
  1269. * SCSI driver registration
  1270. */
  1271. static struct scsi_host_template driver_template = {
  1272. .module = THIS_MODULE,
  1273. .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
  1274. .proc_name = "ibmvscsi",
  1275. .queuecommand = ibmvscsi_queuecommand,
  1276. .eh_abort_handler = ibmvscsi_eh_abort_handler,
  1277. .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
  1278. .cmd_per_lun = 16,
  1279. .can_queue = 1, /* Updated after SRP_LOGIN */
  1280. .this_id = -1,
  1281. .sg_tablesize = SG_ALL,
  1282. .use_clustering = ENABLE_CLUSTERING,
  1283. .shost_attrs = ibmvscsi_attrs,
  1284. };
  1285. /**
  1286. * Called by bus code for each adapter
  1287. */
  1288. static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
  1289. {
  1290. struct ibmvscsi_host_data *hostdata;
  1291. struct Scsi_Host *host;
  1292. struct device *dev = &vdev->dev;
  1293. unsigned long wait_switch = 0;
  1294. vdev->dev.driver_data = NULL;
  1295. host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
  1296. if (!host) {
  1297. printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
  1298. goto scsi_host_alloc_failed;
  1299. }
  1300. hostdata = (struct ibmvscsi_host_data *)host->hostdata;
  1301. memset(hostdata, 0x00, sizeof(*hostdata));
  1302. INIT_LIST_HEAD(&hostdata->sent);
  1303. hostdata->host = host;
  1304. hostdata->dev = dev;
  1305. atomic_set(&hostdata->request_limit, -1);
  1306. hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
  1307. if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata,
  1308. max_requests) != 0) {
  1309. printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
  1310. goto init_crq_failed;
  1311. }
  1312. if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
  1313. printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n");
  1314. goto init_pool_failed;
  1315. }
  1316. host->max_lun = 8;
  1317. host->max_id = max_id;
  1318. host->max_channel = max_channel;
  1319. if (scsi_add_host(hostdata->host, hostdata->dev))
  1320. goto add_host_failed;
  1321. /* Try to send an initialization message. Note that this is allowed
  1322. * to fail if the other end is not active. In that case we don't
  1323. * want to scan
  1324. */
  1325. if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0) {
  1326. /*
  1327. * Wait around max init_timeout secs for the adapter to finish
  1328. * initializing. When we are done initializing, we will have a
  1329. * valid request_limit. We don't want Linux scanning before
  1330. * we are ready.
  1331. */
  1332. for (wait_switch = jiffies + (init_timeout * HZ);
  1333. time_before(jiffies, wait_switch) &&
  1334. atomic_read(&hostdata->request_limit) < 2;) {
  1335. msleep(10);
  1336. }
  1337. /* if we now have a valid request_limit, initiate a scan */
  1338. if (atomic_read(&hostdata->request_limit) > 0)
  1339. scsi_scan_host(host);
  1340. }
  1341. vdev->dev.driver_data = hostdata;
  1342. return 0;
  1343. add_host_failed:
  1344. release_event_pool(&hostdata->pool, hostdata);
  1345. init_pool_failed:
  1346. ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
  1347. init_crq_failed:
  1348. scsi_host_put(host);
  1349. scsi_host_alloc_failed:
  1350. return -1;
  1351. }
  1352. static int ibmvscsi_remove(struct vio_dev *vdev)
  1353. {
  1354. struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
  1355. release_event_pool(&hostdata->pool, hostdata);
  1356. ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
  1357. max_requests);
  1358. scsi_remove_host(hostdata->host);
  1359. scsi_host_put(hostdata->host);
  1360. return 0;
  1361. }
  1362. /**
  1363. * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
  1364. * support.
  1365. */
  1366. static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
  1367. {"vscsi", "IBM,v-scsi"},
  1368. { "", "" }
  1369. };
  1370. MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
  1371. static struct vio_driver ibmvscsi_driver = {
  1372. .id_table = ibmvscsi_device_table,
  1373. .probe = ibmvscsi_probe,
  1374. .remove = ibmvscsi_remove,
  1375. .driver = {
  1376. .name = "ibmvscsi",
  1377. .owner = THIS_MODULE,
  1378. }
  1379. };
  1380. int __init ibmvscsi_module_init(void)
  1381. {
  1382. return vio_register_driver(&ibmvscsi_driver);
  1383. }
  1384. void __exit ibmvscsi_module_exit(void)
  1385. {
  1386. vio_unregister_driver(&ibmvscsi_driver);
  1387. }
  1388. module_init(ibmvscsi_module_init);
  1389. module_exit(ibmvscsi_module_exit);