ibmvstgt.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001
  1. /*
  2. * IBM eServer i/pSeries Virtual SCSI Target Driver
  3. * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
  4. * Santiago Leon (santil@us.ibm.com) IBM Corp.
  5. * Linda Xie (lxie@us.ibm.com) IBM Corp.
  6. *
  7. * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; if not, write to the Free Software
  21. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  22. * USA
  23. */
  24. #include <linux/interrupt.h>
  25. #include <linux/module.h>
  26. #include <scsi/scsi.h>
  27. #include <scsi/scsi_host.h>
  28. #include <scsi/scsi_transport_srp.h>
  29. #include <scsi/scsi_tgt.h>
  30. #include <scsi/libsrp.h>
  31. #include <asm/hvcall.h>
  32. #include <asm/iommu.h>
  33. #include <asm/prom.h>
  34. #include <asm/vio.h>
  35. #include "ibmvscsi.h"
  36. #define INITIAL_SRP_LIMIT 16
  37. #define DEFAULT_MAX_SECTORS 256
  38. #define TGT_NAME "ibmvstgt"
  39. /*
  40. * Hypervisor calls.
  41. */
  42. #define h_copy_rdma(l, sa, sb, da, db) \
  43. plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
  44. #define h_send_crq(ua, l, h) \
  45. plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
  46. #define h_reg_crq(ua, tok, sz)\
  47. plpar_hcall_norets(H_REG_CRQ, ua, tok, sz);
  48. #define h_free_crq(ua) \
  49. plpar_hcall_norets(H_FREE_CRQ, ua);
  50. /* tmp - will replace with SCSI logging stuff */
  51. #define eprintk(fmt, args...) \
  52. do { \
  53. printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
  54. } while (0)
  55. /* #define dprintk eprintk */
  56. #define dprintk(fmt, args...)
/* Per-adapter state, hung off srp_target->ldata. */
struct vio_port {
	struct vio_dev *dma_dev;		/* underlying VIO device */
	struct crq_queue crq_queue;		/* CRQ shared with the hypervisor */
	struct work_struct crq_work;		/* bottom half that drains crq_queue */

	unsigned long liobn;			/* local I/O bus number (our DMA window) */
	unsigned long riobn;			/* remote I/O bus number (client's window) */
	struct srp_target *target;		/* back-pointer to owning target */

	struct srp_rport *rport;		/* set on first SRP login */
};

/* One workqueue shared by all adapters for CRQ processing. */
static struct workqueue_struct *vtgtd;
static struct scsi_transport_template *ibmvstgt_transport_template;

/*
 * These are fixed for the system and come from the Open Firmware device tree.
 * We just store them here to save getting them every time.
 */
static char system_id[64] = "";
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
  75. static struct vio_port *target_to_port(struct srp_target *target)
  76. {
  77. return (struct vio_port *) target->ldata;
  78. }
  79. static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
  80. {
  81. return (union viosrp_iu *) (iue->sbuf->buf);
  82. }
/*
 * Push a response IU to the client: RDMA the payload into the
 * initiator's buffer first, then post a CRQ element announcing it.
 * Returns 0 on success, otherwise a hypervisor error code.
 */
static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
{
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	long rc, rc1;
	union {
		struct viosrp_crq cooked;
		uint64_t raw[2];
	} crq;

	/* First copy the SRP */
	rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
			 vport->riobn, iue->remote_token);
	if (rc)
		eprintk("Error %ld transferring data\n", rc);

	crq.cooked.valid = 0x80;
	crq.cooked.format = format;
	crq.cooked.reserved = 0x00;
	crq.cooked.timeout = 0x00;
	crq.cooked.IU_length = length;
	crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;

	/*
	 * NOTE(review): status is non-zero on *success* and zero on
	 * failure here; the comment claims only "non-zero" matters —
	 * confirm against the VIOSRP CRQ spec before changing.
	 */
	if (rc == 0)
		crq.cooked.status = 0x99;	/* Just needs to be non-zero */
	else
		crq.cooked.status = 0x00;

	rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);

	if (rc1) {
		eprintk("%ld sending response\n", rc1);
		return rc1;
	}

	return rc;
}
#define SRP_RSP_SENSE_DATA_LEN	18

/*
 * Build and send an SRP_RSP IU for a completed (or rejected) request.
 * @sc may be NULL; then fixed-format sense data is fabricated from
 * @status (used as the sense key) and @asc.
 */
static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
		    unsigned char status, unsigned char asc)
{
	union viosrp_iu *iu = vio_iu(iue);
	uint64_t tag = iu->srp.rsp.tag;	/* preserved across the memset below */

	/* If the linked bit is on and status is good */
	if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
		status = 0x10;

	memset(iu, 0, sizeof(struct srp_rsp));
	iu->srp.rsp.opcode = SRP_RSP;
	iu->srp.rsp.req_lim_delta = 1;	/* return one request credit */
	iu->srp.rsp.tag = tag;

	if (test_bit(V_DIOVER, &iue->flags))
		iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;

	iu->srp.rsp.data_in_res_cnt = 0;
	iu->srp.rsp.data_out_res_cnt = 0;

	iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;

	iu->srp.rsp.resp_data_len = 0;
	iu->srp.rsp.status = status;
	if (status) {
		uint8_t *sense = iu->srp.rsp.data;

		if (sc) {
			/*
			 * Pass through the midlayer's sense buffer.
			 * NOTE(review): only SRP_RSP_SENSE_DATA_LEN bytes
			 * are transmitted below even though sense_data_len
			 * claims SCSI_SENSE_BUFFERSIZE — verify they agree.
			 */
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
			memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
		} else {
			/* Fabricate fixed-format sense data. */
			iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;

			/* Valid bit and 'current errors' */
			sense[0] = (0x1 << 7 | 0x70);
			/* Sense key */
			sense[2] = status;
			/* Additional sense length */
			sense[7] = 0xa;	/* 10 bytes */
			/* Additional sense code */
			sense[12] = asc;
		}
	}

	send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
		VIOSRP_SRP_FORMAT);

	return 0;
}
/*
 * Hand every not-yet-submitted IU on the target's cmd_queue to the tgt
 * core.  The lock must be dropped around srp_cmd_queue(), so after each
 * submission the list walk restarts from scratch ("retry"); entries
 * already handed off are marked V_FLYING and skipped on later passes.
 */
static void handle_cmd_queue(struct srp_target *target)
{
	struct Scsi_Host *shost = target->shost;
	struct srp_rport *rport = target_to_port(target)->rport;
	struct iu_entry *iue;
	struct srp_cmd *cmd;
	unsigned long flags;
	int err;

retry:
	spin_lock_irqsave(&target->lock, flags);

	list_for_each_entry(iue, &target->cmd_queue, ilist) {
		if (!test_and_set_bit(V_FLYING, &iue->flags)) {
			spin_unlock_irqrestore(&target->lock, flags);
			cmd = iue->sbuf->buf;
			err = srp_cmd_queue(shost, cmd, iue,
					    (unsigned long)rport, 0);
			if (err) {
				eprintk("cannot queue cmd %p %d\n", cmd, err);
				srp_iu_put(iue);
			}
			goto retry;
		}
	}

	spin_unlock_irqrestore(&target->lock, flags);
}
  183. static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
  184. struct srp_direct_buf *md, int nmd,
  185. enum dma_data_direction dir, unsigned int rest)
  186. {
  187. struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
  188. struct srp_target *target = iue->target;
  189. struct vio_port *vport = target_to_port(target);
  190. dma_addr_t token;
  191. long err;
  192. unsigned int done = 0;
  193. int i, sidx, soff;
  194. sidx = soff = 0;
  195. token = sg_dma_address(sg + sidx);
  196. for (i = 0; i < nmd && rest; i++) {
  197. unsigned int mdone, mlen;
  198. mlen = min(rest, md[i].len);
  199. for (mdone = 0; mlen;) {
  200. int slen = min(sg_dma_len(sg + sidx) - soff, mlen);
  201. if (dir == DMA_TO_DEVICE)
  202. err = h_copy_rdma(slen,
  203. vport->riobn,
  204. md[i].va + mdone,
  205. vport->liobn,
  206. token + soff);
  207. else
  208. err = h_copy_rdma(slen,
  209. vport->liobn,
  210. token + soff,
  211. vport->riobn,
  212. md[i].va + mdone);
  213. if (err != H_SUCCESS) {
  214. eprintk("rdma error %d %d %ld\n", dir, slen, err);
  215. return -EIO;
  216. }
  217. mlen -= slen;
  218. mdone += slen;
  219. soff += slen;
  220. done += slen;
  221. if (soff == sg_dma_len(sg + sidx)) {
  222. sidx++;
  223. soff = 0;
  224. token = sg_dma_address(sg + sidx);
  225. if (sidx > nsg) {
  226. eprintk("out of sg %p %d %d\n",
  227. iue, sidx, nsg);
  228. return -EIO;
  229. }
  230. }
  231. };
  232. rest -= mlen;
  233. }
  234. return 0;
  235. }
/*
 * tgt-core "transfer_response" hook: move any data phase via RDMA, then
 * send the SRP_RSP, complete the midlayer command and release the IU.
 */
static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
			     void (*done)(struct scsi_cmnd *))
{
	unsigned long flags;
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;
	int err = 0;

	dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
		scsi_sg_count(sc));

	/* Only commands with a scatterlist have data to move. */
	if (scsi_sg_count(sc))
		err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);

	spin_lock_irqsave(&target->lock, flags);
	list_del(&iue->ilist);
	spin_unlock_irqrestore(&target->lock, flags);

	if (err|| sc->result != SAM_STAT_GOOD) {
		eprintk("operation failed %p %d %x\n",
			iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
		send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
	} else
		send_rsp(iue, sc, NO_SENSE, 0x00);

	done(sc);
	srp_iu_put(iue);
	return 0;
}
/*
 * Handle a VIOSRP_ADAPTER_INFO_TYPE MAD: read the client's adapter-info
 * buffer (for logging), then overwrite it with our own identity.
 * Returns 0 on success, 1 on failure (value is used as the MAD status).
 */
int send_adapter_info(struct iu_entry *iue,
		      dma_addr_t remote_buffer, uint16_t length)
{
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	struct Scsi_Host *shost = target->shost;
	dma_addr_t data_token;
	struct mad_adapter_info_data *info;
	int err;

	info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
				  GFP_KERNEL);
	if (!info) {
		eprintk("bad dma_alloc_coherent %p\n", target);
		return 1;
	}

	/* Get remote info */
	err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
			  vport->liobn, data_token);
	if (err == H_SUCCESS) {
		dprintk("Client connect: %s (%d)\n",
			info->partition_name, info->partition_number);
	}

	memset(info, 0, sizeof(*info));

	strcpy(info->srp_version, "16.a");
	/*
	 * NOTE(review): strncpy leaves the field unterminated when the
	 * source exactly fills it — presumably the protocol treats
	 * partition_name as a fixed-width field; confirm.
	 */
	strncpy(info->partition_name, partition_name,
		sizeof(info->partition_name));
	info->partition_number = partition_number;
	info->mad_version = 1;
	info->os_type = 2;	/* NOTE(review): magic OS-type code — see VIOSRP spec */
	info->port_max_txu[0] = shost->hostt->max_sectors << 9;	/* bytes */

	/* Send our info to remote */
	err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
			  vport->riobn, remote_buffer);

	dma_free_coherent(target->dev, sizeof(*info), info, data_token);

	if (err != H_SUCCESS) {
		eprintk("Error sending adapter info %d\n", err);
		return 1;
	}

	return 0;
}
/*
 * Answer an SRP_LOGIN_REQ: register an rport for the initiator on the
 * first login and reply with SRP_LOGIN_RSP granting INITIAL_SRP_LIMIT
 * request credits.
 */
static void process_login(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	struct srp_login_rsp *rsp = &iu->srp.login_rsp;
	uint64_t tag = iu->srp.rsp.tag;
	struct Scsi_Host *shost = iue->target->shost;
	struct srp_target *target = host_to_srp_target(shost);
	struct vio_port *vport = target_to_port(target);
	struct srp_rport_identifiers ids;

	memset(&ids, 0, sizeof(ids));
	sprintf(ids.port_id, "%x", vport->dma_dev->unit_address);
	ids.roles = SRP_RPORT_ROLE_INITIATOR;
	/* NOTE(review): srp_rport_add() can fail; the result is unchecked. */
	if (!vport->rport)
		vport->rport = srp_rport_add(shost, &ids);

	/* TODO handle case that requested size is wrong and
	 * buffer format is wrong
	 */
	memset(iu, 0, sizeof(struct srp_login_rsp));
	rsp->opcode = SRP_LOGIN_RSP;
	rsp->req_lim_delta = INITIAL_SRP_LIMIT;
	rsp->tag = tag;
	rsp->max_it_iu_len = sizeof(union srp_iu);
	rsp->max_ti_iu_len = sizeof(union srp_iu);
	/* direct and indirect */
	rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

	send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
}
  327. static inline void queue_cmd(struct iu_entry *iue)
  328. {
  329. struct srp_target *target = iue->target;
  330. unsigned long flags;
  331. spin_lock_irqsave(&target->lock, flags);
  332. list_add_tail(&iue->ilist, &target->cmd_queue);
  333. spin_unlock_irqrestore(&target->lock, flags);
  334. }
  335. static int process_tsk_mgmt(struct iu_entry *iue)
  336. {
  337. union viosrp_iu *iu = vio_iu(iue);
  338. int fn;
  339. dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);
  340. switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
  341. case SRP_TSK_ABORT_TASK:
  342. fn = ABORT_TASK;
  343. break;
  344. case SRP_TSK_ABORT_TASK_SET:
  345. fn = ABORT_TASK_SET;
  346. break;
  347. case SRP_TSK_CLEAR_TASK_SET:
  348. fn = CLEAR_TASK_SET;
  349. break;
  350. case SRP_TSK_LUN_RESET:
  351. fn = LOGICAL_UNIT_RESET;
  352. break;
  353. case SRP_TSK_CLEAR_ACA:
  354. fn = CLEAR_ACA;
  355. break;
  356. default:
  357. fn = 0;
  358. }
  359. if (fn)
  360. scsi_tgt_tsk_mgmt_request(iue->target->shost,
  361. (unsigned long)iue->target->shost,
  362. fn,
  363. iu->srp.tsk_mgmt.task_tag,
  364. (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
  365. iue);
  366. else
  367. send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);
  368. return !fn;
  369. }
  370. static int process_mad_iu(struct iu_entry *iue)
  371. {
  372. union viosrp_iu *iu = vio_iu(iue);
  373. struct viosrp_adapter_info *info;
  374. struct viosrp_host_config *conf;
  375. switch (iu->mad.empty_iu.common.type) {
  376. case VIOSRP_EMPTY_IU_TYPE:
  377. eprintk("%s\n", "Unsupported EMPTY MAD IU");
  378. break;
  379. case VIOSRP_ERROR_LOG_TYPE:
  380. eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
  381. iu->mad.error_log.common.status = 1;
  382. send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
  383. break;
  384. case VIOSRP_ADAPTER_INFO_TYPE:
  385. info = &iu->mad.adapter_info;
  386. info->common.status = send_adapter_info(iue, info->buffer,
  387. info->common.length);
  388. send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
  389. break;
  390. case VIOSRP_HOST_CONFIG_TYPE:
  391. conf = &iu->mad.host_config;
  392. conf->common.status = 1;
  393. send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
  394. break;
  395. default:
  396. eprintk("Unknown type %u\n", iu->srp.rsp.opcode);
  397. }
  398. return 1;
  399. }
  400. static int process_srp_iu(struct iu_entry *iue)
  401. {
  402. union viosrp_iu *iu = vio_iu(iue);
  403. int done = 1;
  404. u8 opcode = iu->srp.rsp.opcode;
  405. switch (opcode) {
  406. case SRP_LOGIN_REQ:
  407. process_login(iue);
  408. break;
  409. case SRP_TSK_MGMT:
  410. done = process_tsk_mgmt(iue);
  411. break;
  412. case SRP_CMD:
  413. queue_cmd(iue);
  414. done = 0;
  415. break;
  416. case SRP_LOGIN_RSP:
  417. case SRP_I_LOGOUT:
  418. case SRP_T_LOGOUT:
  419. case SRP_RSP:
  420. case SRP_CRED_REQ:
  421. case SRP_CRED_RSP:
  422. case SRP_AER_REQ:
  423. case SRP_AER_RSP:
  424. eprintk("Unsupported type %u\n", opcode);
  425. break;
  426. default:
  427. eprintk("Unknown type %u\n", opcode);
  428. }
  429. return done;
  430. }
/*
 * Copy an incoming IU out of the client's memory and dispatch it by CRQ
 * format (MAD vs SRP).  The IU is released here unless the handler took
 * ownership (returned "not done", e.g. a queued SRP_CMD).
 */
static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct iu_entry *iue;
	long err;
	int done = 1;

	iue = srp_iu_get(target);
	if (!iue) {
		eprintk("Error getting IU from pool, %p\n", target);
		return;
	}

	iue->remote_token = crq->IU_data_ptr;

	err = h_copy_rdma(crq->IU_length, vport->riobn,
			  iue->remote_token, vport->liobn, iue->sbuf->dma);

	if (err != H_SUCCESS) {
		eprintk("%ld transferring data error %p\n", err, iue);
		goto out;
	}

	if (crq->format == VIOSRP_MAD_FORMAT)
		done = process_mad_iu(iue);
	else
		done = process_srp_iu(iue);
out:
	if (done)
		srp_iu_put(iue);
}
  457. static irqreturn_t ibmvstgt_interrupt(int dummy, void *data)
  458. {
  459. struct srp_target *target = data;
  460. struct vio_port *vport = target_to_port(target);
  461. vio_disable_interrupts(vport->dma_dev);
  462. queue_work(vtgtd, &vport->crq_work);
  463. return IRQ_HANDLED;
  464. }
  465. static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
  466. {
  467. int err;
  468. struct vio_port *vport = target_to_port(target);
  469. queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
  470. if (!queue->msgs)
  471. goto malloc_failed;
  472. queue->size = PAGE_SIZE / sizeof(*queue->msgs);
  473. queue->msg_token = dma_map_single(target->dev, queue->msgs,
  474. queue->size * sizeof(*queue->msgs),
  475. DMA_BIDIRECTIONAL);
  476. if (dma_mapping_error(target->dev, queue->msg_token))
  477. goto map_failed;
  478. err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
  479. PAGE_SIZE);
  480. /* If the adapter was left active for some reason (like kexec)
  481. * try freeing and re-registering
  482. */
  483. if (err == H_RESOURCE) {
  484. do {
  485. err = h_free_crq(vport->dma_dev->unit_address);
  486. } while (err == H_BUSY || H_IS_LONG_BUSY(err));
  487. err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
  488. PAGE_SIZE);
  489. }
  490. if (err != H_SUCCESS && err != 2) {
  491. eprintk("Error 0x%x opening virtual adapter\n", err);
  492. goto reg_crq_failed;
  493. }
  494. err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
  495. IRQF_DISABLED, "ibmvstgt", target);
  496. if (err)
  497. goto req_irq_failed;
  498. vio_enable_interrupts(vport->dma_dev);
  499. h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);
  500. queue->cur = 0;
  501. spin_lock_init(&queue->lock);
  502. return 0;
  503. req_irq_failed:
  504. do {
  505. err = h_free_crq(vport->dma_dev->unit_address);
  506. } while (err == H_BUSY || H_IS_LONG_BUSY(err));
  507. reg_crq_failed:
  508. dma_unmap_single(target->dev, queue->msg_token,
  509. queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
  510. map_failed:
  511. free_page((unsigned long) queue->msgs);
  512. malloc_failed:
  513. return -ENOMEM;
  514. }
/*
 * Tear down the CRQ: release the IRQ, deregister the queue with the
 * hypervisor (retrying while it reports busy) and free the DMA page.
 */
static void crq_queue_destroy(struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct crq_queue *queue = &vport->crq_queue;
	int err;

	free_irq(vport->dma_dev->irq, target);
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));

	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);

	free_page((unsigned long) queue->msgs);
}
/*
 * Dispatch one CRQ element by its 'valid' code: 0xC0 = initialization
 * handshake (format 0x01 = init request, answered with 0x02; 0x02 =
 * init response), 0xFF = transport event, 0x80 = payload carrying an
 * SRP or MAD IU.
 */
static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	dprintk("%x %x\n", crq->valid, crq->format);

	switch (crq->valid) {
	case 0xC0:
		/* initialization */
		switch (crq->format) {
		case 0x01:
			h_send_crq(vport->dma_dev->unit_address,
				   0xC002000000000000, 0);
			break;
		case 0x02:
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	case 0xFF:
		/* transport event */
		break;
	case 0x80:
		/* real payload */
		switch (crq->format) {
		case VIOSRP_SRP_FORMAT:
		case VIOSRP_MAD_FORMAT:
			process_iu(crq, target);
			break;
		case VIOSRP_OS400_FORMAT:
		case VIOSRP_AIX_FORMAT:
		case VIOSRP_LINUX_FORMAT:
		case VIOSRP_INLINE_FORMAT:
			eprintk("Unsupported format %u\n", crq->format);
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	default:
		eprintk("unknown message type 0x%02x!?\n", crq->valid);
	}
}
  570. static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
  571. {
  572. struct viosrp_crq *crq;
  573. unsigned long flags;
  574. spin_lock_irqsave(&queue->lock, flags);
  575. crq = &queue->msgs[queue->cur];
  576. if (crq->valid & 0x80) {
  577. if (++queue->cur == queue->size)
  578. queue->cur = 0;
  579. } else
  580. crq = NULL;
  581. spin_unlock_irqrestore(&queue->lock, flags);
  582. return crq;
  583. }
/*
 * Workqueue bottom half.  Drains the CRQ, re-enables interrupts, then
 * checks once more: an element arriving in the window between the last
 * drain and vio_enable_interrupts() would otherwise be stranded until
 * the next interrupt.  Finally submits any queued SRP commands.
 */
static void handle_crq(struct work_struct *work)
{
	struct vio_port *vport = container_of(work, struct vio_port, crq_work);
	struct srp_target *target = vport->target;
	struct viosrp_crq *crq;
	int done = 0;

	while (!done) {
		while ((crq = next_crq(&vport->crq_queue)) != NULL) {
			process_crq(crq, target);
			crq->valid = 0x00;	/* hand the slot back */
		}

		vio_enable_interrupts(vport->dma_dev);

		/* Close the race described above. */
		crq = next_crq(&vport->crq_queue);
		if (crq) {
			vio_disable_interrupts(vport->dma_dev);
			process_crq(crq, target);
			crq->valid = 0x00;
		} else
			done = 1;
	}

	handle_cmd_queue(target);
}
  606. static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
  607. {
  608. unsigned long flags;
  609. struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
  610. struct srp_target *target = iue->target;
  611. dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
  612. spin_lock_irqsave(&target->lock, flags);
  613. list_del(&iue->ilist);
  614. spin_unlock_irqrestore(&target->lock, flags);
  615. srp_iu_put(iue);
  616. return 0;
  617. }
  618. static int ibmvstgt_tsk_mgmt_response(struct Scsi_Host *shost,
  619. u64 itn_id, u64 mid, int result)
  620. {
  621. struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
  622. union viosrp_iu *iu = vio_iu(iue);
  623. unsigned char status, asc;
  624. eprintk("%p %d\n", iue, result);
  625. status = NO_SENSE;
  626. asc = 0;
  627. switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
  628. case SRP_TSK_ABORT_TASK:
  629. asc = 0x14;
  630. if (result)
  631. status = ABORTED_COMMAND;
  632. break;
  633. default:
  634. break;
  635. }
  636. send_rsp(iue, NULL, status, asc);
  637. srp_iu_put(iue);
  638. return 0;
  639. }
  640. static int ibmvstgt_it_nexus_response(struct Scsi_Host *shost, u64 itn_id,
  641. int result)
  642. {
  643. struct srp_target *target = host_to_srp_target(shost);
  644. struct vio_port *vport = target_to_port(target);
  645. if (result) {
  646. eprintk("%p %d\n", shost, result);
  647. srp_rport_del(vport->rport);
  648. vport->rport = NULL;
  649. }
  650. return 0;
  651. }
  652. static ssize_t system_id_show(struct device *dev,
  653. struct device_attribute *attr, char *buf)
  654. {
  655. return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
  656. }
  657. static ssize_t partition_number_show(struct device *dev,
  658. struct device_attribute *attr, char *buf)
  659. {
  660. return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
  661. }
  662. static ssize_t unit_address_show(struct device *dev,
  663. struct device_attribute *attr, char *buf)
  664. {
  665. struct Scsi_Host *shost = class_to_shost(dev);
  666. struct srp_target *target = host_to_srp_target(shost);
  667. struct vio_port *vport = target_to_port(target);
  668. return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
  669. }
/* Read-only sysfs attributes exported on the Scsi_Host device. */
static DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
static DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
static DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);

static struct device_attribute *ibmvstgt_attrs[] = {
	&dev_attr_system_id,
	&dev_attr_partition_number,
	&dev_attr_unit_address,
	NULL,
};
/* SCSI host template: this driver operates in target mode only. */
static struct scsi_host_template ibmvstgt_sht = {
	.name			= TGT_NAME,
	.module			= THIS_MODULE,
	.can_queue		= INITIAL_SRP_LIMIT,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= DISABLE_CLUSTERING,
	.max_sectors		= DEFAULT_MAX_SECTORS,
	.transfer_response	= ibmvstgt_cmd_done,
	.eh_abort_handler	= ibmvstgt_eh_abort_handler,
	.shost_attrs		= ibmvstgt_attrs,
	.proc_name		= TGT_NAME,
	.supported_mode		= MODE_TARGET,
};
/*
 * Probe one v-scsi-host VIO device: allocate the port and SCSI host,
 * read the DMA window (local/remote LIOBNs) from the device tree,
 * create the CRQ and register with the SCSI midlayer / tgt core.
 * Unwinds in reverse order on any failure.
 */
static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct Scsi_Host *shost;
	struct srp_target *target;
	struct vio_port *vport;
	unsigned int *dma, dma_size;
	int err = -ENOMEM;

	vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
	if (!vport)
		return err;
	shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
	if (!shost)
		goto free_vport;
	shost->transportt = ibmvstgt_transport_template;

	target = host_to_srp_target(shost);
	target->shost = shost;
	vport->dma_dev = dev;
	target->ldata = vport;
	vport->target = target;
	err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
			       SRP_MAX_IU_LEN);
	if (err)
		goto put_host;

	/* 40 bytes = 10 cells; liobn is cell 0, riobn is cell 5. */
	dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
						 &dma_size);
	if (!dma || dma_size != 40) {
		eprintk("Couldn't get window property %d\n", dma_size);
		err = -EIO;
		goto free_srp_target;
	}
	vport->liobn = dma[0];
	vport->riobn = dma[5];

	INIT_WORK(&vport->crq_work, handle_crq);

	err = crq_queue_create(&vport->crq_queue, target);
	if (err)
		goto free_srp_target;

	err = scsi_add_host(shost, target->dev);
	if (err)
		goto destroy_queue;

	err = scsi_tgt_alloc_queue(shost);
	if (err)
		goto destroy_queue;

	return 0;
destroy_queue:
	crq_queue_destroy(target);
free_srp_target:
	srp_target_free(target);
put_host:
	scsi_host_put(shost);
free_vport:
	kfree(vport);
	return err;
}
/*
 * Device teardown: quiesce the CRQ first so no new requests arrive,
 * then dismantle the SCSI host and free everything probe allocated.
 */
static int ibmvstgt_remove(struct vio_dev *dev)
{
	/* NOTE(review): reads driver_data directly; prefer dev_get_drvdata(). */
	struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
	struct Scsi_Host *shost = target->shost;
	struct vio_port *vport = target->ldata;

	crq_queue_destroy(target);
	srp_remove_host(shost);
	scsi_remove_host(shost);
	scsi_tgt_free_queue(shost);
	srp_target_free(target);
	kfree(vport);
	scsi_host_put(shost);
	return 0;
}
/* Device-tree match table: { type, compat }, terminated by empty strings. */
static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
	{"v-scsi-host", "IBM,v-scsi-host"},
	{"",""}
};

MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);

static struct vio_driver ibmvstgt_driver = {
	.id_table = ibmvstgt_device_table,
	.probe = ibmvstgt_probe,
	.remove = ibmvstgt_remove,
	.driver = {
		/*
		 * NOTE(review): driver name differs from TGT_NAME
		 * ("ibmvstgt") — presumably intentional; confirm before
		 * unifying.
		 */
		.name = "ibmvscsis",
		.owner = THIS_MODULE,
	}
};
  773. static int get_system_info(void)
  774. {
  775. struct device_node *rootdn;
  776. const char *id, *model, *name;
  777. const unsigned int *num;
  778. rootdn = of_find_node_by_path("/");
  779. if (!rootdn)
  780. return -ENOENT;
  781. model = of_get_property(rootdn, "model", NULL);
  782. id = of_get_property(rootdn, "system-id", NULL);
  783. if (model && id)
  784. snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
  785. name = of_get_property(rootdn, "ibm,partition-name", NULL);
  786. if (name)
  787. strncpy(partition_name, name, sizeof(partition_name));
  788. num = of_get_property(rootdn, "ibm,partition-no", NULL);
  789. if (num)
  790. partition_number = *num;
  791. of_node_put(rootdn);
  792. return 0;
  793. }
/* Callbacks invoked by the SRP transport class / tgt core. */
static struct srp_function_template ibmvstgt_transport_functions = {
	.tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
	.it_nexus_response = ibmvstgt_it_nexus_response,
};
/*
 * Module init: attach the SRP transport class, create the CRQ work
 * queue, snapshot system identity from the device tree and register
 * the VIO driver.  Unwinds in reverse order on failure.
 */
static int ibmvstgt_init(void)
{
	int err = -ENOMEM;

	printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");

	ibmvstgt_transport_template =
		srp_attach_transport(&ibmvstgt_transport_functions);
	if (!ibmvstgt_transport_template)
		return err;

	vtgtd = create_workqueue("ibmvtgtd");
	if (!vtgtd)
		goto release_transport;

	err = get_system_info();
	if (err)
		goto destroy_wq;

	err = vio_register_driver(&ibmvstgt_driver);
	if (err)
		goto destroy_wq;

	return 0;
destroy_wq:
	destroy_workqueue(vtgtd);
release_transport:
	srp_release_transport(ibmvstgt_transport_template);
	return err;
}
  822. static void ibmvstgt_exit(void)
  823. {
  824. printk("Unregister IBM virtual SCSI driver\n");
  825. destroy_workqueue(vtgtd);
  826. vio_unregister_driver(&ibmvstgt_driver);
  827. srp_release_transport(ibmvstgt_transport_template);
  828. }
  829. MODULE_DESCRIPTION("IBM Virtual SCSI Target");
  830. MODULE_AUTHOR("Santiago Leon");
  831. MODULE_LICENSE("GPL");
  832. module_init(ibmvstgt_init);
  833. module_exit(ibmvstgt_exit);