ibmvstgt.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958
  1. /*
  2. * IBM eServer i/pSeries Virtual SCSI Target Driver
  3. * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
  4. * Santiago Leon (santil@us.ibm.com) IBM Corp.
  5. * Linda Xie (lxie@us.ibm.com) IBM Corp.
  6. *
  7. * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; if not, write to the Free Software
  21. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  22. * USA
  23. */
  24. #include <linux/interrupt.h>
  25. #include <linux/module.h>
  26. #include <scsi/scsi.h>
  27. #include <scsi/scsi_host.h>
  28. #include <scsi/scsi_tgt.h>
  29. #include <scsi/libsrp.h>
  30. #include <asm/hvcall.h>
  31. #include <asm/iommu.h>
  32. #include <asm/prom.h>
  33. #include <asm/vio.h>
  34. #include "ibmvscsi.h"
  35. #define INITIAL_SRP_LIMIT 16
  36. #define DEFAULT_MAX_SECTORS 512
  37. #define TGT_NAME "ibmvstgt"
  38. /*
  39. * Hypervisor calls.
  40. */
  41. #define h_copy_rdma(l, sa, sb, da, db) \
  42. plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
  43. #define h_send_crq(ua, l, h) \
  44. plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
  45. #define h_reg_crq(ua, tok, sz)\
  46. plpar_hcall_norets(H_REG_CRQ, ua, tok, sz);
  47. #define h_free_crq(ua) \
  48. plpar_hcall_norets(H_FREE_CRQ, ua);
  49. /* tmp - will replace with SCSI logging stuff */
  50. #define eprintk(fmt, args...) \
  51. do { \
  52. printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
  53. } while (0)
  54. /* #define dprintk eprintk */
  55. #define dprintk(fmt, args...)
/* Per-adapter state, hung off srp_target->ldata (see target_to_port()). */
struct vio_port {
	struct vio_dev *dma_dev;	/* underlying VIO device */
	struct crq_queue crq_queue;	/* CRQ message page + cursor + lock */
	struct work_struct crq_work;	/* deferred CRQ draining (handle_crq) */
	unsigned long liobn;		/* local DMA window id (dma[0] of ibm,my-dma-window) */
	unsigned long riobn;		/* remote DMA window id (dma[5] of ibm,my-dma-window) */
};

/* Single workqueue shared by all adapters for CRQ processing. */
static struct workqueue_struct *vtgtd;

/*
 * These are fixed for the system and come from the Open Firmware device tree.
 * We just store them here to save getting them every time.
 */
static char system_id[64] = "";
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
  71. static struct vio_port *target_to_port(struct srp_target *target)
  72. {
  73. return (struct vio_port *) target->ldata;
  74. }
  75. static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
  76. {
  77. return (union viosrp_iu *) (iue->sbuf->buf);
  78. }
/*
 * send_iu - push a locally-built IU to the client and post a CRQ entry.
 *
 * RDMA-copies @length bytes from iue->sbuf into the client buffer recorded
 * in iue->remote_token, then sends a CRQ message (valid = 0x80) announcing
 * it.  @format selects the CRQ payload format (SRP vs MAD).
 *
 * Returns the H_SEND_CRQ result if that failed, otherwise the RDMA result.
 */
static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
{
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	long rc, rc1;
	union {
		struct viosrp_crq cooked;
		uint64_t raw[2];
	} crq;

	/* First copy the SRP */
	rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
			 vport->riobn, iue->remote_token);
	if (rc)
		eprintk("Error %ld transferring data\n", rc);

	crq.cooked.valid = 0x80;
	crq.cooked.format = format;
	crq.cooked.reserved = 0x00;
	crq.cooked.timeout = 0x00;
	crq.cooked.IU_length = length;
	crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;

	/* NOTE(review): status is non-zero on SUCCESS (rc == 0) and zero on
	 * failure — looks inverted, but matches the inline comment; confirm
	 * against the VIOSRP protocol spec before touching it. */
	if (rc == 0)
		crq.cooked.status = 0x99;	/* Just needs to be non-zero */
	else
		crq.cooked.status = 0x00;

	rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);

	if (rc1) {
		eprintk("%ld sending response\n", rc1);
		return rc1;
	}

	return rc;
}
#define SRP_RSP_SENSE_DATA_LEN 18

/*
 * send_rsp - build an SRP_RSP IU in place and send it to the client.
 *
 * @sc may be NULL; in that case fixed-format sense data is synthesized
 * from @status (sense key) and @asc (additional sense code).  The IU
 * buffer is reused for the response, so the tag is saved first.
 * Always returns 0.
 */
static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
		    unsigned char status, unsigned char asc)
{
	union viosrp_iu *iu = vio_iu(iue);
	uint64_t tag = iu->srp.rsp.tag;

	/* If the linked bit is on and status is good */
	if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
		status = 0x10;

	memset(iu, 0, sizeof(struct srp_rsp));
	iu->srp.rsp.opcode = SRP_RSP;
	iu->srp.rsp.req_lim_delta = 1;
	iu->srp.rsp.tag = tag;

	if (test_bit(V_DIOVER, &iue->flags))
		iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;

	iu->srp.rsp.data_in_res_cnt = 0;
	iu->srp.rsp.data_out_res_cnt = 0;
	iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;
	iu->srp.rsp.resp_data_len = 0;
	iu->srp.rsp.status = status;
	if (status) {
		uint8_t *sense = iu->srp.rsp.data;

		if (sc) {
			/* Real sense from the mid-layer command.
			 * NOTE(review): sense_data_len claims
			 * SCSI_SENSE_BUFFERSIZE bytes but only
			 * SRP_RSP_SENSE_DATA_LEN extra bytes are sent below —
			 * confirm the initiator tolerates the mismatch. */
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
			memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
		} else {
			/* Synthesize 18-byte fixed-format sense data. */
			iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;

			/* Valid bit and 'current errors' */
			sense[0] = (0x1 << 7 | 0x70);
			/* Sense key */
			sense[2] = status;
			/* Additional sense length */
			sense[7] = 0xa;	/* 10 bytes */
			/* Additional sense code */
			sense[12] = asc;
		}
	}

	send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
		VIOSRP_SRP_FORMAT);

	return 0;
}
/*
 * handle_cmd_queue - submit queued SRP commands to the SCSI target core.
 *
 * Walks target->cmd_queue under the lock; V_FLYING marks entries already
 * submitted.  srp_cmd_queue() cannot be called with the lock held, so the
 * lock is dropped for each submission and the walk restarts from scratch
 * ("retry") because the list may have changed in the meantime.
 */
static void handle_cmd_queue(struct srp_target *target)
{
	struct Scsi_Host *shost = target->shost;
	struct iu_entry *iue;
	struct srp_cmd *cmd;
	unsigned long flags;
	int err;

retry:
	spin_lock_irqsave(&target->lock, flags);

	list_for_each_entry(iue, &target->cmd_queue, ilist) {
		if (!test_and_set_bit(V_FLYING, &iue->flags)) {
			spin_unlock_irqrestore(&target->lock, flags);
			cmd = iue->sbuf->buf;
			err = srp_cmd_queue(shost, cmd, iue, 0);
			if (err) {
				eprintk("cannot queue cmd %p %d\n", cmd, err);
				srp_iu_put(iue);
			}
			goto retry;
		}
	}

	spin_unlock_irqrestore(&target->lock, flags);
}
  177. static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
  178. struct srp_direct_buf *md, int nmd,
  179. enum dma_data_direction dir, unsigned int rest)
  180. {
  181. struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
  182. struct srp_target *target = iue->target;
  183. struct vio_port *vport = target_to_port(target);
  184. dma_addr_t token;
  185. long err;
  186. unsigned int done = 0;
  187. int i, sidx, soff;
  188. sidx = soff = 0;
  189. token = sg_dma_address(sg + sidx);
  190. for (i = 0; i < nmd && rest; i++) {
  191. unsigned int mdone, mlen;
  192. mlen = min(rest, md[i].len);
  193. for (mdone = 0; mlen;) {
  194. int slen = min(sg_dma_len(sg + sidx) - soff, mlen);
  195. if (dir == DMA_TO_DEVICE)
  196. err = h_copy_rdma(slen,
  197. vport->riobn,
  198. md[i].va + mdone,
  199. vport->liobn,
  200. token + soff);
  201. else
  202. err = h_copy_rdma(slen,
  203. vport->liobn,
  204. token + soff,
  205. vport->riobn,
  206. md[i].va + mdone);
  207. if (err != H_SUCCESS) {
  208. eprintk("rdma error %d %d\n", dir, slen);
  209. goto out;
  210. }
  211. mlen -= slen;
  212. mdone += slen;
  213. soff += slen;
  214. done += slen;
  215. if (soff == sg_dma_len(sg + sidx)) {
  216. sidx++;
  217. soff = 0;
  218. token = sg_dma_address(sg + sidx);
  219. if (sidx > nsg) {
  220. eprintk("out of sg %p %d %d\n",
  221. iue, sidx, nsg);
  222. goto out;
  223. }
  224. }
  225. };
  226. rest -= mlen;
  227. }
  228. out:
  229. return 0;
  230. }
  231. static int ibmvstgt_transfer_data(struct scsi_cmnd *sc,
  232. void (*done)(struct scsi_cmnd *))
  233. {
  234. struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
  235. int err;
  236. err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
  237. done(sc);
  238. return err;
  239. }
/*
 * ibmvstgt_cmd_done - command-completion callback from the target core.
 *
 * Unlinks the IU from the target's command list, sends the SRP response
 * (HARDWARE ERROR sense on non-GOOD result), completes the mid-layer
 * command and releases the IU.  Always returns 0.
 */
static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
			     void (*done)(struct scsi_cmnd *))
{
	unsigned long flags;
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;

	dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);

	spin_lock_irqsave(&target->lock, flags);
	list_del(&iue->ilist);
	spin_unlock_irqrestore(&target->lock, flags);

	if (sc->result != SAM_STAT_GOOD) {
		eprintk("operation failed %p %d %x\n",
			iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
		send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
	} else
		send_rsp(iue, sc, NO_SENSE, 0x00);

	done(sc);
	srp_iu_put(iue);

	return 0;
}
  260. int send_adapter_info(struct iu_entry *iue,
  261. dma_addr_t remote_buffer, uint16_t length)
  262. {
  263. struct srp_target *target = iue->target;
  264. struct vio_port *vport = target_to_port(target);
  265. struct Scsi_Host *shost = target->shost;
  266. dma_addr_t data_token;
  267. struct mad_adapter_info_data *info;
  268. int err;
  269. info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
  270. GFP_KERNEL);
  271. if (!info) {
  272. eprintk("bad dma_alloc_coherent %p\n", target);
  273. return 1;
  274. }
  275. /* Get remote info */
  276. err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
  277. vport->liobn, data_token);
  278. if (err == H_SUCCESS) {
  279. dprintk("Client connect: %s (%d)\n",
  280. info->partition_name, info->partition_number);
  281. }
  282. memset(info, 0, sizeof(*info));
  283. strcpy(info->srp_version, "16.a");
  284. strncpy(info->partition_name, partition_name,
  285. sizeof(info->partition_name));
  286. info->partition_number = partition_number;
  287. info->mad_version = 1;
  288. info->os_type = 2;
  289. info->port_max_txu[0] = shost->hostt->max_sectors << 9;
  290. /* Send our info to remote */
  291. err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
  292. vport->riobn, remote_buffer);
  293. dma_free_coherent(target->dev, sizeof(*info), info, data_token);
  294. if (err != H_SUCCESS) {
  295. eprintk("Error sending adapter info %d\n", err);
  296. return 1;
  297. }
  298. return 0;
  299. }
  300. static void process_login(struct iu_entry *iue)
  301. {
  302. union viosrp_iu *iu = vio_iu(iue);
  303. struct srp_login_rsp *rsp = &iu->srp.login_rsp;
  304. uint64_t tag = iu->srp.rsp.tag;
  305. /* TODO handle case that requested size is wrong and
  306. * buffer format is wrong
  307. */
  308. memset(iu, 0, sizeof(struct srp_login_rsp));
  309. rsp->opcode = SRP_LOGIN_RSP;
  310. rsp->req_lim_delta = INITIAL_SRP_LIMIT;
  311. rsp->tag = tag;
  312. rsp->max_it_iu_len = sizeof(union srp_iu);
  313. rsp->max_ti_iu_len = sizeof(union srp_iu);
  314. /* direct and indirect */
  315. rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
  316. send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
  317. }
  318. static inline void queue_cmd(struct iu_entry *iue)
  319. {
  320. struct srp_target *target = iue->target;
  321. unsigned long flags;
  322. spin_lock_irqsave(&target->lock, flags);
  323. list_add_tail(&iue->ilist, &target->cmd_queue);
  324. spin_unlock_irqrestore(&target->lock, flags);
  325. }
  326. static int process_tsk_mgmt(struct iu_entry *iue)
  327. {
  328. union viosrp_iu *iu = vio_iu(iue);
  329. int fn;
  330. dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);
  331. switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
  332. case SRP_TSK_ABORT_TASK:
  333. fn = ABORT_TASK;
  334. break;
  335. case SRP_TSK_ABORT_TASK_SET:
  336. fn = ABORT_TASK_SET;
  337. break;
  338. case SRP_TSK_CLEAR_TASK_SET:
  339. fn = CLEAR_TASK_SET;
  340. break;
  341. case SRP_TSK_LUN_RESET:
  342. fn = LOGICAL_UNIT_RESET;
  343. break;
  344. case SRP_TSK_CLEAR_ACA:
  345. fn = CLEAR_ACA;
  346. break;
  347. default:
  348. fn = 0;
  349. }
  350. if (fn)
  351. scsi_tgt_tsk_mgmt_request(iue->target->shost, fn,
  352. iu->srp.tsk_mgmt.task_tag,
  353. (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
  354. iue);
  355. else
  356. send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);
  357. return !fn;
  358. }
/*
 * process_mad_iu - dispatch one management-datagram (MAD) IU by type.
 *
 * Only ADAPTER_INFO is actually serviced; the other types get an error
 * status (or just a log line).  Always returns 1: the caller releases
 * the IU.
 */
static int process_mad_iu(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	struct viosrp_adapter_info *info;
	struct viosrp_host_config *conf;

	switch (iu->mad.empty_iu.common.type) {
	case VIOSRP_EMPTY_IU_TYPE:
		eprintk("%s\n", "Unsupported EMPTY MAD IU");
		break;
	case VIOSRP_ERROR_LOG_TYPE:
		/* Not implemented: report failure status back. */
		eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
		iu->mad.error_log.common.status = 1;
		send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
		break;
	case VIOSRP_ADAPTER_INFO_TYPE:
		/* Exchange adapter identity; status is the helper's result. */
		info = &iu->mad.adapter_info;
		info->common.status = send_adapter_info(iue, info->buffer,
							info->common.length);
		send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
		break;
	case VIOSRP_HOST_CONFIG_TYPE:
		/* Not implemented: report failure status back. */
		conf = &iu->mad.host_config;
		conf->common.status = 1;
		send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
		break;
	default:
		eprintk("Unknown type %u\n", iu->srp.rsp.opcode);
	}

	return 1;
}
/*
 * process_srp_iu - dispatch one SRP IU by opcode.
 *
 * Returns non-zero when the caller should release the IU.  SRP_CMD is
 * queued for later submission, so ownership passes to the command queue
 * and 0 is returned.
 */
static int process_srp_iu(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	int done = 1;
	u8 opcode = iu->srp.rsp.opcode;

	switch (opcode) {
	case SRP_LOGIN_REQ:
		process_login(iue);
		break;
	case SRP_TSK_MGMT:
		done = process_tsk_mgmt(iue);
		break;
	case SRP_CMD:
		queue_cmd(iue);
		done = 0;
		break;
	case SRP_LOGIN_RSP:
	case SRP_I_LOGOUT:
	case SRP_T_LOGOUT:
	case SRP_RSP:
	case SRP_CRED_REQ:
	case SRP_CRED_RSP:
	case SRP_AER_REQ:
	case SRP_AER_RSP:
		/* Valid SRP opcodes, but not ones a target services. */
		eprintk("Unsupported type %u\n", opcode);
		break;
	default:
		eprintk("Unknown type %u\n", opcode);
	}

	return done;
}
/*
 * process_iu - pull a request IU from the client and dispatch it.
 *
 * Allocates an IU entry from the pool, RDMA-copies the request named by
 * the CRQ entry into local memory, then routes it to the MAD or SRP
 * handler.  The entry is released here unless the handler took ownership
 * (returned done == 0, e.g. a queued SRP_CMD).
 */
static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct iu_entry *iue;
	long err, done;

	iue = srp_iu_get(target);
	if (!iue) {
		eprintk("Error getting IU from pool, %p\n", target);
		return;
	}

	iue->remote_token = crq->IU_data_ptr;

	err = h_copy_rdma(crq->IU_length, vport->riobn,
			  iue->remote_token, vport->liobn, iue->sbuf->dma);

	if (err != H_SUCCESS) {
		eprintk("%ld transferring data error %p\n", err, iue);
		done = 1;
		goto out;
	}

	if (crq->format == VIOSRP_MAD_FORMAT)
		done = process_mad_iu(iue);
	else
		done = process_srp_iu(iue);
out:
	if (done)
		srp_iu_put(iue);
}
  446. static irqreturn_t ibmvstgt_interrupt(int irq, void *data)
  447. {
  448. struct srp_target *target = (struct srp_target *) data;
  449. struct vio_port *vport = target_to_port(target);
  450. vio_disable_interrupts(vport->dma_dev);
  451. queue_work(vtgtd, &vport->crq_work);
  452. return IRQ_HANDLED;
  453. }
/*
 * crq_queue_create - allocate and register the CRQ with the hypervisor.
 *
 * Allocates one page of CRQ message slots, DMA-maps it, registers it via
 * H_REG_CRQ (freeing and re-registering if a previous instance, e.g. from
 * kexec, left the CRQ active), installs the interrupt handler and sends
 * the CRQ initialization message.
 *
 * NOTE(review): every failure path returns -ENOMEM, even for IRQ/CRQ
 * registration errors — consider propagating distinct error codes.
 */
static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
{
	int err;
	struct vio_port *vport = target_to_port(target);

	queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(target->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(queue->msg_token))
		goto map_failed;

	err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
			PAGE_SIZE);

	/* If the adapter was left active for some reason (like kexec)
	 * try freeing and re-registering
	 */
	if (err == H_RESOURCE) {
		do {
			err = h_free_crq(vport->dma_dev->unit_address);
		} while (err == H_BUSY || H_IS_LONG_BUSY(err));

		err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
				PAGE_SIZE);
	}

	/* NOTE(review): the magic 2 is presumably H_CLOSED — confirm
	 * against asm/hvcall.h. */
	if (err != H_SUCCESS && err != 2) {
		eprintk("Error 0x%x opening virtual adapter\n", err);
		goto reg_crq_failed;
	}

	err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
			  SA_INTERRUPT, "ibmvstgt", target);
	if (err)
		goto req_irq_failed;

	vio_enable_interrupts(vport->dma_dev);

	/* 0xC001... is the CRQ initialization message. */
	h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	return 0;

req_irq_failed:
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));

reg_crq_failed:
	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long) queue->msgs);

malloc_failed:
	return -ENOMEM;
}
/*
 * crq_queue_destroy - undo crq_queue_create: release the IRQ, deregister
 * the CRQ with the hypervisor (retrying while busy), unmap the DMA buffer
 * and free the backing page.
 */
static void crq_queue_destroy(struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct crq_queue *queue = &vport->crq_queue;
	int err;

	free_irq(vport->dma_dev->irq, target);
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));

	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);

	free_page((unsigned long) queue->msgs);
}
/*
 * process_crq - act on a single CRQ entry from the client.
 *
 * valid = 0xC0: initialization handshake — 0x01 is answered with the
 * 0xC002 init-complete message; 0x02 is presumably the peer's
 * init-complete (no action) — confirm against the VIOSRP spec.
 * valid = 0xFF: transport event, ignored.
 * valid = 0x80: real payload, dispatched by format.
 */
static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	dprintk("%x %x\n", crq->valid, crq->format);

	switch (crq->valid) {
	case 0xC0:
		/* initialization */
		switch (crq->format) {
		case 0x01:
			h_send_crq(vport->dma_dev->unit_address,
				   0xC002000000000000, 0);
			break;
		case 0x02:
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	case 0xFF:
		/* transport event */
		break;
	case 0x80:
		/* real payload */
		switch (crq->format) {
		case VIOSRP_SRP_FORMAT:
		case VIOSRP_MAD_FORMAT:
			process_iu(crq, target);
			break;
		case VIOSRP_OS400_FORMAT:
		case VIOSRP_AIX_FORMAT:
		case VIOSRP_LINUX_FORMAT:
		case VIOSRP_INLINE_FORMAT:
			eprintk("Unsupported format %u\n", crq->format);
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	default:
		eprintk("unknown message type 0x%02x!?\n", crq->valid);
	}
}
  559. static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
  560. {
  561. struct viosrp_crq *crq;
  562. unsigned long flags;
  563. spin_lock_irqsave(&queue->lock, flags);
  564. crq = &queue->msgs[queue->cur];
  565. if (crq->valid & 0x80) {
  566. if (++queue->cur == queue->size)
  567. queue->cur = 0;
  568. } else
  569. crq = NULL;
  570. spin_unlock_irqrestore(&queue->lock, flags);
  571. return crq;
  572. }
/*
 * handle_crq - workqueue handler: drain the CRQ, then run queued commands.
 *
 * Processes entries until the queue looks empty, re-enables the interrupt,
 * and checks one more time: an entry that arrived in the window between
 * "queue empty" and "interrupt enabled" would otherwise be stranded until
 * the next interrupt.  Finally kicks the pending SRP command queue.
 */
static void handle_crq(void *data)
{
	struct srp_target *target = (struct srp_target *) data;
	struct vio_port *vport = target_to_port(target);
	struct viosrp_crq *crq;
	int done = 0;

	while (!done) {
		while ((crq = next_crq(&vport->crq_queue)) != NULL) {
			process_crq(crq, target);
			crq->valid = 0x00;	/* return the slot */
		}

		vio_enable_interrupts(vport->dma_dev);

		/* Close the empty-check/enable race. */
		crq = next_crq(&vport->crq_queue);
		if (crq) {
			vio_disable_interrupts(vport->dma_dev);
			process_crq(crq, target);
			crq->valid = 0x00;
		} else
			done = 1;
	}

	handle_cmd_queue(target);
}
  595. static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
  596. {
  597. unsigned long flags;
  598. struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
  599. struct srp_target *target = iue->target;
  600. dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
  601. spin_lock_irqsave(&target->lock, flags);
  602. list_del(&iue->ilist);
  603. spin_unlock_irqrestore(&target->lock, flags);
  604. srp_iu_put(iue);
  605. return 0;
  606. }
  607. static int ibmvstgt_tsk_mgmt_response(u64 mid, int result)
  608. {
  609. struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
  610. union viosrp_iu *iu = vio_iu(iue);
  611. unsigned char status, asc;
  612. eprintk("%p %d\n", iue, result);
  613. status = NO_SENSE;
  614. asc = 0;
  615. switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
  616. case SRP_TSK_ABORT_TASK:
  617. asc = 0x14;
  618. if (result)
  619. status = ABORTED_COMMAND;
  620. break;
  621. default:
  622. break;
  623. }
  624. send_rsp(iue, NULL, status, asc);
  625. srp_iu_put(iue);
  626. return 0;
  627. }
  628. static ssize_t system_id_show(struct class_device *cdev, char *buf)
  629. {
  630. return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
  631. }
  632. static ssize_t partition_number_show(struct class_device *cdev, char *buf)
  633. {
  634. return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
  635. }
  636. static ssize_t unit_address_show(struct class_device *cdev, char *buf)
  637. {
  638. struct Scsi_Host *shost = class_to_shost(cdev);
  639. struct srp_target *target = host_to_srp_target(shost);
  640. struct vio_port *vport = target_to_port(target);
  641. return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
  642. }
/* Read-only sysfs attributes exported on the Scsi_Host class device. */
static CLASS_DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
static CLASS_DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
static CLASS_DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);

static struct class_device_attribute *ibmvstgt_attrs[] = {
	&class_device_attr_system_id,
	&class_device_attr_partition_number,
	&class_device_attr_unit_address,
	NULL,
};
/* Target-mode host template; transfer_response/transfer_data/
 * tsk_mgmt_response are the scsi_tgt framework callbacks. */
static struct scsi_host_template ibmvstgt_sht = {
	.name = TGT_NAME,
	.module = THIS_MODULE,
	.can_queue = INITIAL_SRP_LIMIT,
	.sg_tablesize = SG_ALL,
	.use_clustering = DISABLE_CLUSTERING,
	.max_sectors = DEFAULT_MAX_SECTORS,
	.transfer_response = ibmvstgt_cmd_done,
	.transfer_data = ibmvstgt_transfer_data,
	.eh_abort_handler = ibmvstgt_eh_abort_handler,
	.tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
	.shost_attrs = ibmvstgt_attrs,
	.proc_name = TGT_NAME,
};
/*
 * ibmvstgt_probe - bring up one virtual SCSI target adapter.
 *
 * Allocates the vio_port and Scsi_Host, sets up the target-mode queue and
 * the SRP IU pool, reads the "ibm,my-dma-window" property for the local
 * and remote DMA window ids, creates the CRQ and registers the host with
 * the SCSI layer.  Errors unwind in reverse order via the goto chain.
 */
static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct Scsi_Host *shost;
	struct srp_target *target;
	struct vio_port *vport;
	unsigned int *dma, dma_size;
	int err = -ENOMEM;

	vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
	if (!vport)
		return err;
	shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
	if (!shost)
		goto free_vport;
	err = scsi_tgt_alloc_queue(shost);
	if (err)
		goto put_host;

	target = host_to_srp_target(shost);
	target->shost = shost;
	vport->dma_dev = dev;
	target->ldata = vport;
	err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
			       SRP_MAX_IU_LEN);
	if (err)
		goto put_host;

	/* 40-byte property; words 0 and 5 hold the two window ids
	 * (presumably local and remote LIOBNs — confirm against PAPR). */
	dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
						 &dma_size);
	if (!dma || dma_size != 40) {
		eprintk("Couldn't get window property %d\n", dma_size);
		err = -EIO;
		goto free_srp_target;
	}
	vport->liobn = dma[0];
	vport->riobn = dma[5];

	INIT_WORK(&vport->crq_work, handle_crq, target);

	err = crq_queue_create(&vport->crq_queue, target);
	if (err)
		goto free_srp_target;

	err = scsi_add_host(shost, target->dev);
	if (err)
		goto destroy_queue;
	return 0;

destroy_queue:
	crq_queue_destroy(target);
free_srp_target:
	srp_target_free(target);
put_host:
	scsi_host_put(shost);
free_vport:
	kfree(vport);
	return err;
}
/*
 * ibmvstgt_remove - tear down an adapter instance in reverse probe order.
 *
 * NOTE(review): target comes from dev->dev.driver_data, which probe never
 * sets directly — presumably srp_target_alloc() stores it; confirm in
 * libsrp before relying on this.
 */
static int ibmvstgt_remove(struct vio_dev *dev)
{
	struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
	struct Scsi_Host *shost = target->shost;
	struct vio_port *vport = target->ldata;

	crq_queue_destroy(target);
	scsi_remove_host(shost);
	scsi_tgt_free_queue(shost);
	srp_target_free(target);
	kfree(vport);
	scsi_host_put(shost);
	return 0;
}
/* VIO device-tree nodes this driver binds to. */
static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
	{"v-scsi-host", "IBM,v-scsi-host"},
	{"",""}
};

MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);

static struct vio_driver ibmvstgt_driver = {
	.id_table = ibmvstgt_device_table,
	.probe = ibmvstgt_probe,
	.remove = ibmvstgt_remove,
	.driver = {
		/* Note: registered name "ibmvscsis" differs from TGT_NAME. */
		.name = "ibmvscsis",
		.owner = THIS_MODULE,
	}
};
  744. static int get_system_info(void)
  745. {
  746. struct device_node *rootdn;
  747. const char *id, *model, *name;
  748. unsigned int *num;
  749. rootdn = find_path_device("/");
  750. if (!rootdn)
  751. return -ENOENT;
  752. model = get_property(rootdn, "model", NULL);
  753. id = get_property(rootdn, "system-id", NULL);
  754. if (model && id)
  755. snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
  756. name = get_property(rootdn, "ibm,partition-name", NULL);
  757. if (name)
  758. strncpy(partition_name, name, sizeof(partition_name));
  759. num = (unsigned int *) get_property(rootdn, "ibm,partition-no", NULL);
  760. if (num)
  761. partition_number = *num;
  762. return 0;
  763. }
  764. static int ibmvstgt_init(void)
  765. {
  766. int err = -ENOMEM;
  767. printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");
  768. vtgtd = create_workqueue("ibmvtgtd");
  769. if (!vtgtd)
  770. return err;
  771. err = get_system_info();
  772. if (err)
  773. goto destroy_wq;
  774. err = vio_register_driver(&ibmvstgt_driver);
  775. if (err)
  776. goto destroy_wq;
  777. return 0;
  778. destroy_wq:
  779. destroy_workqueue(vtgtd);
  780. return err;
  781. }
  782. static void ibmvstgt_exit(void)
  783. {
  784. printk("Unregister IBM virtual SCSI driver\n");
  785. destroy_workqueue(vtgtd);
  786. vio_unregister_driver(&ibmvstgt_driver);
  787. }
/* Module metadata and entry points. */
MODULE_DESCRIPTION("IBM Virtual SCSI Target");
MODULE_AUTHOR("Santiago Leon");
MODULE_LICENSE("GPL");

module_init(ibmvstgt_init);
module_exit(ibmvstgt_exit);