/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.3 (071203)";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

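/*
 * Wait for the ITL firmware to come ready: the IOP signals readiness by
 * making a request available on its inbound queue.  The readl() below
 * pops that request, and writing it to the outbound queue hands it back
 * to the firmware; the trailing readl() of outbound_intstatus flushes
 * the posted PCI write.
 */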
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}

static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
			IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			} else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	u32 status;
	int ret = 0;

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

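/*
 * Ring-buffer helpers for the MV message unit.  Each queue is an array
 * of MVIOP_QUEUE_LEN 64-bit slots with head and tail indices kept in
 * MMIO registers; head == tail means empty.  The host consumes from the
 * outbound queue (advancing the tail) and produces into the inbound
 * queue (advancing the head, then ringing the inbound doorbell).
 * mv_outbound_read() returns 0 for "queue empty", which is unambiguous
 * because valid entries always carry at least a low tag bit (see the
 * BUG_ON in hptiop_request_callback_mv below).
 */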
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
			&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag >> 8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;
		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}

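/*
 * Synchronous requests are polled rather than interrupt driven: the
 * request is flagged IOP_REQUEST_FLAG_SYNC_REQUEST, posted to the
 * inbound queue, and then the interrupt handler is invoked by hand once
 * per millisecond until the firmware reports completion (ITL: a
 * non-zero request context; MV: hba->msg_done set by the callback) or
 * the timeout expires.
 */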
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
			&req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	return hba->msg_done ? 0 : -1;
}

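/*
 * GET_CONFIG/SET_CONFIG exchange the hpt_iop_request_{get,set}_config
 * structures with the firmware.  On ITL the request lives in the IOP's
 * own memory window: the host pops a free request offset from the
 * inbound queue, fills it in through readl/writel/memcpy_toio, and
 * returns it via the outbound queue when done.  On MV the request is
 * built in the DMA-coherent internal_req buffer and its bus address is
 * posted instead.
 */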
static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

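/*
 * Note the opposite mask polarities: on ITL, outbound_intmask is a
 * disable mask, so interrupts are enabled by clearing their bits (hence
 * the ~ below) and disabled by setting them, while on MV,
 * outbound_intmask is an enable mask whose bits are written directly.
 */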
static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}

static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;
	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop)
		return 0;
	else
		return -1;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}
	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	} else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

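/*
 * Pre-allocated hptiop_request slots are kept on a simple singly linked
 * free list headed by hba->req_list.  get_req() is called from
 * queuecommand and free_req() from the completion path; both run with
 * the host lock held, so the list needs no locking of its own.
 */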
static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}

static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list,
				min_t(size_t, SCSI_SENSE_BUFFERSIZE,
					le32_to_cpu(req->dataxfer_length)));
		break;

	default:
		scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) |
					(DID_ABORT<<16);
		break;
	}

	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}

static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3) & ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	} else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}

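/*
 * Map the command's scatterlist for DMA and translate it into the
 * firmware's hpt_iopsg format: each element carries a 64-bit bus
 * address, a length, and an end-of-table flag on the last entry.
 * Returns the number of entries, or 0 for commands with no data.
 */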
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}

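/*
 * Posting a request means writing a single tag word to the inbound
 * queue that encodes both the request's bus address and a size hint.
 * Requests are 32-byte aligned, so req_shifted_phy holds the address
 * shifted right by 5 and the freed low bits carry flags: v2 ITL
 * firmware reads a size hint from IOPMU_QUEUE_REQUEST_SIZE_BIT /
 * IOPMU_QUEUE_ADDR_HOST_BIT, while MV packs a 2-bit size class (in
 * units of 256 bytes) next to MVIOP_MU_QUEUE_ADDR_HOST_BIT in the
 * 64-bit queue entry.
 */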
static void hptiop_post_req_itl(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
							(u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
					&hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index<<8 |
					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}

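/*
 * queuecommand uses the old midlayer convention (completion callback
 * passed in, host lock held by the caller): grab a free request slot,
 * validate the target, build the S/G table and request header in the
 * slot's DMA buffer, then hand the request to the adapter-specific
 * post_req.  Completion arrives later through the interrupt path and
 * hptiop_finish_scsi_req().
 */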
static int hptiop_queuecommand(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			((u32 *)scp->cmnd)[0],
			((u32 *)scp->cmnd)[1],
			((u32 *)scp->cmnd)[2],
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel || scp->device->lun ||
			scp->device->id > hba->max_devices) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
				sizeof(struct hpt_iop_request_scsi_command)
				 - sizeof(struct hpt_iopsg)
				 + sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));

	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
			scp->device->host->host_no, scp->device->channel,
			scp->device->id, scp);

	return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
						int queue_depth)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}

static ssize_t hptiop_show_version(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
				hba->firmware_version >> 24,
				(hba->firmware_version >> 16) & 0xff,
				(hba->firmware_version >> 8) & 0xff,
				hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};

static struct scsi_host_template driver_template = {
	.module                     = THIS_MODULE,
	.name                       = driver_name,
	.queuecommand               = hptiop_queuecommand,
	.eh_device_reset_handler    = hptiop_reset,
	.eh_bus_reset_handler       = hptiop_reset,
	.info                       = hptiop_info,
	.emulated                   = 0,
	.use_clustering             = ENABLE_CLUSTERING,
	.proc_name                  = driver_name,
	.shost_attrs                = hptiop_attrs,
	.this_id                    = -1,
	.change_queue_depth         = hptiop_adjust_disk_queue_depth,
};

static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}

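/*
 * Probe sequence: enable the PCI device and DMA masks, map the BARs,
 * wait for the firmware to come ready, fetch its configuration, size
 * the SCSI host from it, push our configuration back, hook the shared
 * interrupt, carve the DMA-coherent region into 32-byte aligned request
 * slots, then enable interrupts and register with the SCSI midlayer.
 * Error paths unwind in reverse order.
 */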
static int __devinit hptiop_probe(struct pci_dev *pcidev,
					const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;

	dprintk("hptiop_probe(%p)\n", pcidev);

	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: fail to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) {
		if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
			printk(KERN_ERR "hptiop: fail to set dma_mask\n");
			goto disable_pci_device;
		}
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;

	hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 1;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hba->ops->map_pci_bar(hba))
		goto free_scsi_host;

	if (hba->ops->iop_wait_ready(hba, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (hba->ops->internal_memalloc) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;

	req_size = sizeof(struct hpt_iop_request_scsi_command)
		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (hba->ops->set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}

	/* Allocate request mem */
	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	start_virt = dma_alloc_coherent(&pcidev->dev,
				hba->req_size*hba->max_requests + 0x20,
				&start_phy, GFP_KERNEL);

	if (!start_virt) {
		printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
					hba->host->host_no);
		goto free_request_irq;
	}

	hba->dma_coherent = start_virt;
	hba->dma_coherent_handle = start_phy;

	if ((start_phy & 0x1f) != 0) {
		offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
		start_phy += offset;
		start_virt += offset;
	}

	hba->req_list = start_virt;
	for (i = 0; i < hba->max_requests; i++) {
		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
		start_virt = (char *)start_virt + hba->req_size;
		start_phy = start_phy + hba->req_size;
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
	return 0;

free_request_mem:
	dma_free_coherent(&hba->pcidev->dev,
		hba->req_size * hba->max_requests + 0x20,
		hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	if (hba->ops->internal_memfree)
		hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

free_scsi_host:
	scsi_host_put(host);

free_pci_regions:
	pci_release_regions(pcidev);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe fail\n", host->host_no);
	return -ENODEV;
}

static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
					hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}

static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent,
			hba->dma_coherent_handle);

	if (hba->ops->internal_memfree)
		hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}

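/*
 * All ITL/MV differences are funneled through these ops tables; the PCI
 * id table below selects the right one by stashing a pointer in
 * driver_data, which hptiop_probe() casts back into hba->ops.  The ITL
 * family keeps its requests in the IOP's own memory window, so it needs
 * no internal_memalloc/internal_memfree hooks.
 */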
static struct hptiop_adapter_ops hptiop_itl_ops = {
	.iop_wait_ready    = iop_wait_ready_itl,
	.internal_memalloc = NULL,
	.internal_memfree  = NULL,
	.map_pci_bar       = hptiop_map_pci_bar_itl,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
	.enable_intr       = hptiop_enable_intr_itl,
	.disable_intr      = hptiop_disable_intr_itl,
	.get_config        = iop_get_config_itl,
	.set_config        = iop_set_config_itl,
	.iop_intr          = iop_intr_itl,
	.post_msg          = hptiop_post_msg_itl,
	.post_req          = hptiop_post_req_itl,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.iop_wait_ready    = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree  = hptiop_internal_memfree_mv,
	.map_pci_bar       = hptiop_map_pci_bar_mv,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
	.enable_intr       = hptiop_enable_intr_mv,
	.disable_intr      = hptiop_disable_intr_mv,
	.get_config        = iop_get_config_mv,
	.set_config        = iop_set_config_mv,
	.iop_intr          = iop_intr_mv,
	.post_msg          = hptiop_post_msg_mv,
	.post_req          = hptiop_post_req_mv,
};

static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name       = driver_name,
	.id_table   = hptiop_id_table,
	.probe      = hptiop_probe,
	.remove     = hptiop_remove,
	.shutdown   = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");