hptiop.c

/*
 * HighPoint RR3xxx controller driver for Linux
 * Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
static const char driver_ver[] = "v1.0 (060426)";

static DEFINE_SPINLOCK(hptiop_hba_list_lock);
static LIST_HEAD(hptiop_hba_list);
static int hptiop_cdev_major = -1;

static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
{
	readl(&iop->outbound_intstatus);
}

static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &iop->outbound_queue);
		hptiop_pci_posting_flush(iop);
		return 0;
	}

	return -1;
}

static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
{
	if ((tag & IOPMU_QUEUE_MASK_HOST_BITS) == IOPMU_QUEUE_ADDR_HOST_BIT)
		return hptiop_host_request_callback(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		return hptiop_iop_request_callback(hba, tag);
}

static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback(hba, req);
				else
					writel(1, &p->context);
			}
			else
				hptiop_request_callback(hba, req);
		}
	}
}

static int __iop_intr(struct hptiop_hba *hba)
{
	struct hpt_iopmu __iomem *iop = hba->iop;
	u32 status;
	int ret = 0;

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);
		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue(hba);
		ret = 1;
	}

	return ret;
}
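
/*
 * Post a request that lives in IOP memory to the inbound queue and poll
 * for completion: the request is flagged IOP_REQUEST_FLAG_SYNC_REQUEST,
 * so the outbound-queue drain path writes 1 to ->context to signal
 * completion, and we poll __iop_intr() for up to 'millisec' milliseconds
 * waiting for that to happen.
 */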
static int iop_send_sync_request(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
			&req->flags);

	writel(0, &req->context);

	writel((unsigned long)req - (unsigned long)hba->iop,
			&hba->iop->inbound_queue);

	hptiop_pci_posting_flush(hba->iop);

	for (i = 0; i < millisec; i++) {
		__iop_intr(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;

	writel(msg, &hba->iop->inbound_msgaddr0);

	hptiop_pci_posting_flush(hba->iop);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		__iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	return hba->msg_done? 0 : -1;
}

static int iop_get_config(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->iop->outbound_queue);
	return 0;
}

static int iop_set_config(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->iop->outbound_queue);
	return 0;
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	struct hpt_iopmu __iomem *iop = hba->iop;

	/* enable interrupts */
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
			&iop->outbound_intmask);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}

static int hptiop_map_pci_bar(struct hptiop_hba *hba)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;
	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return -1;
	}

	mem_base_phy = pci_resource_start(pcidev, 0);
	length = pci_resource_len(pcidev, 0);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return -1;
	}

	hba->iop = mem_base_virt;
	dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
	return 0;
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	}
	else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}
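
/*
 * Completion handler for requests that originated from the SCSI midlayer:
 * unmap the data buffer, translate the IOP result code into a SCSI result,
 * call scsi_done() and return the request slot to the free list.
 */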
static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_scsi_command *req;
	struct scsi_cmnd *scp;

	req = (struct hpt_iop_request_scsi_command *)hba->reqs[tag].req_virt;
	dprintk("hptiop_host_request_callback: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped) {
		if (scp->use_sg)
			pci_unmap_sg(hba->pcidev,
				(struct scatterlist *)scp->request_buffer,
				scp->use_sg,
				scp->sc_data_direction
			);
		else
			pci_unmap_single(hba->pcidev,
				HPT_SCP(scp)->dma_handle,
				scp->request_bufflen,
				scp->sc_data_direction
			);
	}

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
		scp->result = SAM_STAT_CHECK_CONDITION;
		memset(&scp->sense_buffer,
				0, sizeof(scp->sense_buffer));
		memcpy(&scp->sense_buffer,
			&req->sg_list, le32_to_cpu(req->dataxfer_length));
		break;

	default:
		scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) |
					(DID_ABORT<<16);
		break;
	}

	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->iop + tag);
	dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3) & ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	}
	else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = __iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}

static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sglist = (struct scatterlist *)scp->request_buffer;

	/*
	 * though we should no longer see non-use_sg requests,
	 * keep the use_sg check anyway
	 */
	if (scp->use_sg) {
		int idx;

		HPT_SCP(scp)->sgcnt = pci_map_sg(hba->pcidev,
				sglist, scp->use_sg,
				scp->sc_data_direction);
		HPT_SCP(scp)->mapped = 1;
		BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

		for (idx = 0; idx < HPT_SCP(scp)->sgcnt; idx++) {
			psg[idx].pci_address =
				cpu_to_le64(sg_dma_address(&sglist[idx]));
			psg[idx].size = cpu_to_le32(sg_dma_len(&sglist[idx]));
			psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
				cpu_to_le32(1) : 0;
		}

		return HPT_SCP(scp)->sgcnt;
	} else {
		HPT_SCP(scp)->dma_handle = pci_map_single(
				hba->pcidev,
				scp->request_buffer,
				scp->request_bufflen,
				scp->sc_data_direction
			);
		HPT_SCP(scp)->mapped = 1;
		psg->pci_address = cpu_to_le64(HPT_SCP(scp)->dma_handle);
		psg->size = cpu_to_le32(scp->request_bufflen);
		psg->eot = cpu_to_le32(1);
		return 1;
	}
}
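
/*
 * queuecommand entry point: take a request slot from the free list,
 * build the scatter/gather table, fill in the IOP SCSI command request
 * and post its (shifted) physical address to the inbound queue.
 * Returns SCSI_MLQUEUE_HOST_BUSY when no slot is free.
 */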
static int hptiop_queuecommand(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			*((u32 *)&scp->cmnd),
			*((u32 *)&scp->cmnd + 1),
			*((u32 *)&scp->cmnd + 2),
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel || scp->device->lun ||
			scp->device->id > hba->max_devices) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = (struct hpt_iop_request_scsi_command *)_req->req_virt;

	/* build S/G table */
	if (scp->request_bufflen)
		sg_count = hptiop_buildsgl(scp, req->sg_list);
	else
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
							(u32)_req->index);
	req->header.context_hi32 = 0;
	req->dataxfer_length = cpu_to_le32(scp->request_bufflen);
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
				sizeof(struct hpt_iop_request_scsi_command)
				 - sizeof(struct hpt_iopsg)
				 + sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));

	writel(IOPMU_QUEUE_ADDR_HOST_BIT | _req->req_shifted_phy,
			&hba->iop->inbound_queue);

	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		writel(IOPMU_INBOUND_MSG0_RESET,
				&hba->iop->outbound_msgaddr0);
		hptiop_pci_posting_flush(hba->iop);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
			scp->device->host->host_no, scp->device->channel,
			scp->device->id, scp);

	return hptiop_reset_hba(hba)? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
						int queue_depth)
{
	if (queue_depth > 256)
		queue_depth = 256;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}

struct hptiop_getinfo {
	char __user *buffer;
	loff_t buflength;
	loff_t bufoffset;
	loff_t buffillen;
	loff_t filpos;
};

static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo,
					char *data, int datalen)
{
	if (pinfo->filpos < pinfo->bufoffset) {
		if (pinfo->filpos + datalen <= pinfo->bufoffset) {
			pinfo->filpos += datalen;
			return;
		} else {
			data += (pinfo->bufoffset - pinfo->filpos);
			datalen -= (pinfo->bufoffset - pinfo->filpos);
			pinfo->filpos = pinfo->bufoffset;
		}
	}

	pinfo->filpos += datalen;
	if (pinfo->buffillen == pinfo->buflength)
		return;

	if (pinfo->buflength - pinfo->buffillen < datalen)
		datalen = pinfo->buflength - pinfo->buffillen;

	if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen))
		return;

	pinfo->buffillen += datalen;
}

static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...)
{
	va_list args;
	char buf[128];
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	hptiop_copy_mem_info(pinfo, buf, len);
	return len;
}

static void hptiop_ioctl_done(struct hpt_ioctl_k *arg)
{
	arg->done = NULL;
	wake_up(&arg->hba->ioctl_wq);
}
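
/*
 * Pass an ioctl through to the IOP firmware: grab a request frame from
 * the inbound queue, copy the input buffer into IOP local memory, post
 * the request and sleep on ioctl_wq until hptiop_ioctl_done() clears
 * arg->done.  On timeout the HBA is reset and the ioctl is retried up
 * to three times.
 */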
static void hptiop_do_ioctl(struct hpt_ioctl_k *arg)
{
	struct hptiop_hba *hba = arg->hba;
	u32 val;
	struct hpt_iop_request_ioctl_command __iomem *req;
	int ioctl_retry = 0;

	dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no);

	/*
	 * check (in + out) buff size from application.
	 * outbuf must be dword aligned.
	 */
	if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size >
			hba->max_request_size
				- sizeof(struct hpt_iop_request_header)
				- 4 * sizeof(u32)) {
		dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n",
				hba->host->host_no,
				arg->inbuf_size, arg->outbuf_size);
		arg->result = HPT_IOCTL_RESULT_FAILED;
		return;
	}

retry:
	spin_lock_irq(hba->host->host_lock);

	val = readl(&hba->iop->inbound_queue);
	if (val == IOPMU_QUEUE_EMPTY) {
		spin_unlock_irq(hba->host->host_lock);
		dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no);
		arg->result = -1;
		return;
	}

	req = (struct hpt_iop_request_ioctl_command __iomem *)
			((unsigned long)hba->iop + val);

	writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code),
			&req->ioctl_code);
	writel(arg->inbuf_size, &req->inbuf_size);
	writel(arg->outbuf_size, &req->outbuf_size);

	/*
	 * use the buffer on the IOP local memory first, then copy it
	 * back to host.
	 * the caller's request buffer should be little-endian.
	 */
	if (arg->inbuf_size)
		memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size);

	/* correct the controller ID for IOP */
	if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO ||
		arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 ||
		arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO)
		&& arg->inbuf_size >= sizeof(u32))
		writel(0, req->buf);

	writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type);
	writel(0, &req->header.flags);
	writel(offsetof(struct hpt_iop_request_ioctl_command, buf)
			+ arg->inbuf_size, &req->header.size);
	writel((u32)(unsigned long)arg, &req->header.context);
	writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0,
			&req->header.context_hi32);
	writel(IOP_RESULT_PENDING, &req->header.result);

	arg->result = HPT_IOCTL_RESULT_FAILED;
	arg->done = hptiop_ioctl_done;

	writel(val, &hba->iop->inbound_queue);
	hptiop_pci_posting_flush(hba->iop);

	spin_unlock_irq(hba->host->host_lock);

	wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ);

	if (arg->done != NULL) {
		hptiop_reset_hba(hba);
		if (ioctl_retry++ < 3)
			goto retry;
	}

	dprintk("hpt_iop_ioctl %x result %d\n",
			arg->ioctl_code, arg->result);
}

static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf,
			u32 insize, void *outbuf, u32 outsize)
{
	struct hpt_ioctl_k arg;

	arg.hba = hba;
	arg.ioctl_code = code;
	arg.inbuf = inbuf;
	arg.outbuf = outbuf;
	arg.inbuf_size = insize;
	arg.outbuf_size = outsize;
	arg.bytes_returned = NULL;
	hptiop_do_ioctl(&arg);
	return arg.result;
}

static inline int hpt_id_valid(__le32 id)
{
	return id != 0 && id != cpu_to_le32(0xffffffff);
}

static int hptiop_get_controller_info(struct hptiop_hba *hba,
					struct hpt_controller_info *pinfo)
{
	int id = 0;

	return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO,
		&id, sizeof(int), pinfo, sizeof(*pinfo));
}

static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus,
					struct hpt_channel_info *pinfo)
{
	u32 ids[2];

	ids[0] = 0;
	ids[1] = bus;
	return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO,
				ids, sizeof(ids), pinfo, sizeof(*pinfo));
}

static int hptiop_get_logical_devices(struct hptiop_hba *hba,
					__le32 *pids, int maxcount)
{
	int i;
	u32 count = maxcount - 1;

	if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES,
			&count, sizeof(u32),
			pids, sizeof(u32) * maxcount))
		return -1;

	maxcount = le32_to_cpu(pids[0]);
	for (i = 0; i < maxcount; i++)
		pids[i] = pids[i+1];

	return maxcount;
}

static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id,
				struct hpt_logical_device_info_v3 *pinfo)
{
	return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3,
				&id, sizeof(u32),
				pinfo, sizeof(*pinfo));
}

static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo)
{
	static char s[64];
	u32 flags = le32_to_cpu(devinfo->u.array.flags);
	u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress);
	u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress);

	if (flags & ARRAY_FLAG_DISABLED)
		return "Disabled";
	else if (flags & ARRAY_FLAG_TRANSFORMING)
		sprintf(s, "Expanding/Migrating %d.%d%%%s%s",
			trans_prog / 100,
			trans_prog % 100,
			(flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))?
					", Critical" : "",
			((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
			 !(flags & ARRAY_FLAG_REBUILDING) &&
			 !(flags & ARRAY_FLAG_INITIALIZING))?
					", Uninitialized" : "");
	else if ((flags & ARRAY_FLAG_BROKEN) &&
				devinfo->u.array.array_type != AT_RAID6)
		return "Critical";
	else if (flags & ARRAY_FLAG_REBUILDING)
		sprintf(s,
			(flags & ARRAY_FLAG_NEEDINITIALIZING)?
				"%sBackground initializing %d.%d%%" :
				"%sRebuilding %d.%d%%",
			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
			reb_prog / 100,
			reb_prog % 100);
	else if (flags & ARRAY_FLAG_VERIFYING)
		sprintf(s, "%sVerifying %d.%d%%",
			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
			reb_prog / 100,
			reb_prog % 100);
	else if (flags & ARRAY_FLAG_INITIALIZING)
		sprintf(s, "%sForeground initializing %d.%d%%",
			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
			reb_prog / 100,
			reb_prog % 100);
	else if (flags & ARRAY_FLAG_NEEDTRANSFORM)
		sprintf(s, "%s%s%s", "Need Expanding/Migrating",
			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
			((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
			 !(flags & ARRAY_FLAG_REBUILDING) &&
			 !(flags & ARRAY_FLAG_INITIALIZING))?
					", Uninitialized" : "");
	else if (flags & ARRAY_FLAG_NEEDINITIALIZING &&
		!(flags & ARRAY_FLAG_REBUILDING) &&
		!(flags & ARRAY_FLAG_INITIALIZING))
		sprintf(s, "%sUninitialized",
			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "");
	else if ((flags & ARRAY_FLAG_NEEDBUILDING) ||
			(flags & ARRAY_FLAG_BROKEN))
		return "Critical";
	else
		return "Normal";
	return s;
}

static void hptiop_dump_devinfo(struct hptiop_hba *hba,
			struct hptiop_getinfo *pinfo, __le32 id, int indent)
{
	struct hpt_logical_device_info_v3 devinfo;
	int i;
	u64 capacity;

	for (i = 0; i < indent; i++)
		hptiop_copy_info(pinfo, "\t");

	if (hptiop_get_device_info_v3(hba, id, &devinfo)) {
		hptiop_copy_info(pinfo, "unknown\n");
		return;
	}

	switch (devinfo.type) {

	case LDT_DEVICE: {
		struct hd_driveid *driveid;
		u32 flags = le32_to_cpu(devinfo.u.device.flags);

		driveid = (struct hd_driveid *)devinfo.u.device.ident;
		/* model[] is 40 chars long, but we just want 20 chars here */
		driveid->model[20] = 0;

		if (indent)
			if (flags & DEVICE_FLAG_DISABLED)
				hptiop_copy_info(pinfo, "Missing\n");
			else
				hptiop_copy_info(pinfo, "CH%d %s\n",
					devinfo.u.device.path_id + 1,
					driveid->model);
		else {
			capacity = le64_to_cpu(devinfo.capacity) * 512;
			do_div(capacity, 1000000);
			hptiop_copy_info(pinfo,
				"CH%d %s, %lluMB, %s %s%s%s%s\n",
				devinfo.u.device.path_id + 1,
				driveid->model,
				capacity,
				(flags & DEVICE_FLAG_DISABLED)?
					"Disabled" : "Normal",
				devinfo.u.device.read_ahead_enabled?
						"[RA]" : "",
				devinfo.u.device.write_cache_enabled?
						"[WC]" : "",
				devinfo.u.device.TCQ_enabled?
						"[TCQ]" : "",
				devinfo.u.device.NCQ_enabled?
						"[NCQ]" : ""
			);
		}
		break;
	}

	case LDT_ARRAY:
		if (devinfo.target_id != INVALID_TARGET_ID)
			hptiop_copy_info(pinfo, "[DISK %d_%d] ",
					devinfo.vbus_id, devinfo.target_id);

		capacity = le64_to_cpu(devinfo.capacity) * 512;
		do_div(capacity, 1000000);
		hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n",
			devinfo.u.array.name,
			devinfo.u.array.array_type==AT_RAID0? "RAID0" :
				devinfo.u.array.array_type==AT_RAID1? "RAID1" :
				devinfo.u.array.array_type==AT_RAID5? "RAID5" :
				devinfo.u.array.array_type==AT_RAID6? "RAID6" :
				devinfo.u.array.array_type==AT_JBOD? "JBOD" :
						"unknown",
			capacity,
			get_array_status(&devinfo));

		for (i = 0; i < devinfo.u.array.ndisk; i++) {
			if (hpt_id_valid(devinfo.u.array.members[i])) {
				if (cpu_to_le16(1<<i) &
					devinfo.u.array.critical_members)
					hptiop_copy_info(pinfo, "\t*");
				hptiop_dump_devinfo(hba, pinfo,
					devinfo.u.array.members[i], indent+1);
			}
			else
				hptiop_copy_info(pinfo, "\tMissing\n");
		}

		if (id == devinfo.u.array.transform_source) {
			hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n");
			hptiop_dump_devinfo(hba, pinfo,
				devinfo.u.array.transform_target, indent+1);
		}
		break;
	}
}

static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct hptiop_hba *hba = filp->private_data;
	struct hptiop_getinfo info;
	int i, j, ndev;
	struct hpt_controller_info con_info;
	struct hpt_channel_info chan_info;
	__le32 ids[32];

	info.buffer = buf;
	info.buflength = count;
	info.bufoffset = ppos ? *ppos : 0;
	info.filpos = 0;
	info.buffillen = 0;

	if (hptiop_get_controller_info(hba, &con_info))
		return -EIO;

	for (i = 0; i < con_info.num_buses; i++) {
		if (hptiop_get_channel_info(hba, i, &chan_info) == 0) {
			if (hpt_id_valid(chan_info.devices[0]))
				hptiop_dump_devinfo(hba, &info,
						chan_info.devices[0], 0);
			if (hpt_id_valid(chan_info.devices[1]))
				hptiop_dump_devinfo(hba, &info,
						chan_info.devices[1], 0);
		}
	}

	ndev = hptiop_get_logical_devices(hba, ids,
					sizeof(ids) / sizeof(ids[0]));

	/*
	 * if hptiop_get_logical_devices fails, ndev==-1 and the loop
	 * below simply outputs nothing
	 */
	for (j = 0; j < ndev; j++)
		hptiop_dump_devinfo(hba, &info, ids[j], 0);

	if (ppos)
		*ppos += info.buffillen;

	return info.buffillen;
}

static int hptiop_cdev_ioctl(struct inode *inode, struct file *file,
					unsigned int cmd, unsigned long arg)
{
	struct hptiop_hba *hba = file->private_data;
	struct hpt_ioctl_u ioctl_u;
	struct hpt_ioctl_k ioctl_k;
	u32 bytes_returned;
	int err = -EINVAL;

	if (copy_from_user(&ioctl_u,
		(void __user *)arg, sizeof(struct hpt_ioctl_u)))
		return -EINVAL;

	if (ioctl_u.magic != HPT_IOCTL_MAGIC)
		return -EINVAL;

	ioctl_k.ioctl_code = ioctl_u.ioctl_code;
	ioctl_k.inbuf = NULL;
	ioctl_k.inbuf_size = ioctl_u.inbuf_size;
	ioctl_k.outbuf = NULL;
	ioctl_k.outbuf_size = ioctl_u.outbuf_size;
	ioctl_k.hba = hba;
	ioctl_k.bytes_returned = &bytes_returned;

	/* verify user buffer */
	if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ,
			ioctl_u.inbuf, ioctl_k.inbuf_size)) ||
		(ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE,
			ioctl_u.outbuf, ioctl_k.outbuf_size)) ||
		(ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE,
			ioctl_u.bytes_returned, sizeof(u32))) ||
		ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) {

		dprintk("scsi%d: got bad user address\n", hba->host->host_no);
		return -EINVAL;
	}

	/* map buffer to kernel. */
	if (ioctl_k.inbuf_size) {
		ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL);
		if (!ioctl_k.inbuf) {
			dprintk("scsi%d: fail to alloc inbuf\n",
					hba->host->host_no);
			err = -ENOMEM;
			goto err_exit;
		}

		if (copy_from_user(ioctl_k.inbuf,
				ioctl_u.inbuf, ioctl_k.inbuf_size)) {
			goto err_exit;
		}
	}

	if (ioctl_k.outbuf_size) {
		ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL);
		if (!ioctl_k.outbuf) {
			dprintk("scsi%d: fail to alloc outbuf\n",
					hba->host->host_no);
			err = -ENOMEM;
			goto err_exit;
		}
	}

	hptiop_do_ioctl(&ioctl_k);

	if (ioctl_k.result == HPT_IOCTL_RESULT_OK) {
		if (ioctl_k.outbuf_size &&
			copy_to_user(ioctl_u.outbuf,
				ioctl_k.outbuf, ioctl_k.outbuf_size))
			goto err_exit;

		if (ioctl_u.bytes_returned &&
			copy_to_user(ioctl_u.bytes_returned,
				&bytes_returned, sizeof(u32)))
			goto err_exit;

		err = 0;
	}

err_exit:
	kfree(ioctl_k.inbuf);
	kfree(ioctl_k.outbuf);
	return err;
}

static int hptiop_cdev_open(struct inode *inode, struct file *file)
{
	struct hptiop_hba *hba;
	unsigned i = 0, minor = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&hptiop_hba_list_lock);
	list_for_each_entry(hba, &hptiop_hba_list, link) {
		if (i == minor) {
			file->private_data = hba;
			ret = 0;
			goto out;
		}
		i++;
	}

out:
	spin_unlock(&hptiop_hba_list_lock);
	return ret;
}

static struct file_operations hptiop_cdev_fops = {
	.owner = THIS_MODULE,
	.read  = hptiop_cdev_read,
	.ioctl = hptiop_cdev_ioctl,
	.open  = hptiop_cdev_open,
};

static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *host = class_to_shost(class_dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
				hba->firmware_version >> 24,
				(hba->firmware_version >> 16) & 0xff,
				(hba->firmware_version >> 8) & 0xff,
				hba->firmware_version & 0xff);
}

static struct class_device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct class_device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct class_device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};

static struct scsi_host_template driver_template = {
	.module                     = THIS_MODULE,
	.name                       = driver_name,
	.queuecommand               = hptiop_queuecommand,
	.eh_device_reset_handler    = hptiop_reset,
	.eh_bus_reset_handler       = hptiop_reset,
	.info                       = hptiop_info,
	.unchecked_isa_dma          = 0,
	.emulated                   = 0,
	.use_clustering             = ENABLE_CLUSTERING,
	.proc_name                  = driver_name,
	.shost_attrs                = hptiop_attrs,
	.this_id                    = -1,
	.change_queue_depth         = hptiop_adjust_disk_queue_depth,
};
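
/*
 * PCI probe: enable the device, set the DMA mask, map BAR 0, wait for the
 * IOP firmware to become ready, exchange get/set config requests, register
 * the SCSI host, hook the interrupt and carve the coherent DMA area into
 * 32-byte aligned request slots before starting the firmware background task.
 */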
static int __devinit hptiop_probe(struct pci_dev *pcidev,
					const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;

	dprintk("hptiop_probe(%p)\n", pcidev);

	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: fail to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) {
		if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
			printk(KERN_ERR "hptiop: fail to set dma_mask\n");
			goto disable_pci_device;
		}
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;

	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 1;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hptiop_map_pci_bar(hba))
		goto free_scsi_host;

	if (iop_wait_ready(hba->iop, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (iop_get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;

	set_config.vbus_id = cpu_to_le32(host->host_no);
	set_config.iop_id = cpu_to_le32(host->host_no);

	if (iop_set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto remove_scsi_host;
	}

	/* Allocate request mem */
	req_size = sizeof(struct hpt_iop_request_scsi_command)
		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	start_virt = dma_alloc_coherent(&pcidev->dev,
				hba->req_size*hba->max_requests + 0x20,
				&start_phy, GFP_KERNEL);

	if (!start_virt) {
		printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
					hba->host->host_no);
		goto free_request_irq;
	}

	hba->dma_coherent = start_virt;
	hba->dma_coherent_handle = start_phy;

	if ((start_phy & 0x1f) != 0)
	{
		offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
		start_phy += offset;
		start_virt += offset;
	}

	hba->req_list = start_virt;
	for (i = 0; i < hba->max_requests; i++) {
		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
		start_virt = (char *)start_virt + hba->req_size;
		start_phy = start_phy + hba->req_size;
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	spin_lock(&hptiop_hba_list_lock);
	list_add_tail(&hba->link, &hptiop_hba_list);
	spin_unlock(&hptiop_hba_list_lock);

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
	return 0;

free_request_mem:
	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size*hba->max_requests + 0x20,
			hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
	free_irq(hba->pcidev->irq, hba);

remove_scsi_host:
	scsi_remove_host(host);

unmap_pci_bar:
	iounmap(hba->iop);

free_pci_regions:
	pci_release_regions(pcidev);

free_scsi_host:
	scsi_host_put(host);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe fail\n", host->host_no);
	return -ENODEV;
}

static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iopmu __iomem *iop = hba->iop;
	u32 int_mask;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timed out\n",
					hba->host->host_no);

	/* disable all outbound interrupts */
	int_mask = readl(&iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&iop->outbound_intmask);
	hptiop_pci_posting_flush(iop);
}

static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	spin_lock(&hptiop_hba_list_lock);
	list_del_init(&hba->link);
	spin_unlock(&hptiop_hba_list_lock);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent,
			hba->dma_coherent_handle);

	iounmap(hba->iop);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}

static struct pci_device_id hptiop_id_table[] = {
	{ PCI_DEVICE(0x1103, 0x3220) },
	{ PCI_DEVICE(0x1103, 0x3320) },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name       = driver_name,
	.id_table   = hptiop_id_table,
	.probe      = hptiop_probe,
	.remove     = hptiop_remove,
	.shutdown   = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	int error;

	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);

	error = pci_register_driver(&hptiop_pci_driver);
	if (error < 0)
		return error;

	hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops);
	if (hptiop_cdev_major < 0) {
		printk(KERN_WARNING "unable to register hptiop device.\n");
		return hptiop_cdev_major;
	}

	return 0;
}

static void __exit hptiop_module_exit(void)
{
	dprintk("hptiop_module_exit\n");

	unregister_chrdev(hptiop_cdev_major, "hptiop");
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");