  1. /*
  2. *******************************************************************************
  3. ** O.S : Linux
  4. ** FILE NAME : arcmsr_hba.c
  5. ** BY : Erich Chen
  6. ** Description: SCSI RAID Device Driver for
  7. ** ARECA RAID Host adapter
  8. *******************************************************************************
  9. ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
  10. **
  11. ** Web site: www.areca.com.tw
  12. ** E-mail: support@areca.com.tw
  13. **
  14. ** This program is free software; you can redistribute it and/or modify
  15. ** it under the terms of the GNU General Public License version 2 as
  16. ** published by the Free Software Foundation.
  17. ** This program is distributed in the hope that it will be useful,
  18. ** but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. ** GNU General Public License for more details.
  21. *******************************************************************************
  22. ** Redistribution and use in source and binary forms, with or without
  23. ** modification, are permitted provided that the following conditions
  24. ** are met:
  25. ** 1. Redistributions of source code must retain the above copyright
  26. ** notice, this list of conditions and the following disclaimer.
  27. ** 2. Redistributions in binary form must reproduce the above copyright
  28. ** notice, this list of conditions and the following disclaimer in the
  29. ** documentation and/or other materials provided with the distribution.
  30. ** 3. The name of the author may not be used to endorse or promote products
  31. ** derived from this software without specific prior written permission.
  32. **
  33. ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  34. ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  35. ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  36. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  37. ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
  38. ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  39. ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
  40. ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  41. ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
  42. ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  43. *******************************************************************************
  44. ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
  45. ** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
  46. *******************************************************************************
  47. */
  48. #include <linux/module.h>
  49. #include <linux/reboot.h>
  50. #include <linux/spinlock.h>
  51. #include <linux/pci_ids.h>
  52. #include <linux/interrupt.h>
  53. #include <linux/moduleparam.h>
  54. #include <linux/errno.h>
  55. #include <linux/types.h>
  56. #include <linux/delay.h>
  57. #include <linux/dma-mapping.h>
  58. #include <linux/timer.h>
  59. #include <linux/pci.h>
  60. #include <linux/aer.h>
  61. #include <asm/dma.h>
  62. #include <asm/io.h>
  63. #include <asm/system.h>
  64. #include <asm/uaccess.h>
  65. #include <scsi/scsi_host.h>
  66. #include <scsi/scsi.h>
  67. #include <scsi/scsi_cmnd.h>
  68. #include <scsi/scsi_tcq.h>
  69. #include <scsi/scsi_device.h>
  70. #include <scsi/scsi_transport.h>
  71. #include <scsi/scsicam.h>
  72. #include "arcmsr.h"
  73. MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
  74. MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter");
  75. MODULE_LICENSE("Dual BSD/GPL");
  76. MODULE_VERSION(ARCMSR_DRIVER_VERSION);
  77. static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
  78. struct scsi_cmnd *cmd);
  79. static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
  80. static int arcmsr_abort(struct scsi_cmnd *);
  81. static int arcmsr_bus_reset(struct scsi_cmnd *);
  82. static int arcmsr_bios_param(struct scsi_device *sdev,
  83. struct block_device *bdev, sector_t capacity, int *info);
  84. static int arcmsr_queue_command(struct scsi_cmnd *cmd,
  85. void (*done) (struct scsi_cmnd *));
  86. static int arcmsr_probe(struct pci_dev *pdev,
  87. const struct pci_device_id *id);
  88. static void arcmsr_remove(struct pci_dev *pdev);
  89. static void arcmsr_shutdown(struct pci_dev *pdev);
  90. static void arcmsr_iop_init(struct AdapterControlBlock *acb);
  91. static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
  92. static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
  93. static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
  94. static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
  95. static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
  96. static const char *arcmsr_info(struct Scsi_Host *);
  97. static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
  98. static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
  99. int queue_depth)
  100. {
  101. if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
  102. queue_depth = ARCMSR_MAX_CMD_PERLUN;
  103. scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
  104. return queue_depth;
  105. }
/*
** SCSI host template: advertises the driver's entry points and
** queueing/transfer limits to the SCSI mid-layer.
*/
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	/* name is the base string concatenated with the version macro */
	.name			= "ARCMSR ARECA SATA/SAS RAID HOST Adapter"
							ARCMSR_DRIVER_VERSION,
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_MAX_OUTSTANDING_CMD,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_MAX_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS,
	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.use_sg_chaining	= ENABLE_SG_CHAINING,
	.shost_attrs		= arcmsr_host_attrs,
};
#ifdef CONFIG_SCSI_ARCMSR_AER
/* PCIe Advanced Error Reporting hooks (compiled in only when the
 * kernel is configured with CONFIG_SCSI_ARCMSR_AER) */
static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static struct pci_error_handlers arcmsr_pci_error_handlers = {
	.error_detected		= arcmsr_pci_error_detected,
	.slot_reset		= arcmsr_pci_slot_reset,
};
#endif
/* PCI IDs of every Areca adapter this driver binds to; the table is
 * exported via MODULE_DEVICE_TABLE so hotplug can autoload the module. */
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
/* PCI driver glue: probe/remove/shutdown callbacks plus the optional
 * AER error handlers. */
static struct pci_driver arcmsr_pci_driver = {
	.name			= "arcmsr",
	.id_table		= arcmsr_device_id_table,
	.probe			= arcmsr_probe,
	.remove			= arcmsr_remove,
	.shutdown		= arcmsr_shutdown,
#ifdef CONFIG_SCSI_ARCMSR_AER
	.err_handler		= &arcmsr_pci_error_handlers,
#endif
};
  166. static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
  167. {
  168. irqreturn_t handle_state;
  169. struct AdapterControlBlock *acb = dev_id;
  170. spin_lock(acb->host->host_lock);
  171. handle_state = arcmsr_interrupt(acb);
  172. spin_unlock(acb->host->host_lock);
  173. return handle_state;
  174. }
  175. static int arcmsr_bios_param(struct scsi_device *sdev,
  176. struct block_device *bdev, sector_t capacity, int *geom)
  177. {
  178. int ret, heads, sectors, cylinders, total_capacity;
  179. unsigned char *buffer;/* return copy of block device's partition table */
  180. buffer = scsi_bios_ptable(bdev);
  181. if (buffer) {
  182. ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
  183. kfree(buffer);
  184. if (ret != -1)
  185. return ret;
  186. }
  187. total_capacity = capacity;
  188. heads = 64;
  189. sectors = 32;
  190. cylinders = total_capacity / (heads * sectors);
  191. if (cylinders > 1024) {
  192. heads = 255;
  193. sectors = 63;
  194. cylinders = total_capacity / (heads * sectors);
  195. }
  196. geom[0] = heads;
  197. geom[1] = sectors;
  198. geom[2] = cylinders;
  199. return 0;
  200. }
  201. static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
  202. {
  203. struct pci_dev *pdev = acb->pdev;
  204. u16 dev_id;
  205. pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
  206. switch (dev_id) {
  207. case 0x1201 : {
  208. acb->adapter_type = ACB_ADAPTER_TYPE_B;
  209. }
  210. break;
  211. default : acb->adapter_type = ACB_ADAPTER_TYPE_A;
  212. }
  213. }
  214. static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
  215. {
  216. switch (acb->adapter_type) {
  217. case ACB_ADAPTER_TYPE_A: {
  218. struct pci_dev *pdev = acb->pdev;
  219. void *dma_coherent;
  220. dma_addr_t dma_coherent_handle, dma_addr;
  221. struct CommandControlBlock *ccb_tmp;
  222. uint32_t intmask_org;
  223. int i, j;
  224. acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
  225. if (!acb->pmuA) {
  226. printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
  227. acb->host->host_no);
  228. }
  229. dma_coherent = dma_alloc_coherent(&pdev->dev,
  230. ARCMSR_MAX_FREECCB_NUM *
  231. sizeof (struct CommandControlBlock) + 0x20,
  232. &dma_coherent_handle, GFP_KERNEL);
  233. if (!dma_coherent)
  234. return -ENOMEM;
  235. acb->dma_coherent = dma_coherent;
  236. acb->dma_coherent_handle = dma_coherent_handle;
  237. if (((unsigned long)dma_coherent & 0x1F)) {
  238. dma_coherent = dma_coherent +
  239. (0x20 - ((unsigned long)dma_coherent & 0x1F));
  240. dma_coherent_handle = dma_coherent_handle +
  241. (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
  242. }
  243. dma_addr = dma_coherent_handle;
  244. ccb_tmp = (struct CommandControlBlock *)dma_coherent;
  245. for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
  246. ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
  247. ccb_tmp->acb = acb;
  248. acb->pccb_pool[i] = ccb_tmp;
  249. list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
  250. dma_addr = dma_addr + sizeof(struct CommandControlBlock);
  251. ccb_tmp++;
  252. }
  253. acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
  254. for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
  255. for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
  256. acb->devstate[i][j] = ARECA_RAID_GONE;
  257. /*
  258. ** here we need to tell iop 331 our ccb_tmp.HighPart
  259. ** if ccb_tmp.HighPart is not zero
  260. */
  261. intmask_org = arcmsr_disable_outbound_ints(acb);
  262. }
  263. break;
  264. case ACB_ADAPTER_TYPE_B: {
  265. struct pci_dev *pdev = acb->pdev;
  266. struct MessageUnit_B *reg;
  267. void __iomem *mem_base0, *mem_base1;
  268. void *dma_coherent;
  269. dma_addr_t dma_coherent_handle, dma_addr;
  270. uint32_t intmask_org;
  271. struct CommandControlBlock *ccb_tmp;
  272. int i, j;
  273. dma_coherent = dma_alloc_coherent(&pdev->dev,
  274. ((ARCMSR_MAX_FREECCB_NUM *
  275. sizeof(struct CommandControlBlock) + 0x20) +
  276. sizeof(struct MessageUnit_B)),
  277. &dma_coherent_handle, GFP_KERNEL);
  278. if (!dma_coherent)
  279. return -ENOMEM;
  280. acb->dma_coherent = dma_coherent;
  281. acb->dma_coherent_handle = dma_coherent_handle;
  282. if (((unsigned long)dma_coherent & 0x1F)) {
  283. dma_coherent = dma_coherent +
  284. (0x20 - ((unsigned long)dma_coherent & 0x1F));
  285. dma_coherent_handle = dma_coherent_handle +
  286. (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
  287. }
  288. reg = (struct MessageUnit_B *)(dma_coherent +
  289. ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
  290. dma_addr = dma_coherent_handle;
  291. ccb_tmp = (struct CommandControlBlock *)dma_coherent;
  292. for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
  293. ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
  294. ccb_tmp->acb = acb;
  295. acb->pccb_pool[i] = ccb_tmp;
  296. list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
  297. dma_addr = dma_addr + sizeof(struct CommandControlBlock);
  298. ccb_tmp++;
  299. }
  300. reg = (struct MessageUnit_B *)(dma_coherent +
  301. ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
  302. acb->pmuB = reg;
  303. mem_base0 = ioremap(pci_resource_start(pdev, 0),
  304. pci_resource_len(pdev, 0));
  305. mem_base1 = ioremap(pci_resource_start(pdev, 2),
  306. pci_resource_len(pdev, 2));
  307. reg->drv2iop_doorbell_reg = mem_base0 + ARCMSR_DRV2IOP_DOORBELL;
  308. reg->drv2iop_doorbell_mask_reg = mem_base0 +
  309. ARCMSR_DRV2IOP_DOORBELL_MASK;
  310. reg->iop2drv_doorbell_reg = mem_base0 + ARCMSR_IOP2DRV_DOORBELL;
  311. reg->iop2drv_doorbell_mask_reg = mem_base0 +
  312. ARCMSR_IOP2DRV_DOORBELL_MASK;
  313. reg->ioctl_wbuffer_reg = mem_base1 + ARCMSR_IOCTL_WBUFFER;
  314. reg->ioctl_rbuffer_reg = mem_base1 + ARCMSR_IOCTL_RBUFFER;
  315. reg->msgcode_rwbuffer_reg = mem_base1 + ARCMSR_MSGCODE_RWBUFFER;
  316. acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
  317. for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
  318. for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
  319. acb->devstate[i][j] = ARECA_RAID_GOOD;
  320. /*
  321. ** here we need to tell iop 331 our ccb_tmp.HighPart
  322. ** if ccb_tmp.HighPart is not zero
  323. */
  324. intmask_org = arcmsr_disable_outbound_ints(acb);
  325. }
  326. break;
  327. }
  328. return 0;
  329. }
  330. static int arcmsr_probe(struct pci_dev *pdev,
  331. const struct pci_device_id *id)
  332. {
  333. struct Scsi_Host *host;
  334. struct AdapterControlBlock *acb;
  335. uint8_t bus, dev_fun;
  336. int error;
  337. error = pci_enable_device(pdev);
  338. if (error)
  339. goto out;
  340. pci_set_master(pdev);
  341. host = scsi_host_alloc(&arcmsr_scsi_host_template,
  342. sizeof(struct AdapterControlBlock));
  343. if (!host) {
  344. error = -ENOMEM;
  345. goto out_disable_device;
  346. }
  347. acb = (struct AdapterControlBlock *)host->hostdata;
  348. memset(acb, 0, sizeof (struct AdapterControlBlock));
  349. error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
  350. if (error) {
  351. error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
  352. if (error) {
  353. printk(KERN_WARNING
  354. "scsi%d: No suitable DMA mask available\n",
  355. host->host_no);
  356. goto out_host_put;
  357. }
  358. }
  359. bus = pdev->bus->number;
  360. dev_fun = pdev->devfn;
  361. acb->host = host;
  362. acb->pdev = pdev;
  363. host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
  364. host->max_lun = ARCMSR_MAX_TARGETLUN;
  365. host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
  366. host->max_cmd_len = 16; /*this is issue of 64bit LBA, over 2T byte*/
  367. host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
  368. host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
  369. host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
  370. host->this_id = ARCMSR_SCSI_INITIATOR_ID;
  371. host->unique_id = (bus << 8) | dev_fun;
  372. host->irq = pdev->irq;
  373. error = pci_request_regions(pdev, "arcmsr");
  374. if (error) {
  375. goto out_host_put;
  376. }
  377. arcmsr_define_adapter_type(acb);
  378. acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
  379. ACB_F_MESSAGE_RQBUFFER_CLEARED |
  380. ACB_F_MESSAGE_WQBUFFER_READED);
  381. acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
  382. INIT_LIST_HEAD(&acb->ccb_free_list);
  383. error = arcmsr_alloc_ccb_pool(acb);
  384. if (error)
  385. goto out_release_regions;
  386. error = request_irq(pdev->irq, arcmsr_do_interrupt,
  387. IRQF_SHARED, "arcmsr", acb);
  388. if (error)
  389. goto out_free_ccb_pool;
  390. arcmsr_iop_init(acb);
  391. pci_set_drvdata(pdev, host);
  392. if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
  393. host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
  394. error = scsi_add_host(host, &pdev->dev);
  395. if (error)
  396. goto out_free_irq;
  397. error = arcmsr_alloc_sysfs_attr(acb);
  398. if (error)
  399. goto out_free_sysfs;
  400. scsi_scan_host(host);
  401. #ifdef CONFIG_SCSI_ARCMSR_AER
  402. pci_enable_pcie_error_reporting(pdev);
  403. #endif
  404. return 0;
  405. out_free_sysfs:
  406. out_free_irq:
  407. free_irq(pdev->irq, acb);
  408. out_free_ccb_pool:
  409. arcmsr_free_ccb_pool(acb);
  410. iounmap(acb->pmu);
  411. out_release_regions:
  412. pci_release_regions(pdev);
  413. out_host_put:
  414. scsi_host_put(host);
  415. out_disable_device:
  416. pci_disable_device(pdev);
  417. out:
  418. return error;
  419. }
  420. static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
  421. {
  422. struct MessageUnit_A __iomem *reg = acb->pmuA;
  423. uint32_t Index;
  424. uint8_t Retries = 0x00;
  425. do {
  426. for (Index = 0; Index < 100; Index++) {
  427. if (readl(&reg->outbound_intstatus) &
  428. ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
  429. writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
  430. &reg->outbound_intstatus);
  431. return 0x00;
  432. }
  433. msleep(10);
  434. }/*max 1 seconds*/
  435. } while (Retries++ < 20);/*max 20 sec*/
  436. return 0xff;
  437. }
  438. static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
  439. {
  440. struct MessageUnit_B *reg = acb->pmuB;
  441. uint32_t Index;
  442. uint8_t Retries = 0x00;
  443. do {
  444. for (Index = 0; Index < 100; Index++) {
  445. if (readl(reg->iop2drv_doorbell_reg)
  446. & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
  447. writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
  448. , reg->iop2drv_doorbell_reg);
  449. return 0x00;
  450. }
  451. msleep(10);
  452. }/*max 1 seconds*/
  453. } while (Retries++ < 20);/*max 20 sec*/
  454. return 0xff;
  455. }
  456. static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
  457. {
  458. struct MessageUnit_A __iomem *reg = acb->pmuA;
  459. writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
  460. if (arcmsr_hba_wait_msgint_ready(acb))
  461. printk(KERN_NOTICE
  462. "arcmsr%d: wait 'abort all outstanding command' timeout \n"
  463. , acb->host->host_no);
  464. }
  465. static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
  466. {
  467. struct MessageUnit_B *reg = acb->pmuB;
  468. writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
  469. if (arcmsr_hbb_wait_msgint_ready(acb))
  470. printk(KERN_NOTICE
  471. "arcmsr%d: wait 'abort all outstanding command' timeout \n"
  472. , acb->host->host_no);
  473. }
  474. static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
  475. {
  476. switch (acb->adapter_type) {
  477. case ACB_ADAPTER_TYPE_A: {
  478. arcmsr_abort_hba_allcmd(acb);
  479. }
  480. break;
  481. case ACB_ADAPTER_TYPE_B: {
  482. arcmsr_abort_hbb_allcmd(acb);
  483. }
  484. }
  485. }
  486. static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
  487. {
  488. struct scsi_cmnd *pcmd = ccb->pcmd;
  489. scsi_dma_unmap(pcmd);
  490. }
/*
** Retire a CCB: unmap its DMA, mark it done, return it to the free
** list and hand the SCSI command back to the mid-layer.
** @stand_flag of 1 means the CCB was counted as outstanding and the
** counter must be decremented.
** NOTE(review): the CCB is put back on the free list BEFORE
** scsi_done runs; callers appear to rely on the host lock for
** serialization — confirm before reordering.
*/
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
{
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;

	arcmsr_pci_unmap_dma(ccb);
	if (stand_flag == 1)
		atomic_dec(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_DONE;
	ccb->ccb_flags = 0;
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	pcmd->scsi_done(pcmd);
}
  503. static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
  504. {
  505. struct MessageUnit_A __iomem *reg = acb->pmuA;
  506. int retry_count = 30;
  507. writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
  508. do {
  509. if (!arcmsr_hba_wait_msgint_ready(acb))
  510. break;
  511. else {
  512. retry_count--;
  513. printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
  514. timeout, retry count down = %d \n", acb->host->host_no, retry_count);
  515. }
  516. } while (retry_count != 0);
  517. }
  518. static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
  519. {
  520. struct MessageUnit_B *reg = acb->pmuB;
  521. int retry_count = 30;
  522. writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg);
  523. do {
  524. if (!arcmsr_hbb_wait_msgint_ready(acb))
  525. break;
  526. else {
  527. retry_count--;
  528. printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
  529. timeout,retry count down = %d \n", acb->host->host_no, retry_count);
  530. }
  531. } while (retry_count != 0);
  532. }
  533. static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
  534. {
  535. switch (acb->adapter_type) {
  536. case ACB_ADAPTER_TYPE_A: {
  537. arcmsr_flush_hba_cache(acb);
  538. }
  539. break;
  540. case ACB_ADAPTER_TYPE_B: {
  541. arcmsr_flush_hbb_cache(acb);
  542. }
  543. }
  544. }
  545. static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
  546. {
  547. struct scsi_cmnd *pcmd = ccb->pcmd;
  548. struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
  549. pcmd->result = DID_OK << 16;
  550. if (sensebuffer) {
  551. int sense_data_length =
  552. sizeof(struct SENSE_DATA) < sizeof(pcmd->sense_buffer)
  553. ? sizeof(struct SENSE_DATA) : sizeof(pcmd->sense_buffer);
  554. memset(sensebuffer, 0, sizeof(pcmd->sense_buffer));
  555. memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
  556. sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
  557. sensebuffer->Valid = 1;
  558. }
  559. }
/*
** Mask all outbound (IOP -> driver) interrupts and return the
** previous mask value so the caller can restore it later.
*/
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
	u32 orig_mask = 0;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A : {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/* remember the current mask (with message0 forced masked),
		 * then mask every outbound interrupt source */
		orig_mask = readl(&reg->outbound_intmask)|\
				ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
		writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
				&reg->outbound_intmask);
		}
		break;
	case ACB_ADAPTER_TYPE_B : {
		struct MessageUnit_B *reg = acb->pmuB;
		/* type B: writing 0 to the doorbell mask register disables
		 * all iop2drv doorbell interrupts */
		orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \
				(~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(0, reg->iop2drv_doorbell_mask_reg);
		}
		break;
	}
	return orig_mask;
}
/*
** Translate one completed CCB's status into a SCSI result code for
** the mid-layer and retire the CCB.  Also maintains the per-target/
** per-LUN presence map in acb->devstate.
*/
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \
		struct CommandControlBlock *ccb, uint32_t flag_ccb)
{
	uint8_t id, lun;
	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
		/* success: a previously vanished volume is back */
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb, 1);
	} else {
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT: {
			/* device did not answer selection: mark it gone */
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb, 1);
			}
			break;
		case ARCMSR_DEV_ABORTED:
		case ARCMSR_DEV_INIT_FAIL: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb, 1);
			}
			break;
		case ARCMSR_DEV_CHECK_CONDITION: {
			/* device stays good; forward the sense data */
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb, 1);
			}
			break;
		default:
			/* unrecognized status: log it and treat the
			 * device as gone */
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d"
				" isr get command error done, "
				"but got unknown DeviceStatus = 0x%x \n"
				, acb->host->host_no
				, id
				, lun
				, ccb->arcmsr_cdb.DeviceStatus);
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb, 1);
			break;
		}
	}
}
/*
 * Convert a completion token from the IOP into its CCB and complete it.
 * flag_ccb carries the CCB's DMA address shifted right by 5 (low bits
 * are status flags); shifting it back and adding vir2phy_offset
 * recovers the kernel virtual address.  Tokens that do not map to an
 * in-flight CCB of this adapter are logged, and a CCB already marked
 * aborted is finished with DID_ABORT.
 */
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t flag_ccb)
{
	struct CommandControlBlock *ccb;

	ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
	if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
		if (ccb->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = ccb->pcmd;
			if (abortcmd) {
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				printk(KERN_NOTICE "arcmsr%d: ccb ='0x%p' \
isr got aborted command \n", acb->host->host_no, ccb);
			}
		}
		printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
done acb = '0x%p'"
			"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
			" ccboutstandingcount = %d \n"
			, acb->host->host_no
			, acb
			, ccb
			, ccb->acb
			, ccb->startdone
			, atomic_read(&acb->ccboutstandingcount));
	}
	arcmsr_report_ccb_state(acb, ccb, flag_ccb);
}
/*
 * Abort path: drain and complete everything the IOP has already posted
 * to its done queue so no completion is lost before outstanding
 * commands are forcibly finished.
 */
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	int i = 0;
	uint32_t flag_ccb;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_intstatus;
		outbound_intstatus = readl(&reg->outbound_intstatus) &
			acb->outbound_int_enable;
		/*clear and abort all outbound posted Q*/
		writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
		/* 0xFFFFFFFF marks an empty queue; cap the drain loop */
		while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
			&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			arcmsr_drain_donequeue(acb, flag_ccb);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear all outbound posted Q*/
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
				/* consume the token and zero the slot */
				writel(0, &reg->done_qbuffer[i]);
				arcmsr_drain_donequeue(acb, flag_ccb);
			}
			writel(0, &reg->post_qbuffer[i]);
		}
		reg->doneq_index = 0;
		reg->postq_index = 0;
		}
		break;
	}
}
/*
 * PCI remove hook: detach the SCSI host, quiesce the adapter, poll up
 * to ARCMSR_MAX_OUTSTANDING_CMD * 25ms for in-flight commands to
 * drain, then force-abort whatever remains and release all resources.
 */
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;

	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_disable_outbound_ints(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;
	/* interrupts are masked now; poll completions by hand */
	for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
		msleep(25);
	}
	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;
		/* commands are stuck: abort them at the IOP, drain the done
		   queue, and complete every started CCB with DID_ABORT */
		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
			}
		}
	}
	free_irq(pdev->irq, acb);
	iounmap(acb->pmu);
	arcmsr_free_ccb_pool(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
  731. static void arcmsr_shutdown(struct pci_dev *pdev)
  732. {
  733. struct Scsi_Host *host = pci_get_drvdata(pdev);
  734. struct AdapterControlBlock *acb =
  735. (struct AdapterControlBlock *)host->hostdata;
  736. arcmsr_stop_adapter_bgrb(acb);
  737. arcmsr_flush_adapter_cache(acb);
  738. }
  739. static int arcmsr_module_init(void)
  740. {
  741. int error = 0;
  742. error = pci_register_driver(&arcmsr_pci_driver);
  743. return error;
  744. }
/* Module exit point: unregister the PCI driver from the core. */
static void arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
/* Bind the init/exit routines to module load/unload. */
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
/*
 * Re-enable the outbound interrupt sources the driver services, based
 * on the mask previously captured by arcmsr_disable_outbound_ints().
 */
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
	u32 intmask_org)
{
	u32 mask;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A : {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/* type A: clearing a mask bit unmasks the source */
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;
	case ACB_ADAPTER_TYPE_B : {
		struct MessageUnit_B *reg = acb->pmuB;
		/* type B: setting a doorbell-mask bit enables the source */
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
		writel(mask, reg->iop2drv_doorbell_mask_reg);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
	}
}
/*
 * Fill @ccb's ARCMSR_CDB from the SCSI command @pcmd: bus/target/LUN,
 * the raw CDB bytes, and a 32- or 64-bit IOP scatter/gather list built
 * from the DMA-mapped sglist.  Sets the "big SGL" flag when the CDB
 * grows past 256 bytes, and marks write-direction transfers.
 */
static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;	/* fixed header size before the SG list */
	int nseg;

	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->Bus = 0;
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
	arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
	nseg = scsi_dma_map(pcmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		__le32 length;
		int i, cdb_sgcount = 0;
		struct scatterlist *sg;
		/* map stor port SG list to our iop SG List. */
		scsi_for_each_sg(pcmd, sg, nseg, i) {
			/* Get the physical address of the current data pointer */
			length = cpu_to_le32(sg_dma_len(sg));
			address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
			address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
			if (address_hi == 0) {
				/* address fits in 32 bits: compact entry */
				struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
				pdma_sg->address = address_lo;
				pdma_sg->length = length;
				psge += sizeof (struct SG32ENTRY);
				arccdbsize += sizeof (struct SG32ENTRY);
			} else {
				/* 64-bit entry, flagged via IS_SG64_ADDR */
				struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
				pdma_sg->addresshigh = address_hi;
				pdma_sg->address = address_lo;
				pdma_sg->length = length|IS_SG64_ADDR;
				psge += sizeof (struct SG64ENTRY);
				arccdbsize += sizeof (struct SG64ENTRY);
			}
			cdb_sgcount++;
		}
		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
		arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
		if ( arccdbsize > 256)
			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	}
	if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
		ccb->ccb_flags |= CCB_FLAG_WRITE;
	}
}
/*
 * Hand @ccb to the IOP.  Type A writes the CCB's shifted physical
 * address into the inbound queue port; type B stores it into the next
 * post_qbuffer slot and rings the drv2iop doorbell.  The
 * ARCMSR_CCBPOST_FLAG_SGL_BSIZE post flag mirrors the CDB's
 * ARCMSR_CDB_FLAG_SGL_BSIZE.
 */
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;

	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->inbound_queueport);
		else {
			writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;
		/* zero the slot after ours so the IOP sees the queue end */
		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		writel(0, &reg->post_qbuffer[ending_index]);
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->post_qbuffer[index]);
		}
		else {
			writel(cdb_shifted_phyaddr, &reg->post_qbuffer[index]);
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell_reg);
		}
		break;
	}
}
  865. static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
  866. {
  867. struct MessageUnit_A __iomem *reg = acb->pmuA;
  868. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  869. writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
  870. if (arcmsr_hba_wait_msgint_ready(acb)) {
  871. printk(KERN_NOTICE
  872. "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
  873. , acb->host->host_no);
  874. }
  875. }
  876. static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
  877. {
  878. struct MessageUnit_B *reg = acb->pmuB;
  879. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  880. writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg);
  881. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  882. printk(KERN_NOTICE
  883. "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
  884. , acb->host->host_no);
  885. }
  886. }
  887. static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
  888. {
  889. switch (acb->adapter_type) {
  890. case ACB_ADAPTER_TYPE_A: {
  891. arcmsr_stop_hba_bgrb(acb);
  892. }
  893. break;
  894. case ACB_ADAPTER_TYPE_B: {
  895. arcmsr_stop_hbb_bgrb(acb);
  896. }
  897. break;
  898. }
  899. }
/*
 * Free the coherent DMA area holding the whole CCB pool.  The size
 * mirrors the allocation: all CCBs plus 0x20 bytes of alignment slack.
 */
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
	dma_free_coherent(&acb->pdev->dev,
		ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
		acb->dma_coherent,
		acb->dma_coherent_handle);
}
  907. void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
  908. {
  909. switch (acb->adapter_type) {
  910. case ACB_ADAPTER_TYPE_A: {
  911. struct MessageUnit_A __iomem *reg = acb->pmuA;
  912. writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
  913. }
  914. break;
  915. case ACB_ADAPTER_TYPE_B: {
  916. struct MessageUnit_B *reg = acb->pmuB;
  917. writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
  918. }
  919. break;
  920. }
  921. }
  922. static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
  923. {
  924. switch (acb->adapter_type) {
  925. case ACB_ADAPTER_TYPE_A: {
  926. struct MessageUnit_A __iomem *reg = acb->pmuA;
  927. /*
  928. ** push inbound doorbell tell iop, driver data write ok
  929. ** and wait reply on next hwinterrupt for next Qbuffer post
  930. */
  931. writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
  932. }
  933. break;
  934. case ACB_ADAPTER_TYPE_B: {
  935. struct MessageUnit_B *reg = acb->pmuB;
  936. /*
  937. ** push inbound doorbell tell iop, driver data write ok
  938. ** and wait reply on next hwinterrupt for next Qbuffer post
  939. */
  940. writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell_reg);
  941. }
  942. break;
  943. }
  944. }
  945. struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
  946. {
  947. static struct QBUFFER __iomem *qbuffer;
  948. switch (acb->adapter_type) {
  949. case ACB_ADAPTER_TYPE_A: {
  950. struct MessageUnit_A __iomem *reg = acb->pmuA;
  951. qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
  952. }
  953. break;
  954. case ACB_ADAPTER_TYPE_B: {
  955. struct MessageUnit_B *reg = acb->pmuB;
  956. qbuffer = (struct QBUFFER __iomem *)reg->ioctl_rbuffer_reg;
  957. }
  958. break;
  959. }
  960. return qbuffer;
  961. }
  962. static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
  963. {
  964. static struct QBUFFER __iomem *pqbuffer;
  965. switch (acb->adapter_type) {
  966. case ACB_ADAPTER_TYPE_A: {
  967. struct MessageUnit_A __iomem *reg = acb->pmuA;
  968. pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
  969. }
  970. break;
  971. case ACB_ADAPTER_TYPE_B: {
  972. struct MessageUnit_B *reg = acb->pmuB;
  973. pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg;
  974. }
  975. break;
  976. }
  977. return pqbuffer;
  978. }
  979. static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
  980. {
  981. struct QBUFFER __iomem *prbuffer;
  982. struct QBUFFER *pQbuffer;
  983. uint8_t __iomem *iop_data;
  984. int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
  985. rqbuf_lastindex = acb->rqbuf_lastindex;
  986. rqbuf_firstindex = acb->rqbuf_firstindex;
  987. prbuffer = arcmsr_get_iop_rqbuffer(acb);
  988. iop_data = (uint8_t __iomem *)prbuffer->data;
  989. iop_len = prbuffer->data_len;
  990. my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1);
  991. if (my_empty_len >= iop_len)
  992. {
  993. while (iop_len > 0) {
  994. pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
  995. memcpy(pQbuffer, iop_data,1);
  996. rqbuf_lastindex++;
  997. rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
  998. iop_data++;
  999. iop_len--;
  1000. }
  1001. acb->rqbuf_lastindex = rqbuf_lastindex;
  1002. arcmsr_iop_message_read(acb);
  1003. }
  1004. else {
  1005. acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
  1006. }
  1007. }
  1008. static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
  1009. {
  1010. acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
  1011. if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
  1012. uint8_t *pQbuffer;
  1013. struct QBUFFER __iomem *pwbuffer;
  1014. uint8_t __iomem *iop_data;
  1015. int32_t allxfer_len = 0;
  1016. acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
  1017. pwbuffer = arcmsr_get_iop_wqbuffer(acb);
  1018. iop_data = (uint8_t __iomem *)pwbuffer->data;
  1019. while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \
  1020. (allxfer_len < 124)) {
  1021. pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
  1022. memcpy(iop_data, pQbuffer, 1);
  1023. acb->wqbuf_firstindex++;
  1024. acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
  1025. iop_data++;
  1026. allxfer_len++;
  1027. }
  1028. pwbuffer->data_len = allxfer_len;
  1029. arcmsr_iop_message_wrote(acb);
  1030. }
  1031. if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
  1032. acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
  1033. }
  1034. }
/*
 * Type-A doorbell interrupt: read the doorbell, write it back to clear
 * it, then service the data-written and/or data-read events it signals.
 */
static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	outbound_doorbell = readl(&reg->outbound_doorbell);
	/* writing the value back acknowledges the doorbell */
	writel(outbound_doorbell, &reg->outbound_doorbell);
	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}
	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
}
/*
 * Type-A post-queue interrupt: pop completion tokens from the outbound
 * queue port until it reads empty (0xFFFFFFFF), completing each CCB.
 */
static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t flag_ccb;
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
		arcmsr_drain_donequeue(acb, flag_ccb);
	}
}
/*
 * Type-B post-queue service: walk done_qbuffer from doneq_index,
 * consuming (and zeroing) each nonzero completion token; a zero slot
 * marks the end of the pending completions.
 */
static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t index;
	uint32_t flag_ccb;
	struct MessageUnit_B *reg = acb->pmuB;

	index = reg->doneq_index;
	while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
		writel(0, &reg->done_qbuffer[index]);
		arcmsr_drain_donequeue(acb, flag_ccb);
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
	}
}
/*
 * Type-A interrupt handler.  Returns 1 when the interrupt was not
 * raised by a source we service (caller reports IRQ_NONE), 0 when it
 * was handled.
 */
static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_intstatus;
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	outbound_intstatus = readl(&reg->outbound_intstatus) &
		acb->outbound_int_enable;
	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
		return 1;
	}
	/* ack the handled status bits before servicing them */
	writel(outbound_intstatus, &reg->outbound_intstatus);
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
		arcmsr_hba_doorbell_isr(acb);
	}
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
		arcmsr_hba_postqueue_isr(acb);
	}
	return 0;
}
/*
 * Type-B interrupt handler.  Returns 1 when none of our doorbell bits
 * are set (spurious/shared interrupt); otherwise services data-write,
 * data-read and CDB-done events and returns 0.
 */
static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_B *reg = acb->pmuB;

	outbound_doorbell = readl(reg->iop2drv_doorbell_reg) &
		acb->outbound_int_enable;
	if (!outbound_doorbell)
		return 1;
	/* ack by writing the complement of the bits being handled */
	writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}
	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
		arcmsr_hbb_postqueue_isr(acb);
	}
	return 0;
}
  1108. static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
  1109. {
  1110. switch (acb->adapter_type) {
  1111. case ACB_ADAPTER_TYPE_A: {
  1112. if (arcmsr_handle_hba_isr(acb)) {
  1113. return IRQ_NONE;
  1114. }
  1115. }
  1116. break;
  1117. case ACB_ADAPTER_TYPE_B: {
  1118. if (arcmsr_handle_hbb_isr(acb)) {
  1119. return IRQ_NONE;
  1120. }
  1121. }
  1122. break;
  1123. }
  1124. return IRQ_HANDLED;
  1125. }
  1126. static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
  1127. {
  1128. if (acb) {
  1129. /* stop adapter background rebuild */
  1130. if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
  1131. uint32_t intmask_org;
  1132. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  1133. intmask_org = arcmsr_disable_outbound_ints(acb);
  1134. arcmsr_stop_adapter_bgrb(acb);
  1135. arcmsr_flush_adapter_cache(acb);
  1136. arcmsr_enable_outbound_ints(acb, intmask_org);
  1137. }
  1138. }
  1139. }
  1140. void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
  1141. {
  1142. int32_t wqbuf_firstindex, wqbuf_lastindex;
  1143. uint8_t *pQbuffer;
  1144. struct QBUFFER __iomem *pwbuffer;
  1145. uint8_t __iomem *iop_data;
  1146. int32_t allxfer_len = 0;
  1147. pwbuffer = arcmsr_get_iop_wqbuffer(acb);
  1148. iop_data = (uint8_t __iomem *)pwbuffer->data;
  1149. if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
  1150. acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
  1151. wqbuf_firstindex = acb->wqbuf_firstindex;
  1152. wqbuf_lastindex = acb->wqbuf_lastindex;
  1153. while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
  1154. pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
  1155. memcpy(iop_data, pQbuffer, 1);
  1156. wqbuf_firstindex++;
  1157. wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
  1158. iop_data++;
  1159. allxfer_len++;
  1160. }
  1161. acb->wqbuf_firstindex = wqbuf_firstindex;
  1162. pwbuffer->data_len = allxfer_len;
  1163. arcmsr_iop_message_wrote(acb);
  1164. }
  1165. }
/*
 * Handle an in-band Areca ioctl delivered via READ_BUFFER/WRITE_BUFFER
 * on the virtual target.  The 32-bit control code is assembled from
 * CDB bytes 5..8; the payload is the command's single scatterlist
 * segment, interpreted as a struct CMD_MESSAGE_FIELD.  Returns 0 on
 * success or ARCMSR_MESSAGE_FAIL.
 */
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
	struct scsi_cmnd *cmd)
{
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	int retvalue = 0, transfer_len = 0;
	char *buffer;
	struct scatterlist *sg;
	uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
		(uint32_t ) cmd->cmnd[6] << 16 |
		(uint32_t ) cmd->cmnd[7] << 8 |
		(uint32_t ) cmd->cmnd[8];
	/* 4 bytes: Areca io control code */
	sg = scsi_sglist(cmd);
	buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
	/* only single-segment payloads are supported */
	if (scsi_sg_count(cmd) > 1) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;
	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
	switch(controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		/* copy up to 1031 queued bytes from the driver's rqbuffer
		   ring into the user payload, via a bounce buffer */
		unsigned long *ver_addr;
		dma_addr_t buf_handle;
		uint8_t *pQbuffer, *ptmpQbuffer;
		int32_t allxfer_len = 0;
		ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpQbuffer = (uint8_t *) ver_addr;
		while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
			&& (allxfer_len < 1031)) {
			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
			memcpy(ptmpQbuffer, pQbuffer, 1);
			acb->rqbuf_firstindex++;
			acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			ptmpQbuffer++;
			allxfer_len++;
		}
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			/* ring had overflowed: pull the data still held in
			   the IOP's buffer and re-enable its doorbell */
			struct QBUFFER __iomem *prbuffer;
			uint8_t __iomem *iop_data;
			int32_t iop_len;
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			iop_data = prbuffer->data;
			iop_len = readl(&prbuffer->data_len);
			while (iop_len > 0) {
				acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
				acb->rqbuf_lastindex++;
				acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
				iop_data++;
				iop_len--;
			}
			arcmsr_iop_message_read(acb);
		}
		memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
		pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
		}
		break;
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		/* queue user payload bytes into the driver's wqbuffer ring;
		   fails with ILLEGAL_REQUEST sense if data is still pending
		   or the ring lacks space */
		unsigned long *ver_addr;
		dma_addr_t buf_handle;
		int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
		uint8_t *pQbuffer, *ptmpuserbuffer;
		ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpuserbuffer = (uint8_t *)ver_addr;
		user_len = pcmdmessagefld->cmdmessage.Length;
		memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
		wqbuf_lastindex = acb->wqbuf_lastindex;
		wqbuf_firstindex = acb->wqbuf_firstindex;
		if (wqbuf_lastindex != wqbuf_firstindex) {
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_post_ioctldata2iop(acb);
			/* has error report sensedata */
			sensebuffer->ErrorCode = 0x70;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
				&(ARCMSR_MAX_QBUFFER - 1);
			if (my_empty_len >= user_len) {
				while (user_len > 0) {
					pQbuffer =
					&acb->wqbuffer[acb->wqbuf_lastindex];
					memcpy(pQbuffer, ptmpuserbuffer, 1);
					acb->wqbuf_lastindex++;
					acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
					ptmpuserbuffer++;
					user_len--;
				}
				if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
					acb->acb_flags &=
						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
					arcmsr_post_ioctldata2iop(acb);
				}
			} else {
				/* has error report sensedata */
				struct SENSE_DATA *sensebuffer =
					(struct SENSE_DATA *)cmd->sense_buffer;
				sensebuffer->ErrorCode = 0x70;
				sensebuffer->SenseKey = ILLEGAL_REQUEST;
				sensebuffer->AdditionalSenseLength = 0x0A;
				sensebuffer->AdditionalSenseCode = 0x20;
				sensebuffer->Valid = 1;
				retvalue = ARCMSR_MESSAGE_FAIL;
			}
		}
		pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		/* reset the read ring; release any overflow held in the IOP */
		uint8_t *pQbuffer = acb->rqbuffer;
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_firstindex = 0;
		acb->rqbuf_lastindex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		/* reset the write ring */
		uint8_t *pQbuffer = acb->wqbuffer;
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |=
			(ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_firstindex = 0;
		acb->wqbuf_lastindex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		/* reset both rings at once */
		uint8_t *pQbuffer;
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |=
			(ACB_F_MESSAGE_WQBUFFER_CLEARED
			| ACB_F_MESSAGE_RQBUFFER_CLEARED
			| ACB_F_MESSAGE_WQBUFFER_READED);
		acb->rqbuf_firstindex = 0;
		acb->rqbuf_lastindex = 0;
		acb->wqbuf_firstindex = 0;
		acb->wqbuf_lastindex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		break;
	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
		}
		break;
	case ARCMSR_MESSAGE_SAY_HELLO: {
		/* identification handshake used by the management tools */
		int8_t *hello_string = "Hello! I am ARCMSR";
		memcpy(pcmdmessagefld->messagedatabuffer, hello_string
			, (int16_t)strlen(hello_string));
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		break;
	case ARCMSR_MESSAGE_SAY_GOODBYE:
		arcmsr_iop_parking(acb);
		break;
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
		arcmsr_flush_adapter_cache(acb);
		break;
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
	}
message_out:
	sg = scsi_sglist(cmd);
	kunmap_atomic(buffer - sg->offset, KM_IRQ0);
	return retvalue;
}
  1368. static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
  1369. {
  1370. struct list_head *head = &acb->ccb_free_list;
  1371. struct CommandControlBlock *ccb = NULL;
  1372. if (!list_empty(head)) {
  1373. ccb = list_entry(head->next, struct CommandControlBlock, list);
  1374. list_del(head->next);
  1375. }
  1376. return ccb;
  1377. }
  1378. static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
  1379. struct scsi_cmnd *cmd)
  1380. {
  1381. switch (cmd->cmnd[0]) {
  1382. case INQUIRY: {
  1383. unsigned char inqdata[36];
  1384. char *buffer;
  1385. struct scatterlist *sg;
  1386. if (cmd->device->lun) {
  1387. cmd->result = (DID_TIME_OUT << 16);
  1388. cmd->scsi_done(cmd);
  1389. return;
  1390. }
  1391. inqdata[0] = TYPE_PROCESSOR;
  1392. /* Periph Qualifier & Periph Dev Type */
  1393. inqdata[1] = 0;
  1394. /* rem media bit & Dev Type Modifier */
  1395. inqdata[2] = 0;
  1396. /* ISO, ECMA, & ANSI versions */
  1397. inqdata[4] = 31;
  1398. /* length of additional data */
  1399. strncpy(&inqdata[8], "Areca ", 8);
  1400. /* Vendor Identification */
  1401. strncpy(&inqdata[16], "RAID controller ", 16);
  1402. /* Product Identification */
  1403. strncpy(&inqdata[32], "R001", 4); /* Product Revision */
  1404. sg = scsi_sglist(cmd);
  1405. buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
  1406. memcpy(buffer, inqdata, sizeof(inqdata));
  1407. sg = scsi_sglist(cmd);
  1408. kunmap_atomic(buffer - sg->offset, KM_IRQ0);
  1409. cmd->scsi_done(cmd);
  1410. }
  1411. break;
  1412. case WRITE_BUFFER:
  1413. case READ_BUFFER: {
  1414. if (arcmsr_iop_message_xfer(acb, cmd))
  1415. cmd->result = (DID_ERROR << 16);
  1416. cmd->scsi_done(cmd);
  1417. }
  1418. break;
  1419. default:
  1420. cmd->scsi_done(cmd);
  1421. }
  1422. }
/*
 * SCSI mid-layer queuecommand entry (old-style, with completion
 * callback).  Target 16 is the virtual device for in-band IOP message
 * transfer; real commands get a free CCB, are translated, and posted
 * to the IOP.  Returns 0 or SCSI_MLQUEUE_HOST_BUSY.
 */
static int arcmsr_queue_command(struct scsi_cmnd *cmd,
	void (* done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;
	int lun = cmd->device->lun;

	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if (acb->acb_flags & ACB_F_BUS_RESET) {
		/* a bus reset is in flight: push back to the midlayer */
		printk(KERN_NOTICE "arcmsr%d: bus reset"
			" and return busy \n"
			, acb->host->host_no);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
		uint8_t block_cmd;
		/* fail block read/write (opcode low nibble 0x08/0x0a) to a
		   gone volume immediately instead of posting to the IOP */
		block_cmd = cmd->cmnd[0] & 0x0f;
		if (block_cmd == 0x08 || block_cmd == 0x0a) {
			printk(KERN_NOTICE
				"arcmsr%d: block 'read/write'"
				"command with gone raid volume"
				" Cmd = %2x, TargetId = %d, Lun = %d \n"
				, acb->host->host_no
				, cmd->cmnd[0]
				, target, lun);
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	if (atomic_read(&acb->ccboutstandingcount) >=
		ARCMSR_MAX_OUTSTANDING_CMD)
		return SCSI_MLQUEUE_HOST_BUSY;
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	arcmsr_build_ccb(acb, ccb, cmd);
	arcmsr_post_ccb(acb, ccb);
	return 0;
}
  1471. static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
  1472. {
  1473. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1474. char *acb_firm_model = acb->firm_model;
  1475. char *acb_firm_version = acb->firm_version;
  1476. char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
  1477. char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
  1478. int count;
  1479. writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
  1480. if (arcmsr_hba_wait_msgint_ready(acb)) {
  1481. printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
  1482. miscellaneous data' timeout \n", acb->host->host_no);
  1483. }
  1484. count = 8;
  1485. while (count) {
  1486. *acb_firm_model = readb(iop_firm_model);
  1487. acb_firm_model++;
  1488. iop_firm_model++;
  1489. count--;
  1490. }
  1491. count = 16;
  1492. while (count) {
  1493. *acb_firm_version = readb(iop_firm_version);
  1494. acb_firm_version++;
  1495. iop_firm_version++;
  1496. count--;
  1497. }
  1498. printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
  1499. , acb->host->host_no
  1500. , acb->firm_version);
  1501. acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
  1502. acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
  1503. acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
  1504. acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
  1505. }
  1506. static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
  1507. {
  1508. struct MessageUnit_B *reg = acb->pmuB;
  1509. uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg;
  1510. char *acb_firm_model = acb->firm_model;
  1511. char *acb_firm_version = acb->firm_version;
  1512. char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]);
  1513. /*firm_model,15,60-67*/
  1514. char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]);
  1515. /*firm_version,17,68-83*/
  1516. int count;
  1517. writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
  1518. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  1519. printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
  1520. miscellaneous data' timeout \n", acb->host->host_no);
  1521. }
  1522. count = 8;
  1523. while (count)
  1524. {
  1525. *acb_firm_model = readb(iop_firm_model);
  1526. acb_firm_model++;
  1527. iop_firm_model++;
  1528. count--;
  1529. }
  1530. count = 16;
  1531. while (count)
  1532. {
  1533. *acb_firm_version = readb(iop_firm_version);
  1534. acb_firm_version++;
  1535. iop_firm_version++;
  1536. count--;
  1537. }
  1538. printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n",
  1539. acb->host->host_no,
  1540. acb->firm_version);
  1541. lrwbuffer++;
  1542. acb->firm_request_len = readl(lrwbuffer++);
  1543. /*firm_request_len,1,04-07*/
  1544. acb->firm_numbers_queue = readl(lrwbuffer++);
  1545. /*firm_numbers_queue,2,08-11*/
  1546. acb->firm_sdram_size = readl(lrwbuffer++);
  1547. /*firm_sdram_size,3,12-15*/
  1548. acb->firm_hd_channels = readl(lrwbuffer);
  1549. /*firm_ide_channels,4,16-19*/
  1550. }
  1551. static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
  1552. {
  1553. switch (acb->adapter_type) {
  1554. case ACB_ADAPTER_TYPE_A: {
  1555. arcmsr_get_hba_config(acb);
  1556. }
  1557. break;
  1558. case ACB_ADAPTER_TYPE_B: {
  1559. arcmsr_get_hbb_config(acb);
  1560. }
  1561. break;
  1562. }
  1563. }
/*
 * arcmsr_polling_hba_ccbdone - poll the type-A outbound queue until
 * @poll_ccb is seen completed (or ~2.5s of empty polls elapse), servicing
 * every completion pulled off the queue on the way.  Used by the abort
 * path while outbound interrupts are masked, so completions must be
 * reaped here rather than in the ISR.
 */
static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
polling_hba_ccb_retry:
	poll_count++;
	/* ack any pending interrupt causes so stale status does not linger */
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		/* 0xFFFFFFFF from the queue port means "queue empty" */
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done)
				break;
			else {
				msleep(25);
				/* give up after ~100 * 25ms of empty polls */
				if (poll_count > 100)
					break;
				goto polling_hba_ccb_retry;
			}
		}
		/* flag_ccb holds the CCB bus address >> 5 (32-byte aligned
		 * frames); vir2phy_offset converts back to a virtual pointer */
		ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			/* an aborted ccb (or the one we are waiting for)
			 * completes back to the mid-layer with DID_ABORT */
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				poll_ccb_done = 1;
				continue;
			}
			/* neither started nor aborted: firmware handed back a
			 * ccb we do not recognise — log and skip it */
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* normal completion path */
		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
	}
}
/*
 * arcmsr_polling_hbb_ccbdone - type-B counterpart of
 * arcmsr_polling_hba_ccbdone(): drain the done-queue ring until @poll_ccb
 * is observed (or polling times out), completing each reaped CCB.
 */
static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index;
polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
	while (1) {
		index = reg->doneq_index;
		/* a zero entry means this done-queue slot is empty */
		if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
			if (poll_ccb_done)
				break;
			else {
				msleep(25);
				/* give up after ~100 * 25ms of empty polls */
				if (poll_count > 100)
					break;
				goto polling_hbb_ccb_retry;
			}
		}
		/* consume the slot, then advance the ring index */
		writel(0, &reg->done_qbuffer[index]);
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check ifcommand done with no error*/
		ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			/* aborted ccb: complete it back with DID_ABORT */
			if (ccb->startdone == ARCMSR_CCB_ABORTED) {
				printk(KERN_NOTICE "arcmsr%d: \
scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n"
					,acb->host->host_no
					,ccb->pcmd->device->id
					,ccb->pcmd->device->lun
					,ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				continue;
			}
			/* unrecognised ccb from the firmware — log and skip */
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* normal completion path */
		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
	} /*drain reply FIFO*/
}
  1666. static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, \
  1667. struct CommandControlBlock *poll_ccb)
  1668. {
  1669. switch (acb->adapter_type) {
  1670. case ACB_ADAPTER_TYPE_A: {
  1671. arcmsr_polling_hba_ccbdone(acb,poll_ccb);
  1672. }
  1673. break;
  1674. case ACB_ADAPTER_TYPE_B: {
  1675. arcmsr_polling_hbb_ccbdone(acb,poll_ccb);
  1676. }
  1677. }
  1678. }
  1679. static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
  1680. {
  1681. uint32_t cdb_phyaddr, ccb_phyaddr_hi32;
  1682. dma_addr_t dma_coherent_handle;
  1683. /*
  1684. ********************************************************************
  1685. ** here we need to tell iop 331 our freeccb.HighPart
  1686. ** if freeccb.HighPart is not zero
  1687. ********************************************************************
  1688. */
  1689. dma_coherent_handle = acb->dma_coherent_handle;
  1690. cdb_phyaddr = (uint32_t)(dma_coherent_handle);
  1691. ccb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
  1692. /*
  1693. ***********************************************************************
  1694. ** if adapter type B, set window of "post command Q"
  1695. ***********************************************************************
  1696. */
  1697. switch (acb->adapter_type) {
  1698. case ACB_ADAPTER_TYPE_A: {
  1699. if (ccb_phyaddr_hi32 != 0) {
  1700. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1701. uint32_t intmask_org;
  1702. intmask_org = arcmsr_disable_outbound_ints(acb);
  1703. writel(ARCMSR_SIGNATURE_SET_CONFIG, \
  1704. &reg->message_rwbuffer[0]);
  1705. writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
  1706. writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
  1707. &reg->inbound_msgaddr0);
  1708. if (arcmsr_hba_wait_msgint_ready(acb)) {
  1709. printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
  1710. part physical address timeout\n",
  1711. acb->host->host_no);
  1712. return 1;
  1713. }
  1714. arcmsr_enable_outbound_ints(acb, intmask_org);
  1715. }
  1716. }
  1717. break;
  1718. case ACB_ADAPTER_TYPE_B: {
  1719. unsigned long post_queue_phyaddr;
  1720. uint32_t __iomem *rwbuffer;
  1721. struct MessageUnit_B *reg = acb->pmuB;
  1722. uint32_t intmask_org;
  1723. intmask_org = arcmsr_disable_outbound_ints(acb);
  1724. reg->postq_index = 0;
  1725. reg->doneq_index = 0;
  1726. writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell_reg);
  1727. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  1728. printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
  1729. acb->host->host_no);
  1730. return 1;
  1731. }
  1732. post_queue_phyaddr = cdb_phyaddr + ARCMSR_MAX_FREECCB_NUM * \
  1733. sizeof(struct CommandControlBlock) + offsetof(struct MessageUnit_B, post_qbuffer) ;
  1734. rwbuffer = reg->msgcode_rwbuffer_reg;
  1735. /* driver "set config" signature */
  1736. writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
  1737. /* normal should be zero */
  1738. writel(ccb_phyaddr_hi32, rwbuffer++);
  1739. /* postQ size (256 + 8)*4 */
  1740. writel(post_queue_phyaddr, rwbuffer++);
  1741. /* doneQ size (256 + 8)*4 */
  1742. writel(post_queue_phyaddr + 1056, rwbuffer++);
  1743. /* ccb maxQ size must be --> [(256 + 8)*4]*/
  1744. writel(1056, rwbuffer);
  1745. writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell_reg);
  1746. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  1747. printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
  1748. timeout \n",acb->host->host_no);
  1749. return 1;
  1750. }
  1751. writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell_reg);
  1752. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  1753. printk(KERN_NOTICE "arcmsr%d: 'can not set diver mode \n"\
  1754. ,acb->host->host_no);
  1755. return 1;
  1756. }
  1757. arcmsr_enable_outbound_ints(acb, intmask_org);
  1758. }
  1759. break;
  1760. }
  1761. return 0;
  1762. }
  1763. static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
  1764. {
  1765. uint32_t firmware_state = 0;
  1766. switch (acb->adapter_type) {
  1767. case ACB_ADAPTER_TYPE_A: {
  1768. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1769. do {
  1770. firmware_state = readl(&reg->outbound_msgaddr1);
  1771. } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
  1772. }
  1773. break;
  1774. case ACB_ADAPTER_TYPE_B: {
  1775. struct MessageUnit_B *reg = acb->pmuB;
  1776. do {
  1777. firmware_state = readl(reg->iop2drv_doorbell_reg);
  1778. } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
  1779. }
  1780. break;
  1781. }
  1782. }
  1783. static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
  1784. {
  1785. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1786. acb->acb_flags |= ACB_F_MSG_START_BGRB;
  1787. writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
  1788. if (arcmsr_hba_wait_msgint_ready(acb)) {
  1789. printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
  1790. rebulid' timeout \n", acb->host->host_no);
  1791. }
  1792. }
  1793. static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
  1794. {
  1795. struct MessageUnit_B *reg = acb->pmuB;
  1796. acb->acb_flags |= ACB_F_MSG_START_BGRB;
  1797. writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg);
  1798. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  1799. printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
  1800. rebulid' timeout \n",acb->host->host_no);
  1801. }
  1802. }
  1803. static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
  1804. {
  1805. switch (acb->adapter_type) {
  1806. case ACB_ADAPTER_TYPE_A:
  1807. arcmsr_start_hba_bgrb(acb);
  1808. break;
  1809. case ACB_ADAPTER_TYPE_B:
  1810. arcmsr_start_hbb_bgrb(acb);
  1811. break;
  1812. }
  1813. }
  1814. static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
  1815. {
  1816. switch (acb->adapter_type) {
  1817. case ACB_ADAPTER_TYPE_A: {
  1818. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1819. uint32_t outbound_doorbell;
  1820. /* empty doorbell Qbuffer if door bell ringed */
  1821. outbound_doorbell = readl(&reg->outbound_doorbell);
  1822. /*clear doorbell interrupt */
  1823. writel(outbound_doorbell, &reg->outbound_doorbell);
  1824. writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
  1825. }
  1826. break;
  1827. case ACB_ADAPTER_TYPE_B: {
  1828. struct MessageUnit_B *reg = acb->pmuB;
  1829. /*clear interrupt and message state*/
  1830. writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
  1831. writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
  1832. /* let IOP know data has been read */
  1833. }
  1834. break;
  1835. }
  1836. }
/*
 * arcmsr_iop_init - bring the adapter online.  The sequence is
 * order-sensitive: wait for firmware, hand it the CCB pool address,
 * mask interrupts while querying config and starting rebuild, flush
 * stale doorbell data, then unmask and mark the IOP initialised.
 */
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/* disable all outbound interrupt */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_get_firmware_spec(acb);
	/*start background rebuild*/
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	/* enable outbound Post Queue,outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
  1853. static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
  1854. {
  1855. struct CommandControlBlock *ccb;
  1856. uint32_t intmask_org;
  1857. int i = 0;
  1858. if (atomic_read(&acb->ccboutstandingcount) != 0) {
  1859. /* talk to iop 331 outstanding command aborted */
  1860. arcmsr_abort_allcmd(acb);
  1861. /* wait for 3 sec for all command aborted*/
  1862. ssleep(3);
  1863. /* disable all outbound interrupt */
  1864. intmask_org = arcmsr_disable_outbound_ints(acb);
  1865. /* clear all outbound posted Q */
  1866. arcmsr_done4abort_postqueue(acb);
  1867. for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
  1868. ccb = acb->pccb_pool[i];
  1869. if (ccb->startdone == ARCMSR_CCB_START) {
  1870. ccb->startdone = ARCMSR_CCB_ABORTED;
  1871. arcmsr_ccb_complete(ccb, 1);
  1872. }
  1873. }
  1874. /* enable all outbound interrupt */
  1875. arcmsr_enable_outbound_ints(acb, intmask_org);
  1876. }
  1877. }
  1878. static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
  1879. {
  1880. struct AdapterControlBlock *acb =
  1881. (struct AdapterControlBlock *)cmd->device->host->hostdata;
  1882. int i;
  1883. acb->num_resets++;
  1884. acb->acb_flags |= ACB_F_BUS_RESET;
  1885. for (i = 0; i < 400; i++) {
  1886. if (!atomic_read(&acb->ccboutstandingcount))
  1887. break;
  1888. arcmsr_interrupt(acb);/* FIXME: need spinlock */
  1889. msleep(25);
  1890. }
  1891. arcmsr_iop_reset(acb);
  1892. acb->acb_flags &= ~ACB_F_BUS_RESET;
  1893. return SUCCESS;
  1894. }
  1895. static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
  1896. struct CommandControlBlock *ccb)
  1897. {
  1898. u32 intmask;
  1899. ccb->startdone = ARCMSR_CCB_ABORTED;
  1900. /*
  1901. ** Wait for 3 sec for all command done.
  1902. */
  1903. ssleep(3);
  1904. intmask = arcmsr_disable_outbound_ints(acb);
  1905. arcmsr_polling_ccbdone(acb, ccb);
  1906. arcmsr_enable_outbound_ints(acb, intmask);
  1907. }
  1908. static int arcmsr_abort(struct scsi_cmnd *cmd)
  1909. {
  1910. struct AdapterControlBlock *acb =
  1911. (struct AdapterControlBlock *)cmd->device->host->hostdata;
  1912. int i = 0;
  1913. printk(KERN_NOTICE
  1914. "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
  1915. acb->host->host_no, cmd->device->id, cmd->device->lun);
  1916. acb->num_aborts++;
  1917. /*
  1918. ************************************************
  1919. ** the all interrupt service routine is locked
  1920. ** we need to handle it as soon as possible and exit
  1921. ************************************************
  1922. */
  1923. if (!atomic_read(&acb->ccboutstandingcount))
  1924. return SUCCESS;
  1925. for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
  1926. struct CommandControlBlock *ccb = acb->pccb_pool[i];
  1927. if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
  1928. arcmsr_abort_one_cmd(acb, ccb);
  1929. break;
  1930. }
  1931. }
  1932. return SUCCESS;
  1933. }
  1934. static const char *arcmsr_info(struct Scsi_Host *host)
  1935. {
  1936. struct AdapterControlBlock *acb =
  1937. (struct AdapterControlBlock *) host->hostdata;
  1938. static char buf[256];
  1939. char *type;
  1940. int raid6 = 1;
  1941. switch (acb->pdev->device) {
  1942. case PCI_DEVICE_ID_ARECA_1110:
  1943. case PCI_DEVICE_ID_ARECA_1200:
  1944. case PCI_DEVICE_ID_ARECA_1202:
  1945. case PCI_DEVICE_ID_ARECA_1210:
  1946. raid6 = 0;
  1947. /*FALLTHRU*/
  1948. case PCI_DEVICE_ID_ARECA_1120:
  1949. case PCI_DEVICE_ID_ARECA_1130:
  1950. case PCI_DEVICE_ID_ARECA_1160:
  1951. case PCI_DEVICE_ID_ARECA_1170:
  1952. case PCI_DEVICE_ID_ARECA_1201:
  1953. case PCI_DEVICE_ID_ARECA_1220:
  1954. case PCI_DEVICE_ID_ARECA_1230:
  1955. case PCI_DEVICE_ID_ARECA_1260:
  1956. case PCI_DEVICE_ID_ARECA_1270:
  1957. case PCI_DEVICE_ID_ARECA_1280:
  1958. type = "SATA";
  1959. break;
  1960. case PCI_DEVICE_ID_ARECA_1380:
  1961. case PCI_DEVICE_ID_ARECA_1381:
  1962. case PCI_DEVICE_ID_ARECA_1680:
  1963. case PCI_DEVICE_ID_ARECA_1681:
  1964. type = "SAS";
  1965. break;
  1966. default:
  1967. type = "X-TYPE";
  1968. break;
  1969. }
  1970. sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
  1971. type, raid6 ? "( RAID6 capable)" : "",
  1972. ARCMSR_DRIVER_VERSION);
  1973. return buf;
  1974. }
  1975. #ifdef CONFIG_SCSI_ARCMSR_AER
/*
 * arcmsr_pci_slot_reset - PCI error-recovery slot-reset callback.
 * Re-enables the device, resets the message-buffer flags and per-device
 * state, and replays the same order-sensitive init sequence as
 * arcmsr_iop_init() before re-arming PCIe error reporting.
 */
static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	uint32_t intmask_org;
	int i, j;
	if (pci_enable_device(pdev)) {
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	intmask_org = arcmsr_disable_outbound_ints(acb);
	/* reset message-queue bookkeeping to the "empty" state */
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
		ACB_F_MESSAGE_RQBUFFER_CLEARED |
		ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	/* forget all previous device states; volumes are rediscovered */
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/* disable all outbound interrupt */
	arcmsr_get_firmware_spec(acb);
	/*start background rebuild*/
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	/* enable outbound Post Queue,outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
	pci_enable_pcie_error_reporting(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
  2009. static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
  2010. {
  2011. struct Scsi_Host *host = pci_get_drvdata(pdev);
  2012. struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata;
  2013. struct CommandControlBlock *ccb;
  2014. uint32_t intmask_org;
  2015. int i = 0;
  2016. if (atomic_read(&acb->ccboutstandingcount) != 0) {
  2017. /* talk to iop 331 outstanding command aborted */
  2018. arcmsr_abort_allcmd(acb);
  2019. /* wait for 3 sec for all command aborted*/
  2020. ssleep(3);
  2021. /* disable all outbound interrupt */
  2022. intmask_org = arcmsr_disable_outbound_ints(acb);
  2023. /* clear all outbound posted Q */
  2024. arcmsr_done4abort_postqueue(acb);
  2025. for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
  2026. ccb = acb->pccb_pool[i];
  2027. if (ccb->startdone == ARCMSR_CCB_START) {
  2028. ccb->startdone = ARCMSR_CCB_ABORTED;
  2029. arcmsr_ccb_complete(ccb, 1);
  2030. }
  2031. }
  2032. /* enable all outbound interrupt */
  2033. arcmsr_enable_outbound_ints(acb, intmask_org);
  2034. }
  2035. pci_disable_device(pdev);
  2036. }
  2037. static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
  2038. {
  2039. struct Scsi_Host *host = pci_get_drvdata(pdev);
  2040. struct AdapterControlBlock *acb = \
  2041. (struct AdapterControlBlock *)host->hostdata;
  2042. arcmsr_stop_adapter_bgrb(acb);
  2043. arcmsr_flush_adapter_cache(acb);
  2044. }
  2045. static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
  2046. pci_channel_state_t state)
  2047. {
  2048. switch (state) {
  2049. case pci_channel_io_frozen:
  2050. arcmsr_pci_ers_need_reset_forepart(pdev);
  2051. return PCI_ERS_RESULT_NEED_RESET;
  2052. case pci_channel_io_perm_failure:
  2053. arcmsr_pci_ers_disconnect_forepart(pdev);
  2054. return PCI_ERS_RESULT_DISCONNECT;
  2055. break;
  2056. default:
  2057. return PCI_ERS_RESULT_NEED_RESET;
  2058. }
  2059. }
  2060. #endif