/*
*******************************************************************************
** O.S         : Linux
** FILE NAME   : arcmsr_hba.c
** BY          : Nick Cheng
** Description : SCSI RAID Device Driver for
**               ARECA RAID Host adapter
*******************************************************************************
** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
**
** Web site: www.areca.com.tw
** E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
*******************************************************************************
*/
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"
MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>");
MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);

#define ARCMSR_SLEEPTIME	10
#define ARCMSR_RETRYCOUNT	12

wait_queue_head_t wait_q;
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
	struct scsi_cmnd *cmd);
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int arcmsr_probe(struct pci_dev *pdev,
	const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(unsigned long pacb);
static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbc_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
		int queue_depth, int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
		queue_depth = ARCMSR_MAX_CMD_PERLUN;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.name			= "ARCMSR ARECA SATA/SAS RAID Controller"
				  ARCMSR_DRIVER_VERSION,
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_MAX_FREECCB_NUM,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= arcmsr_host_attrs,
};
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880)},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);

static struct pci_driver arcmsr_pci_driver = {
	.name		= "arcmsr",
	.id_table	= arcmsr_device_id_table,
	.probe		= arcmsr_probe,
	.remove		= arcmsr_remove,
	.shutdown	= arcmsr_shutdown,
};
/*
****************************************************************************
****************************************************************************
*/
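/*
** Only type B adapters allocate a separate coherent DMA buffer for their
** MessageUnit_B structure; types A and C reach theirs through the mapped
** PCI BAR space, so there is nothing to free for them here.
*/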
static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
	case ACB_ADAPTER_TYPE_C:
		break;
	case ACB_ADAPTER_TYPE_B: {
		dma_free_coherent(&acb->pdev->dev,
			sizeof(struct MessageUnit_B),
			acb->pmuB, acb->dma_coherent_handle_hbb_mu);
	}
	}
}
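/*
** Map the controller's register window(s): type A uses BAR 0, type B
** needs both BAR 0 and BAR 2, and type C (ARC-1880) uses an uncached
** mapping of BAR 1.
*/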
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		acb->pmuA = ioremap(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region failed\n",
				acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		void __iomem *mem_base0, *mem_base1;

		mem_base0 = ioremap(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region failed\n",
				acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2),
			pci_resource_len(pdev, 2));
		if (!mem_base1) {
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region failed\n",
				acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1),
			pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region failed\n",
				acb->host->host_no);
			return false;
		}
		if (readl(&acb->pmuC->outbound_doorbell) &
			ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			/* clear the pending "message done" interrupt */
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
				&acb->pmuC->outbound_doorbell_clear);
			return true;
		}
		break;
	}
	}
	return true;
}
static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		iounmap(acb->pmuA);
		break;
	case ACB_ADAPTER_TYPE_B:
		iounmap(acb->mem_base0);
		iounmap(acb->mem_base1);
		break;
	case ACB_ADAPTER_TYPE_C:
		iounmap(acb->pmuC);
	}
}
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
	irqreturn_t handle_state;
	struct AdapterControlBlock *acb = dev_id;

	handle_state = arcmsr_interrupt(acb);
	return handle_state;
}
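/*
** Report a disk geometry for the BIOS: prefer whatever geometry the
** on-disk partition table records; otherwise fall back to 64 heads x 32
** sectors, switching to 255 x 63 once the cylinder count would exceed
** 1024.
*/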
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	int ret, heads, sectors, cylinders, total_capacity;
	unsigned char *buffer; /* return copy of block device's partition table */

	buffer = scsi_bios_ptable(bdev);
	if (buffer) {
		ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
		kfree(buffer);
		if (ret != -1)
			return ret;
	}
	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}
static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	u16 dev_id;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
	acb->dev_id = dev_id;
	switch (dev_id) {
	case 0x1880:
		acb->adapter_type = ACB_ADAPTER_TYPE_C;
		break;
	case 0x1201:
		acb->adapter_type = ACB_ADAPTER_TYPE_B;
		break;
	default:
		acb->adapter_type = ACB_ADAPTER_TYPE_A;
	}
}
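/*
** The wait_msgint_ready helpers below poll the adapter's outbound
** doorbell/status register for the "message command done" bit and clear
** it once seen: 2000 iterations with a 10 ms sleep gives each message
** handshake at most 20 seconds to complete.
*/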
static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(&reg->outbound_intstatus) &
			ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
			writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
				&reg->outbound_intstatus);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */
	return false;
}

static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(reg->iop2drv_doorbell) &
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
				reg->iop2drv_doorbell);
			writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
				reg->drv2iop_doorbell);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */
	return false;
}

static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(&phbcmu->outbound_doorbell) &
			ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			/* clear interrupt */
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
				&phbcmu->outbound_doorbell_clear);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */
	return false;
}
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int retry_count = 30;

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	do {
		if (arcmsr_hba_wait_msgint_ready(acb))
			break;
		retry_count--;
		printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' timeout, retry count down = %d\n",
			acb->host->host_no, retry_count);
	} while (retry_count != 0);
}

static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int retry_count = 30;

	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
	do {
		if (arcmsr_hbb_wait_msgint_ready(acb))
			break;
		retry_count--;
		printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' timeout, retry count down = %d\n",
			acb->host->host_no, retry_count);
	} while (retry_count != 0);
}

static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
	int retry_count = 30; /* 30 retries x 20 s: wait up to 10 minutes for the cache flush */

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	do {
		if (arcmsr_hbc_wait_msgint_ready(pACB))
			break;
		retry_count--;
		printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' timeout, retry count down = %d\n",
			pACB->host->host_no, retry_count);
	} while (retry_count != 0);
}

static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_flush_hba_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_flush_hbb_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_flush_hbc_cache(acb);
	}
}
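/*
** All CommandControlBlocks live in one coherent DMA region. Each CCB is
** rounded up to a 32-byte boundary so the low five bits of its bus
** address are free: types A and B post (cdb_phyaddr >> 5) to the
** hardware, while type C posts the full address.
*/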
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct CommandControlBlock *ccb_tmp;
	int i = 0, j = 0;
	dma_addr_t cdb_phyaddr;
	unsigned long roundup_ccbsize;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	uint32_t firm_config_version;

	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;
	max_xfer_len = ARCMSR_MAX_XFER_LEN;
	max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
	firm_config_version = acb->firm_cfg_version;
	if ((firm_config_version & 0xFF) >= 3) {
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH <<
			((firm_config_version >> 8) & 0xFF)) * 1024; /* max 4 Mbyte */
		max_sg_entrys = (max_xfer_len / 4096);
	}
	acb->host->max_sectors = max_xfer_len / 512;
	acb->host->sg_tablesize = max_sg_entrys;
	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) +
		(max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
	acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size,
		&dma_coherent_handle, GFP_KERNEL);
	if (!dma_coherent) {
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n",
			acb->host->host_no);
		return -ENOMEM;
	}
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;
	memset(dma_coherent, 0, acb->uncache_size);
	ccb_tmp = dma_coherent;
	acb->vir2phy_offset = (unsigned long)dma_coherent -
		(unsigned long)dma_coherent_handle;
	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
		cdb_phyaddr = dma_coherent_handle +
			offsetof(struct CommandControlBlock, arcmsr_cdb);
		ccb_tmp->cdb_phyaddr_pattern =
			(acb->adapter_type == ACB_ADAPTER_TYPE_C) ?
			cdb_phyaddr : (cdb_phyaddr >> 5);
		acb->pccb_pool[i] = ccb_tmp;
		ccb_tmp->acb = acb;
		INIT_LIST_HEAD(&ccb_tmp->list);
		list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp +
			roundup_ccbsize);
		dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
	}
	return 0;
}
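/*
** Bottom-half worker for the "get config" message interrupt: compare the
** firmware-reported device map against the driver's cached copy, one
** byte per target ID, and hot-add or hot-remove SCSI devices for every
** LUN bit that changed.
*/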
static void arcmsr_message_isr_bh_fn(struct work_struct *work)
{
	struct AdapterControlBlock *acb = container_of(work,
		struct AdapterControlBlock, arcmsr_do_message_isr_bh);

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		char *acb_dev_map = (char *)acb->device_map;
		uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		char __iomem *devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		int target, lun;
		struct scsi_device *psdev;
		char diff;

		atomic_inc(&acb->rq_map_token);
		if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
			for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
				diff = (*acb_dev_map) ^ readb(devicemap);
				if (diff != 0) {
					char temp;

					*acb_dev_map = readb(devicemap);
					temp = *acb_dev_map;
					for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
						if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
							scsi_add_device(acb->host, 0, target, lun);
						} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
							psdev = scsi_device_lookup(acb->host, 0, target, lun);
							if (psdev != NULL) {
								scsi_remove_device(psdev);
								scsi_device_put(psdev);
							}
						}
						temp >>= 1;
						diff >>= 1;
					}
				}
				devicemap++;
				acb_dev_map++;
			}
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		char *acb_dev_map = (char *)acb->device_map;
		uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		char __iomem *devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		int target, lun;
		struct scsi_device *psdev;
		char diff;

		atomic_inc(&acb->rq_map_token);
		if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
			for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
				diff = (*acb_dev_map) ^ readb(devicemap);
				if (diff != 0) {
					char temp;

					*acb_dev_map = readb(devicemap);
					temp = *acb_dev_map;
					for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
						if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
							scsi_add_device(acb->host, 0, target, lun);
						} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
							psdev = scsi_device_lookup(acb->host, 0, target, lun);
							if (psdev != NULL) {
								scsi_remove_device(psdev);
								scsi_device_put(psdev);
							}
						}
						temp >>= 1;
						diff >>= 1;
					}
				}
				devicemap++;
				acb_dev_map++;
			}
		}
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;
		char *acb_dev_map = (char *)acb->device_map;
		uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		int target, lun;
		struct scsi_device *psdev;
		char diff;

		atomic_inc(&acb->rq_map_token);
		if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
			for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
				diff = (*acb_dev_map) ^ readb(devicemap);
				if (diff != 0) {
					char temp;

					*acb_dev_map = readb(devicemap);
					temp = *acb_dev_map;
					for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
						if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
							scsi_add_device(acb->host, 0, target, lun);
						} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
							psdev = scsi_device_lookup(acb->host, 0, target, lun);
							if (psdev != NULL) {
								scsi_remove_device(psdev);
								scsi_device_put(psdev);
							}
						}
						temp >>= 1;
						diff >>= 1;
					}
				}
				devicemap++;
				acb_dev_map++;
			}
		}
	}
	}
}
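/*
** Probe path: enable the PCI device, set the DMA mask (64-bit with a
** 32-bit fallback), map the register BARs, read the firmware spec,
** allocate the CCB pool, initialize the IOP, register with the SCSI
** midlayer, hook the interrupt, and start the periodic device-map
** timer. Each failure unwinds through the labels below in reverse
** order.
*/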
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus, dev_fun;
	int error;

	error = pci_enable_device(pdev);
	if (error)
		return -ENODEV;
	host = scsi_host_alloc(&arcmsr_scsi_host_template,
		sizeof(struct AdapterControlBlock));
	if (!host)
		goto pci_disable_dev;
	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (error) {
		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (error) {
			printk(KERN_WARNING
				"scsi%d: No suitable DMA mask available\n",
				host->host_no);
			goto scsi_host_release;
		}
	}
	init_waitqueue_head(&wait_q);
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb = (struct AdapterControlBlock *) host->hostdata;
	memset(acb, 0, sizeof(struct AdapterControlBlock));
	acb->pdev = pdev;
	acb->host = host;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;	/* 16:8 */
	host->max_cmd_len = 16;	/* 16-byte CDBs for 64-bit LBA, i.e. volumes over 2 TB */
	host->can_queue = ARCMSR_MAX_FREECCB_NUM;	/* max simultaneous cmds */
	host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	pci_set_drvdata(pdev, host);
	pci_set_master(pdev);
	error = pci_request_regions(pdev, "arcmsr");
	if (error)
		goto scsi_host_release;
	spin_lock_init(&acb->eh_lock);
	spin_lock_init(&acb->ccblist_lock);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);
	arcmsr_define_adapter_type(acb);
	error = arcmsr_remap_pciregion(acb);
	if (!error)
		goto pci_release_regs;
	error = arcmsr_get_firmware_spec(acb);
	if (!error)
		goto unmap_pci_region;
	error = arcmsr_alloc_ccb_pool(acb);
	if (error)
		goto free_hbb_mu;
	arcmsr_iop_init(acb);
	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto RAID_controller_stop;
	error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED,
		"arcmsr", acb);
	if (error)
		goto scsi_host_remove;
	host->irq = pdev->irq;
	scsi_scan_host(host);
	INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
	atomic_set(&acb->rq_map_token, 16);
	atomic_set(&acb->ante_token_value, 16);
	acb->fw_flag = FW_NORMAL;
	init_timer(&acb->eternal_timer);
	acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
	acb->eternal_timer.data = (unsigned long) acb;
	acb->eternal_timer.function = &arcmsr_request_device_map;
	add_timer(&acb->eternal_timer);
	if (arcmsr_alloc_sysfs_attr(acb))
		goto out_free_sysfs;
	return 0;
out_free_sysfs:
scsi_host_remove:
	scsi_remove_host(host);
RAID_controller_stop:
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_ccb_pool(acb);
free_hbb_mu:
	arcmsr_free_hbb_mu(acb);
unmap_pci_region:
	arcmsr_unmap_pciregion(acb);
pci_release_regs:
	pci_release_regions(pdev);
scsi_host_release:
	scsi_host_put(host);
pci_disable_dev:
	pci_disable_device(pdev);
	return -ENODEV;
}
static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n",
			acb->host->host_no);
		return false;
	}
	return true;
}

static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n",
			acb->host->host_no);
		return false;
	}
	return true;
}

static uint8_t arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n",
			pACB->host->host_no);
		return false;
	}
	return true;
}

static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
	uint8_t rtnval = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtnval = arcmsr_abort_hba_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtnval = arcmsr_abort_hbb_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtnval = arcmsr_abort_hbc_allcmd(acb);
	}
	return rtnval;
}
static bool arcmsr_hbb_enable_driver_mode(struct AdapterControlBlock *pacb)
{
	struct MessageUnit_B *reg = pacb->pmuB;

	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(pacb)) {
		printk(KERN_ERR "arcmsr%d: can't set driver mode.\n",
			pacb->host->host_no);
		return false;
	}
	return true;
}

static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;

	scsi_dma_unmap(pcmd);
}

static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
{
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;
	unsigned long flags;

	atomic_dec(&acb->ccboutstandingcount);
	arcmsr_pci_unmap_dma(ccb);
	ccb->startdone = ARCMSR_CCB_DONE;
	spin_lock_irqsave(&acb->ccblist_lock, flags);
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	pcmd->scsi_done(pcmd);
}
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;
	struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;

	pcmd->result = DID_OK << 16;
	if (sensebuffer) {
		int sense_data_length =
			sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE ?
			sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
		memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
		memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
		sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
		sensebuffer->Valid = 1;
	}
}
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
	u32 orig_mask = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		orig_mask = readl(&reg->outbound_intmask);
		writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
			&reg->outbound_intmask);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		orig_mask = readl(reg->iop2drv_doorbell_mask);
		writel(0, reg->iop2drv_doorbell_mask);
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;

		/* disable all outbound interrupts, including message0 */
		orig_mask = readl(&reg->host_int_mask);
		writel(orig_mask | ARCMSR_HBCMU_ALL_INTMASKENABLE,
			&reg->host_int_mask);
		break;
	}
	}
	return orig_mask;
}
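/*
** Translate the controller's DeviceStatus for a failed CCB into a SCSI
** midlayer result: selection timeout becomes DID_NO_CONNECT, aborted or
** init-failed devices become DID_BAD_TARGET, and check conditions pass
** the firmware's sense data back through arcmsr_report_sense_info().
*/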
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, bool error)
{
	uint8_t id, lun;

	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!error) {
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb);
	} else {
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT:
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			break;
		case ARCMSR_DEV_ABORTED:
		case ARCMSR_DEV_INIT_FAIL:
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb);
			break;
		case ARCMSR_DEV_CHECK_CONDITION:
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb);
			break;
		default:
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d isr got command error done, but got unknown DeviceStatus = 0x%x\n",
				acb->host->host_no, id, lun,
				ccb->arcmsr_cdb.DeviceStatus);
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			break;
		}
	}
}
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb,
		struct CommandControlBlock *pCCB, bool error)
{
	int id, lun;

	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
		if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = pCCB->pcmd;

			if (abortcmd) {
				id = abortcmd->device->id;
				lun = abortcmd->device->lun;
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				printk(KERN_NOTICE "arcmsr%d: pCCB = '0x%p' isr got aborted command\n",
					acb->host->host_no, pCCB);
			}
			return;
		}
		printk(KERN_NOTICE
			"arcmsr%d: isr got an illegal ccb command done: acb = '0x%p' ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x ccboutstandingcount = %d\n",
			acb->host->host_no, acb, pCCB, pCCB->acb,
			pCCB->startdone,
			atomic_read(&acb->ccboutstandingcount));
		return;
	}
	arcmsr_report_ccb_state(acb, pCCB, error);
}
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	int i = 0;
	uint32_t flag_ccb;
	struct ARCMSR_CDB *pARCMSR_CDB;
	bool error;
	struct CommandControlBlock *pCCB;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_intstatus;

		outbound_intstatus = readl(&reg->outbound_intstatus) &
			acb->outbound_int_enable;
		/* clear and abort all outbound posted Q */
		writel(outbound_intstatus, &reg->outbound_intstatus); /* clear interrupt */
		while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) &&
			(i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			/* the frame must be 32 bytes aligned */
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
				(flag_ccb << 5));
			pCCB = container_of(pARCMSR_CDB,
				struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
				true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		/* clear all outbound posted Q */
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN,
			reg->iop2drv_doorbell); /* clear doorbell interrupt */
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			flag_ccb = readl(&reg->done_qbuffer[i]);
			if (flag_ccb != 0) {
				writel(0, &reg->done_qbuffer[i]);
				/* the frame must be 32 bytes aligned */
				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
					(flag_ccb << 5));
				pCCB = container_of(pARCMSR_CDB,
					struct CommandControlBlock, arcmsr_cdb);
				error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
					true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
			}
			reg->post_qbuffer[i] = 0;
		}
		reg->doneq_index = 0;
		reg->postq_index = 0;
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;
		uint32_t ccb_cdb_phy;

		while ((readl(&reg->host_int_status) &
			ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) &&
			(i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			/* need to do */
			flag_ccb = readl(&reg->outbound_queueport_low);
			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
			/* the frame must be 32 bytes aligned */
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
				ccb_cdb_phy);
			pCCB = container_of(pARCMSR_CDB,
				struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
				true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
	}
	}
}
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;

	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work_sync(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;
	for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb); /* FIXME: need spinlock */
		msleep(25);
	}
	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];

			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
			}
		}
	}
	free_irq(pdev->irq, acb);
	arcmsr_free_ccb_pool(acb);
	arcmsr_free_hbb_mu(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	del_timer_sync(&acb->eternal_timer);
	arcmsr_disable_outbound_ints(acb);
	flush_work_sync(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}
static int arcmsr_module_init(void)
{
	int error = 0;

	error = pci_register_driver(&arcmsr_pci_driver);
	return error;
}

static void arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
		u32 intmask_org)
{
	u32 mask;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;

		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK |
			ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK |
			ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
	}
	}
}
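/*
** Build the ARCMSR_CDB for a SCSI command: copy the CDB, then walk the
** mapped scatter-gather list, emitting a 32-bit SG entry when the
** segment's high address bits are zero and a 64-bit entry otherwise.
** Frames larger than 256 bytes are flagged with ARCMSR_CDB_FLAG_SGL_BSIZE.
*/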
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;
	__le32 length = 0;
	int i;
	struct scatterlist *sg;
	int nseg;

	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->Context = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof(struct SG32ENTRY);
			arccdbsize += sizeof(struct SG32ENTRY);
		} else {
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			pdma_sg->length = length | cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof(struct SG64ENTRY);
			arccdbsize += sizeof(struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	arcmsr_cdb->msgPages = arccdbsize / 0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if (arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}
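/*
** Hand a built CCB to the adapter. Type A writes the (address >> 5)
** pattern to the inbound queue port, type B stores it into the circular
** post_qbuffer and rings the drv2iop doorbell, and type C writes the
** full address plus the encoded frame size to the inbound queue ports.
*/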
static void arcmsr_post_ccb(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;

	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->inbound_queueport);
		else
			writel(cdb_phyaddr_pattern, &reg->inbound_queueport);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		writel(0, &reg->post_qbuffer[ending_index]);
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->post_qbuffer[index]);
		else
			writel(cdb_phyaddr_pattern, &reg->post_qbuffer[index]);
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE; /* wrap the last index back to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;

		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr_pattern | ((arc_cdb_size - 1) >> 6) | 1);
		if (acb->cdb_phyaddr_hi32) {
			writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		} else {
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		}
	}
	}
}
  1133. static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
  1134. {
  1135. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1136. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  1137. writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
  1138. if (!arcmsr_hba_wait_msgint_ready(acb)) {
  1139. printk(KERN_NOTICE
  1140. "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
  1141. , acb->host->host_no);
  1142. }
  1143. }
  1144. static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
  1145. {
  1146. struct MessageUnit_B *reg = acb->pmuB;
  1147. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  1148. writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
  1149. if (!arcmsr_hbb_wait_msgint_ready(acb)) {
  1150. printk(KERN_NOTICE
  1151. "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
  1152. , acb->host->host_no);
  1153. }
  1154. }
  1155. static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *pACB)
  1156. {
  1157. struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
  1158. pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
  1159. writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
  1160. writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
  1161. if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
  1162. printk(KERN_NOTICE
  1163. "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
  1164. , pACB->host->host_no);
  1165. }
  1166. return;
  1167. }
  1168. static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
  1169. {
  1170. switch (acb->adapter_type) {
  1171. case ACB_ADAPTER_TYPE_A: {
  1172. arcmsr_stop_hba_bgrb(acb);
  1173. }
  1174. break;
  1175. case ACB_ADAPTER_TYPE_B: {
  1176. arcmsr_stop_hbb_bgrb(acb);
  1177. }
  1178. break;
  1179. case ACB_ADAPTER_TYPE_C: {
  1180. arcmsr_stop_hbc_bgrb(acb);
  1181. }
  1182. }
  1183. }
  1184. static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
  1185. {
  1186. dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
  1187. }
  1188. void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
  1189. {
  1190. switch (acb->adapter_type) {
  1191. case ACB_ADAPTER_TYPE_A: {
  1192. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1193. writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
  1194. }
  1195. break;
  1196. case ACB_ADAPTER_TYPE_B: {
  1197. struct MessageUnit_B *reg = acb->pmuB;
  1198. writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
  1199. }
  1200. break;
  1201. case ACB_ADAPTER_TYPE_C: {
  1202. struct MessageUnit_C __iomem *reg = acb->pmuC;
  1203. writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
  1204. }
  1205. }
  1206. }
  1207. static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
  1208. {
  1209. switch (acb->adapter_type) {
  1210. case ACB_ADAPTER_TYPE_A: {
  1211. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1212. /*
  1213. ** push inbound doorbell tell iop, driver data write ok
  1214. ** and wait reply on next hwinterrupt for next Qbuffer post
  1215. */
  1216. writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
  1217. }
  1218. break;
  1219. case ACB_ADAPTER_TYPE_B: {
  1220. struct MessageUnit_B *reg = acb->pmuB;
  1221. /*
  1222. ** push inbound doorbell tell iop, driver data write ok
  1223. ** and wait reply on next hwinterrupt for next Qbuffer post
  1224. */
  1225. writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
  1226. }
  1227. break;
  1228. case ACB_ADAPTER_TYPE_C: {
  1229. struct MessageUnit_C __iomem *reg = acb->pmuC;
  1230. /*
  1231. ** push inbound doorbell tell iop, driver data write ok
  1232. ** and wait reply on next hwinterrupt for next Qbuffer post
  1233. */
  1234. writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
  1235. }
  1236. break;
  1237. }
  1238. }
  1239. struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
  1240. {
  1241. struct QBUFFER __iomem *qbuffer = NULL;
  1242. switch (acb->adapter_type) {
  1243. case ACB_ADAPTER_TYPE_A: {
  1244. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1245. qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
  1246. }
  1247. break;
  1248. case ACB_ADAPTER_TYPE_B: {
  1249. struct MessageUnit_B *reg = acb->pmuB;
  1250. qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
  1251. }
  1252. break;
  1253. case ACB_ADAPTER_TYPE_C: {
  1254. struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
  1255. qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
  1256. }
  1257. }
  1258. return qbuffer;
  1259. }
  1260. static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
  1261. {
  1262. struct QBUFFER __iomem *pqbuffer = NULL;
  1263. switch (acb->adapter_type) {
  1264. case ACB_ADAPTER_TYPE_A: {
  1265. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1266. pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
  1267. }
  1268. break;
  1269. case ACB_ADAPTER_TYPE_B: {
  1270. struct MessageUnit_B *reg = acb->pmuB;
  1271. pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
  1272. }
  1273. break;
  1274. case ACB_ADAPTER_TYPE_C: {
  1275. struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
  1276. pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
  1277. }
  1278. }
  1279. return pqbuffer;
  1280. }
  1281. static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
  1282. {
  1283. struct QBUFFER __iomem *prbuffer;
  1284. struct QBUFFER *pQbuffer;
  1285. uint8_t __iomem *iop_data;
  1286. int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
  1287. rqbuf_lastindex = acb->rqbuf_lastindex;
  1288. rqbuf_firstindex = acb->rqbuf_firstindex;
  1289. prbuffer = arcmsr_get_iop_rqbuffer(acb);
  1290. iop_data = (uint8_t __iomem *)prbuffer->data;
  1291. iop_len = prbuffer->data_len;
  1292. my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);
  1293. if (my_empty_len >= iop_len)
  1294. {
  1295. while (iop_len > 0) {
  1296. pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
1297. memcpy_fromio(pQbuffer, iop_data, 1);
  1298. rqbuf_lastindex++;
  1299. rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
  1300. iop_data++;
  1301. iop_len--;
  1302. }
  1303. acb->rqbuf_lastindex = rqbuf_lastindex;
  1304. arcmsr_iop_message_read(acb);
  1305. }
  1306. else {
  1307. acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
  1308. }
  1309. }
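/*
 * Illustration only (helper name hypothetical): the free-space test above is
 * standard power-of-two ring arithmetic.  With ARCMSR_MAX_QBUFFER a power of
 * two, the mask makes the subtraction wrap, e.g. firstindex = 2,
 * lastindex = 4090 in a 4096-byte ring leaves (2 - 4090 - 1) & 4095 = 7
 * bytes free.
 */
static inline int32_t arcmsr_example_ring_free(int32_t firstindex,
	int32_t lastindex)
{
	return (firstindex - lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);
}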
  1310. static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
  1311. {
  1312. acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
  1313. if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
  1314. uint8_t *pQbuffer;
  1315. struct QBUFFER __iomem *pwbuffer;
  1316. uint8_t __iomem *iop_data;
  1317. int32_t allxfer_len = 0;
  1318. acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
  1319. pwbuffer = arcmsr_get_iop_wqbuffer(acb);
  1320. iop_data = (uint8_t __iomem *)pwbuffer->data;
1321. while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) &&
  1322. (allxfer_len < 124)) {
  1323. pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
1324. memcpy_toio(iop_data, pQbuffer, 1);
  1325. acb->wqbuf_firstindex++;
  1326. acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
  1327. iop_data++;
  1328. allxfer_len++;
  1329. }
  1330. pwbuffer->data_len = allxfer_len;
  1331. arcmsr_iop_message_wrote(acb);
  1332. }
  1333. if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
  1334. acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
  1335. }
  1336. }
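/*
 * Illustration only: both drain loops above cap a single exchange at 124
 * bytes because a QBUFFER frame on the message interface appears to be a
 * 32-bit data_len followed by a 124-byte payload.  A sketch of that assumed
 * layout (struct name hypothetical):
 */
struct arcmsr_example_qbuffer_layout {
	uint32_t data_len;	/* number of valid bytes in data[] */
	uint8_t data[124];	/* payload, at most 124 bytes per exchange */
};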
  1337. static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
  1338. {
  1339. uint32_t outbound_doorbell;
  1340. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1341. outbound_doorbell = readl(&reg->outbound_doorbell);
  1342. writel(outbound_doorbell, &reg->outbound_doorbell);
  1343. if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
  1344. arcmsr_iop2drv_data_wrote_handle(acb);
  1345. }
  1346. if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
  1347. arcmsr_iop2drv_data_read_handle(acb);
  1348. }
  1349. }
  1350. static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB)
  1351. {
  1352. uint32_t outbound_doorbell;
  1353. struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
  1354. /*
  1355. *******************************************************************
  1356. ** Maybe here we need to check wrqbuffer_lock is lock or not
  1357. ** DOORBELL: din! don!
  1358. ** check if there are any mail need to pack from firmware
  1359. *******************************************************************
  1360. */
  1361. outbound_doorbell = readl(&reg->outbound_doorbell);
  1362. writel(outbound_doorbell, &reg->outbound_doorbell_clear);/*clear interrupt*/
  1363. if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
  1364. arcmsr_iop2drv_data_wrote_handle(pACB);
  1365. }
  1366. if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
  1367. arcmsr_iop2drv_data_read_handle(pACB);
  1368. }
  1369. if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
  1370. arcmsr_hbc_message_isr(pACB); /* messenger of "driver to iop commands" */
  1371. }
  1372. return;
  1373. }
  1374. static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
  1375. {
  1376. uint32_t flag_ccb;
  1377. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1378. struct ARCMSR_CDB *pARCMSR_CDB;
  1379. struct CommandControlBlock *pCCB;
  1380. bool error;
  1381. while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
  1382. pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
  1383. pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
  1384. error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
  1385. arcmsr_drain_donequeue(acb, pCCB, error);
  1386. }
  1387. }
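/*
 * Illustration only (helper name hypothetical): a done-queue entry holds the
 * CDB bus address divided by 32, since frames are 32-byte aligned.  The ISR
 * rebuilds the driver's virtual CCB pointer by shifting left five bits,
 * adding vir2phy_offset, and stepping back to the enclosing CCB:
 */
static inline struct CommandControlBlock *
arcmsr_example_flag_to_ccb(struct AdapterControlBlock *acb, uint32_t flag_ccb)
{
	struct ARCMSR_CDB *cdb = (struct ARCMSR_CDB *)
		(acb->vir2phy_offset + (flag_ccb << 5));

	return container_of(cdb, struct CommandControlBlock, arcmsr_cdb);
}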
  1388. static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
  1389. {
  1390. uint32_t index;
  1391. uint32_t flag_ccb;
  1392. struct MessageUnit_B *reg = acb->pmuB;
  1393. struct ARCMSR_CDB *pARCMSR_CDB;
  1394. struct CommandControlBlock *pCCB;
  1395. bool error;
  1396. index = reg->doneq_index;
  1397. while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
  1398. writel(0, &reg->done_qbuffer[index]);
  1399. pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
  1400. pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
  1401. error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
  1402. arcmsr_drain_donequeue(acb, pCCB, error);
  1403. index++;
  1404. index %= ARCMSR_MAX_HBB_POSTQUEUE;
  1405. reg->doneq_index = index;
  1406. }
  1407. }
  1408. static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
  1409. {
  1410. struct MessageUnit_C *phbcmu;
  1411. struct ARCMSR_CDB *arcmsr_cdb;
  1412. struct CommandControlBlock *ccb;
  1413. uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
1414. bool error;
  1415. phbcmu = (struct MessageUnit_C *)acb->pmuC;
  1416. /* areca cdb command done */
  1417. /* Use correct offset and size for syncing */
  1418. while (readl(&phbcmu->host_int_status) &
  1419. ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR){
  1420. /* check if command done with no error*/
  1421. flag_ccb = readl(&phbcmu->outbound_queueport_low);
  1422. ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);/*frame must be 32 bytes aligned*/
  1423. arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
  1424. ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
  1425. error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
  1426. /* check if command done with no error */
  1427. arcmsr_drain_donequeue(acb, ccb, error);
  1428. if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
  1429. writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, &phbcmu->inbound_doorbell);
  1430. break;
  1431. }
  1432. throttling++;
  1433. }
  1434. }
  1435. /*
  1436. **********************************************************************************
  1437. ** Handle a message interrupt
  1438. **
  1439. ** The only message interrupt we expect is in response to a query for the current adapter config.
  1440. ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
  1441. **********************************************************************************
  1442. */
  1443. static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
  1444. {
  1445. struct MessageUnit_A *reg = acb->pmuA;
  1446. /*clear interrupt and message state*/
  1447. writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
  1448. schedule_work(&acb->arcmsr_do_message_isr_bh);
  1449. }
  1450. static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
  1451. {
  1452. struct MessageUnit_B *reg = acb->pmuB;
  1453. /*clear interrupt and message state*/
  1454. writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
  1455. schedule_work(&acb->arcmsr_do_message_isr_bh);
  1456. }
  1457. /*
  1458. **********************************************************************************
  1459. ** Handle a message interrupt
  1460. **
  1461. ** The only message interrupt we expect is in response to a query for the
  1462. ** current adapter config.
  1463. ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
  1464. **********************************************************************************
  1465. */
  1466. static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb)
  1467. {
  1468. struct MessageUnit_C *reg = acb->pmuC;
  1469. /*clear interrupt and message state*/
  1470. writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
  1471. schedule_work(&acb->arcmsr_do_message_isr_bh);
  1472. }
  1473. static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
  1474. {
  1475. uint32_t outbound_intstatus;
  1476. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1477. outbound_intstatus = readl(&reg->outbound_intstatus) &
  1478. acb->outbound_int_enable;
  1479. if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
  1480. return 1;
  1481. }
  1482. writel(outbound_intstatus, &reg->outbound_intstatus);
  1483. if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
  1484. arcmsr_hba_doorbell_isr(acb);
  1485. }
  1486. if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
  1487. arcmsr_hba_postqueue_isr(acb);
  1488. }
  1489. if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
  1490. /* messenger of "driver to iop commands" */
  1491. arcmsr_hba_message_isr(acb);
  1492. }
  1493. return 0;
  1494. }
  1495. static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
  1496. {
  1497. uint32_t outbound_doorbell;
  1498. struct MessageUnit_B *reg = acb->pmuB;
  1499. outbound_doorbell = readl(reg->iop2drv_doorbell) &
  1500. acb->outbound_int_enable;
  1501. if (!outbound_doorbell)
  1502. return 1;
  1503. writel(~outbound_doorbell, reg->iop2drv_doorbell);
1504. /* the readback below flushes a possibly posted doorbell-clear write,
1505. forcing the HW to latch the clear bit before we continue */
  1506. readl(reg->iop2drv_doorbell);
  1507. writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
  1508. if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
  1509. arcmsr_iop2drv_data_wrote_handle(acb);
  1510. }
  1511. if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
  1512. arcmsr_iop2drv_data_read_handle(acb);
  1513. }
  1514. if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
  1515. arcmsr_hbb_postqueue_isr(acb);
  1516. }
  1517. if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
  1518. /* messenger of "driver to iop commands" */
  1519. arcmsr_hbb_message_isr(acb);
  1520. }
  1521. return 0;
  1522. }
  1523. static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB)
  1524. {
  1525. uint32_t host_interrupt_status;
  1526. struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
  1527. /*
  1528. *********************************************
  1529. ** check outbound intstatus
  1530. *********************************************
  1531. */
  1532. host_interrupt_status = readl(&phbcmu->host_int_status);
  1533. if (!host_interrupt_status) {
1534. /* it must be a shared irq */
  1535. return 1;
  1536. }
  1537. /* MU ioctl transfer doorbell interrupts*/
  1538. if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
  1539. arcmsr_hbc_doorbell_isr(pACB); /* messenger of "ioctl message read write" */
  1540. }
  1541. /* MU post queue interrupts*/
  1542. if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
  1543. arcmsr_hbc_postqueue_isr(pACB); /* messenger of "scsi commands" */
  1544. }
  1545. return 0;
  1546. }
  1547. static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
  1548. {
  1549. switch (acb->adapter_type) {
  1550. case ACB_ADAPTER_TYPE_A: {
  1551. if (arcmsr_handle_hba_isr(acb)) {
  1552. return IRQ_NONE;
  1553. }
  1554. }
  1555. break;
  1556. case ACB_ADAPTER_TYPE_B: {
  1557. if (arcmsr_handle_hbb_isr(acb)) {
  1558. return IRQ_NONE;
  1559. }
  1560. }
  1561. break;
  1562. case ACB_ADAPTER_TYPE_C: {
  1563. if (arcmsr_handle_hbc_isr(acb)) {
  1564. return IRQ_NONE;
  1565. }
  1566. }
  1567. }
  1568. return IRQ_HANDLED;
  1569. }
  1570. static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
  1571. {
  1572. if (acb) {
  1573. /* stop adapter background rebuild */
  1574. if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
  1575. uint32_t intmask_org;
  1576. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  1577. intmask_org = arcmsr_disable_outbound_ints(acb);
  1578. arcmsr_stop_adapter_bgrb(acb);
  1579. arcmsr_flush_adapter_cache(acb);
  1580. arcmsr_enable_outbound_ints(acb, intmask_org);
  1581. }
  1582. }
  1583. }
  1584. void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
  1585. {
  1586. int32_t wqbuf_firstindex, wqbuf_lastindex;
  1587. uint8_t *pQbuffer;
  1588. struct QBUFFER __iomem *pwbuffer;
  1589. uint8_t __iomem *iop_data;
  1590. int32_t allxfer_len = 0;
  1591. pwbuffer = arcmsr_get_iop_wqbuffer(acb);
  1592. iop_data = (uint8_t __iomem *)pwbuffer->data;
  1593. if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
  1594. acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
  1595. wqbuf_firstindex = acb->wqbuf_firstindex;
  1596. wqbuf_lastindex = acb->wqbuf_lastindex;
  1597. while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
  1598. pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
1599. memcpy_toio(iop_data, pQbuffer, 1);
  1600. wqbuf_firstindex++;
  1601. wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
  1602. iop_data++;
  1603. allxfer_len++;
  1604. }
  1605. acb->wqbuf_firstindex = wqbuf_firstindex;
  1606. pwbuffer->data_len = allxfer_len;
  1607. arcmsr_iop_message_wrote(acb);
  1608. }
  1609. }
  1610. static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
  1611. struct scsi_cmnd *cmd)
  1612. {
  1613. struct CMD_MESSAGE_FIELD *pcmdmessagefld;
  1614. int retvalue = 0, transfer_len = 0;
  1615. char *buffer;
  1616. struct scatterlist *sg;
  1617. uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
  1618. (uint32_t ) cmd->cmnd[6] << 16 |
  1619. (uint32_t ) cmd->cmnd[7] << 8 |
  1620. (uint32_t ) cmd->cmnd[8];
  1621. /* 4 bytes: Areca io control code */
  1622. sg = scsi_sglist(cmd);
  1623. buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
  1624. if (scsi_sg_count(cmd) > 1) {
  1625. retvalue = ARCMSR_MESSAGE_FAIL;
  1626. goto message_out;
  1627. }
  1628. transfer_len += sg->length;
  1629. if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
  1630. retvalue = ARCMSR_MESSAGE_FAIL;
  1631. goto message_out;
  1632. }
  1633. pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
  1634. switch(controlcode) {
  1635. case ARCMSR_MESSAGE_READ_RQBUFFER: {
  1636. unsigned char *ver_addr;
  1637. uint8_t *pQbuffer, *ptmpQbuffer;
  1638. int32_t allxfer_len = 0;
  1639. ver_addr = kmalloc(1032, GFP_ATOMIC);
  1640. if (!ver_addr) {
  1641. retvalue = ARCMSR_MESSAGE_FAIL;
  1642. goto message_out;
  1643. }
  1644. ptmpQbuffer = ver_addr;
  1645. while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
  1646. && (allxfer_len < 1031)) {
  1647. pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
  1648. memcpy(ptmpQbuffer, pQbuffer, 1);
  1649. acb->rqbuf_firstindex++;
  1650. acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
  1651. ptmpQbuffer++;
  1652. allxfer_len++;
  1653. }
  1654. if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
  1655. struct QBUFFER __iomem *prbuffer;
  1656. uint8_t __iomem *iop_data;
  1657. int32_t iop_len;
  1658. acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
  1659. prbuffer = arcmsr_get_iop_rqbuffer(acb);
  1660. iop_data = prbuffer->data;
  1661. iop_len = readl(&prbuffer->data_len);
  1662. while (iop_len > 0) {
  1663. acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
  1664. acb->rqbuf_lastindex++;
  1665. acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
  1666. iop_data++;
  1667. iop_len--;
  1668. }
  1669. arcmsr_iop_message_read(acb);
  1670. }
  1671. memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
  1672. pcmdmessagefld->cmdmessage.Length = allxfer_len;
  1673. if(acb->fw_flag == FW_DEADLOCK) {
  1674. pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1675. }else{
  1676. pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
  1677. }
  1678. kfree(ver_addr);
  1679. }
  1680. break;
  1681. case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
  1682. unsigned char *ver_addr;
  1683. int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
  1684. uint8_t *pQbuffer, *ptmpuserbuffer;
  1685. ver_addr = kmalloc(1032, GFP_ATOMIC);
  1686. if (!ver_addr) {
  1687. retvalue = ARCMSR_MESSAGE_FAIL;
  1688. goto message_out;
  1689. }
  1690. if(acb->fw_flag == FW_DEADLOCK) {
  1691. pcmdmessagefld->cmdmessage.ReturnCode =
  1692. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1693. }else{
  1694. pcmdmessagefld->cmdmessage.ReturnCode =
  1695. ARCMSR_MESSAGE_RETURNCODE_OK;
  1696. }
  1697. ptmpuserbuffer = ver_addr;
  1698. user_len = pcmdmessagefld->cmdmessage.Length;
  1699. memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
  1700. wqbuf_lastindex = acb->wqbuf_lastindex;
  1701. wqbuf_firstindex = acb->wqbuf_firstindex;
  1702. if (wqbuf_lastindex != wqbuf_firstindex) {
  1703. struct SENSE_DATA *sensebuffer =
  1704. (struct SENSE_DATA *)cmd->sense_buffer;
  1705. arcmsr_post_ioctldata2iop(acb);
  1706. /* has error report sensedata */
  1707. sensebuffer->ErrorCode = 0x70;
  1708. sensebuffer->SenseKey = ILLEGAL_REQUEST;
  1709. sensebuffer->AdditionalSenseLength = 0x0A;
  1710. sensebuffer->AdditionalSenseCode = 0x20;
  1711. sensebuffer->Valid = 1;
  1712. retvalue = ARCMSR_MESSAGE_FAIL;
  1713. } else {
  1714. my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
  1715. &(ARCMSR_MAX_QBUFFER - 1);
  1716. if (my_empty_len >= user_len) {
  1717. while (user_len > 0) {
  1718. pQbuffer =
  1719. &acb->wqbuffer[acb->wqbuf_lastindex];
  1720. memcpy(pQbuffer, ptmpuserbuffer, 1);
  1721. acb->wqbuf_lastindex++;
  1722. acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
  1723. ptmpuserbuffer++;
  1724. user_len--;
  1725. }
  1726. if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
  1727. acb->acb_flags &=
  1728. ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
  1729. arcmsr_post_ioctldata2iop(acb);
  1730. }
  1731. } else {
  1732. /* has error report sensedata */
  1733. struct SENSE_DATA *sensebuffer =
  1734. (struct SENSE_DATA *)cmd->sense_buffer;
  1735. sensebuffer->ErrorCode = 0x70;
  1736. sensebuffer->SenseKey = ILLEGAL_REQUEST;
  1737. sensebuffer->AdditionalSenseLength = 0x0A;
  1738. sensebuffer->AdditionalSenseCode = 0x20;
  1739. sensebuffer->Valid = 1;
  1740. retvalue = ARCMSR_MESSAGE_FAIL;
  1741. }
  1742. }
  1743. kfree(ver_addr);
  1744. }
  1745. break;
  1746. case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
  1747. uint8_t *pQbuffer = acb->rqbuffer;
  1748. if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
  1749. acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
  1750. arcmsr_iop_message_read(acb);
  1751. }
  1752. acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
  1753. acb->rqbuf_firstindex = 0;
  1754. acb->rqbuf_lastindex = 0;
  1755. memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
  1756. if(acb->fw_flag == FW_DEADLOCK) {
  1757. pcmdmessagefld->cmdmessage.ReturnCode =
  1758. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1759. }else{
  1760. pcmdmessagefld->cmdmessage.ReturnCode =
  1761. ARCMSR_MESSAGE_RETURNCODE_OK;
  1762. }
  1763. }
  1764. break;
  1765. case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
  1766. uint8_t *pQbuffer = acb->wqbuffer;
  1767. if(acb->fw_flag == FW_DEADLOCK) {
  1768. pcmdmessagefld->cmdmessage.ReturnCode =
  1769. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1770. }else{
  1771. pcmdmessagefld->cmdmessage.ReturnCode =
  1772. ARCMSR_MESSAGE_RETURNCODE_OK;
  1773. }
  1774. if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
  1775. acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
  1776. arcmsr_iop_message_read(acb);
  1777. }
  1778. acb->acb_flags |=
  1779. (ACB_F_MESSAGE_WQBUFFER_CLEARED |
  1780. ACB_F_MESSAGE_WQBUFFER_READED);
  1781. acb->wqbuf_firstindex = 0;
  1782. acb->wqbuf_lastindex = 0;
  1783. memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
  1784. }
  1785. break;
  1786. case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
  1787. uint8_t *pQbuffer;
  1788. if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
  1789. acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
  1790. arcmsr_iop_message_read(acb);
  1791. }
  1792. acb->acb_flags |=
  1793. (ACB_F_MESSAGE_WQBUFFER_CLEARED
  1794. | ACB_F_MESSAGE_RQBUFFER_CLEARED
  1795. | ACB_F_MESSAGE_WQBUFFER_READED);
  1796. acb->rqbuf_firstindex = 0;
  1797. acb->rqbuf_lastindex = 0;
  1798. acb->wqbuf_firstindex = 0;
  1799. acb->wqbuf_lastindex = 0;
  1800. pQbuffer = acb->rqbuffer;
  1801. memset(pQbuffer, 0, sizeof(struct QBUFFER));
  1802. pQbuffer = acb->wqbuffer;
  1803. memset(pQbuffer, 0, sizeof(struct QBUFFER));
  1804. if(acb->fw_flag == FW_DEADLOCK) {
  1805. pcmdmessagefld->cmdmessage.ReturnCode =
  1806. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1807. }else{
  1808. pcmdmessagefld->cmdmessage.ReturnCode =
  1809. ARCMSR_MESSAGE_RETURNCODE_OK;
  1810. }
  1811. }
  1812. break;
  1813. case ARCMSR_MESSAGE_RETURN_CODE_3F: {
  1814. if(acb->fw_flag == FW_DEADLOCK) {
  1815. pcmdmessagefld->cmdmessage.ReturnCode =
  1816. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1817. }else{
  1818. pcmdmessagefld->cmdmessage.ReturnCode =
  1819. ARCMSR_MESSAGE_RETURNCODE_3F;
  1820. }
  1821. break;
  1822. }
  1823. case ARCMSR_MESSAGE_SAY_HELLO: {
1824. const char *hello_string = "Hello! I am ARCMSR";
  1825. if(acb->fw_flag == FW_DEADLOCK) {
  1826. pcmdmessagefld->cmdmessage.ReturnCode =
  1827. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1828. }else{
  1829. pcmdmessagefld->cmdmessage.ReturnCode =
  1830. ARCMSR_MESSAGE_RETURNCODE_OK;
  1831. }
  1832. memcpy(pcmdmessagefld->messagedatabuffer, hello_string
  1833. , (int16_t)strlen(hello_string));
  1834. }
  1835. break;
  1836. case ARCMSR_MESSAGE_SAY_GOODBYE:
  1837. if(acb->fw_flag == FW_DEADLOCK) {
  1838. pcmdmessagefld->cmdmessage.ReturnCode =
  1839. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1840. }
  1841. arcmsr_iop_parking(acb);
  1842. break;
  1843. case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
  1844. if(acb->fw_flag == FW_DEADLOCK) {
  1845. pcmdmessagefld->cmdmessage.ReturnCode =
  1846. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1847. }
  1848. arcmsr_flush_adapter_cache(acb);
  1849. break;
  1850. default:
  1851. retvalue = ARCMSR_MESSAGE_FAIL;
  1852. }
  1853. message_out:
  1854. sg = scsi_sglist(cmd);
  1855. kunmap_atomic(buffer - sg->offset, KM_IRQ0);
  1856. return retvalue;
  1857. }
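/*
 * Illustration only (helper name hypothetical): bytes 5..8 of the
 * WRITE/READ BUFFER CDB carry the Areca io control code big-endian; the
 * shift-and-or at the top of arcmsr_iop_message_xfer() is equivalent to:
 */
static inline uint32_t arcmsr_example_ctlcode(const uint8_t *cmnd)
{
	return ((uint32_t)cmnd[5] << 24) | ((uint32_t)cmnd[6] << 16) |
	       ((uint32_t)cmnd[7] << 8) | (uint32_t)cmnd[8];
}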
  1858. static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
  1859. {
  1860. struct list_head *head = &acb->ccb_free_list;
  1861. struct CommandControlBlock *ccb = NULL;
  1862. unsigned long flags;
  1863. spin_lock_irqsave(&acb->ccblist_lock, flags);
  1864. if (!list_empty(head)) {
  1865. ccb = list_entry(head->next, struct CommandControlBlock, list);
  1866. list_del_init(&ccb->list);
  1867. }else{
  1868. spin_unlock_irqrestore(&acb->ccblist_lock, flags);
1869. return NULL;
  1870. }
  1871. spin_unlock_irqrestore(&acb->ccblist_lock, flags);
  1872. return ccb;
  1873. }
  1874. static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
  1875. struct scsi_cmnd *cmd)
  1876. {
  1877. switch (cmd->cmnd[0]) {
  1878. case INQUIRY: {
  1879. unsigned char inqdata[36];
  1880. char *buffer;
  1881. struct scatterlist *sg;
  1882. if (cmd->device->lun) {
  1883. cmd->result = (DID_TIME_OUT << 16);
  1884. cmd->scsi_done(cmd);
  1885. return;
  1886. }
  1887. inqdata[0] = TYPE_PROCESSOR;
  1888. /* Periph Qualifier & Periph Dev Type */
  1889. inqdata[1] = 0;
  1890. /* rem media bit & Dev Type Modifier */
  1891. inqdata[2] = 0;
  1892. /* ISO, ECMA, & ANSI versions */
  1893. inqdata[4] = 31;
  1894. /* length of additional data */
  1895. strncpy(&inqdata[8], "Areca ", 8);
  1896. /* Vendor Identification */
  1897. strncpy(&inqdata[16], "RAID controller ", 16);
  1898. /* Product Identification */
  1899. strncpy(&inqdata[32], "R001", 4); /* Product Revision */
  1900. sg = scsi_sglist(cmd);
  1901. buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
  1902. memcpy(buffer, inqdata, sizeof(inqdata));
  1903. sg = scsi_sglist(cmd);
  1904. kunmap_atomic(buffer - sg->offset, KM_IRQ0);
  1905. cmd->scsi_done(cmd);
  1906. }
  1907. break;
  1908. case WRITE_BUFFER:
  1909. case READ_BUFFER: {
  1910. if (arcmsr_iop_message_xfer(acb, cmd))
  1911. cmd->result = (DID_ERROR << 16);
  1912. cmd->scsi_done(cmd);
  1913. }
  1914. break;
  1915. default:
  1916. cmd->scsi_done(cmd);
  1917. }
  1918. }
  1919. static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
  1920. void (* done)(struct scsi_cmnd *))
  1921. {
  1922. struct Scsi_Host *host = cmd->device->host;
  1923. struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
  1924. struct CommandControlBlock *ccb;
  1925. int target = cmd->device->id;
  1926. int lun = cmd->device->lun;
  1927. uint8_t scsicmd = cmd->cmnd[0];
  1928. cmd->scsi_done = done;
  1929. cmd->host_scribble = NULL;
  1930. cmd->result = 0;
1931. if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) {
  1932. if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
  1933. cmd->result = (DID_NO_CONNECT << 16);
  1934. }
  1935. cmd->scsi_done(cmd);
  1936. return 0;
  1937. }
  1938. if (target == 16) {
  1939. /* virtual device for iop message transfer */
  1940. arcmsr_handle_virtual_command(acb, cmd);
  1941. return 0;
  1942. }
  1943. if (atomic_read(&acb->ccboutstandingcount) >=
  1944. ARCMSR_MAX_OUTSTANDING_CMD)
  1945. return SCSI_MLQUEUE_HOST_BUSY;
  1946. ccb = arcmsr_get_freeccb(acb);
  1947. if (!ccb)
  1948. return SCSI_MLQUEUE_HOST_BUSY;
1949. if (arcmsr_build_ccb(acb, ccb, cmd) == FAILED) {
  1950. cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
  1951. cmd->scsi_done(cmd);
  1952. return 0;
  1953. }
  1954. arcmsr_post_ccb(acb, ccb);
  1955. return 0;
  1956. }
  1957. static DEF_SCSI_QCMD(arcmsr_queue_command)
  1958. static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
  1959. {
  1960. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1961. char *acb_firm_model = acb->firm_model;
  1962. char *acb_firm_version = acb->firm_version;
  1963. char *acb_device_map = acb->device_map;
  1964. char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
  1965. char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
  1966. char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
  1967. int count;
  1968. writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
  1969. if (!arcmsr_hba_wait_msgint_ready(acb)) {
  1970. printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
  1971. miscellaneous data' timeout \n", acb->host->host_no);
  1972. return false;
  1973. }
  1974. count = 8;
  1975. while (count){
  1976. *acb_firm_model = readb(iop_firm_model);
  1977. acb_firm_model++;
  1978. iop_firm_model++;
  1979. count--;
  1980. }
  1981. count = 16;
  1982. while (count){
  1983. *acb_firm_version = readb(iop_firm_version);
  1984. acb_firm_version++;
  1985. iop_firm_version++;
  1986. count--;
  1987. }
1988. count = 16;
1989. while (count) {
  1990. *acb_device_map = readb(iop_device_map);
  1991. acb_device_map++;
  1992. iop_device_map++;
  1993. count--;
  1994. }
  1995. printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
  1996. acb->host->host_no,
  1997. acb->firm_version,
  1998. acb->firm_model);
  1999. acb->signature = readl(&reg->message_rwbuffer[0]);
  2000. acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
  2001. acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
  2002. acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
  2003. acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
  2004. acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
  2005. return true;
  2006. }
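/*
 * Illustration only (helper name hypothetical): the three byte-copy loops
 * above are an open-coded memcpy_fromio(); condensed, each one amounts to:
 */
static void arcmsr_example_copy_fw_string(char *dst,
	const char __iomem *src, int count)
{
	while (count--)
		*dst++ = readb(src++);
}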
  2007. static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
  2008. {
  2009. struct MessageUnit_B *reg = acb->pmuB;
  2010. struct pci_dev *pdev = acb->pdev;
  2011. void *dma_coherent;
  2012. dma_addr_t dma_coherent_handle;
  2013. char *acb_firm_model = acb->firm_model;
  2014. char *acb_firm_version = acb->firm_version;
  2015. char *acb_device_map = acb->device_map;
  2016. char __iomem *iop_firm_model;
  2017. /*firm_model,15,60-67*/
  2018. char __iomem *iop_firm_version;
  2019. /*firm_version,17,68-83*/
  2020. char __iomem *iop_device_map;
2021. /*firm_device_map,21,84-99*/
  2022. int count;
  2023. dma_coherent = dma_alloc_coherent(&pdev->dev, sizeof(struct MessageUnit_B), &dma_coherent_handle, GFP_KERNEL);
  2024. if (!dma_coherent){
  2025. printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error for hbb mu\n", acb->host->host_no);
  2026. return false;
  2027. }
  2028. acb->dma_coherent_handle_hbb_mu = dma_coherent_handle;
  2029. reg = (struct MessageUnit_B *)dma_coherent;
  2030. acb->pmuB = reg;
2031. reg->drv2iop_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL);
  2032. reg->drv2iop_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL_MASK);
  2033. reg->iop2drv_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL);
  2034. reg->iop2drv_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL_MASK);
  2035. reg->message_wbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_WBUFFER);
  2036. reg->message_rbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RBUFFER);
  2037. reg->message_rwbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RWBUFFER);
  2038. iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); /*firm_model,15,60-67*/
  2039. iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); /*firm_version,17,68-83*/
2040. iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_device_map,21,84-99*/
  2041. writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
  2042. if (!arcmsr_hbb_wait_msgint_ready(acb)) {
  2043. printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
  2044. miscellaneous data' timeout \n", acb->host->host_no);
  2045. return false;
  2046. }
  2047. count = 8;
  2048. while (count){
  2049. *acb_firm_model = readb(iop_firm_model);
  2050. acb_firm_model++;
  2051. iop_firm_model++;
  2052. count--;
  2053. }
  2054. count = 16;
  2055. while (count){
  2056. *acb_firm_version = readb(iop_firm_version);
  2057. acb_firm_version++;
  2058. iop_firm_version++;
  2059. count--;
  2060. }
  2061. count = 16;
2062. while (count) {
  2063. *acb_device_map = readb(iop_device_map);
  2064. acb_device_map++;
  2065. iop_device_map++;
  2066. count--;
  2067. }
  2068. printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
  2069. acb->host->host_no,
  2070. acb->firm_version,
  2071. acb->firm_model);
  2072. acb->signature = readl(&reg->message_rwbuffer[1]);
  2073. /*firm_signature,1,00-03*/
  2074. acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
  2075. /*firm_request_len,1,04-07*/
  2076. acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
  2077. /*firm_numbers_queue,2,08-11*/
  2078. acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
  2079. /*firm_sdram_size,3,12-15*/
  2080. acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
  2081. /*firm_ide_channels,4,16-19*/
  2082. acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
  2084. return true;
  2085. }
  2086. static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
  2087. {
  2088. uint32_t intmask_org, Index, firmware_state = 0;
  2089. struct MessageUnit_C *reg = pACB->pmuC;
  2090. char *acb_firm_model = pACB->firm_model;
  2091. char *acb_firm_version = pACB->firm_version;
  2092. char *iop_firm_model = (char *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
  2093. char *iop_firm_version = (char *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
  2094. int count;
  2095. /* disable all outbound interrupt */
  2096. intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
  2097. writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
  2098. /* wait firmware ready */
  2099. do {
  2100. firmware_state = readl(&reg->outbound_msgaddr1);
  2101. } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
  2102. /* post "get config" instruction */
  2103. writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
  2104. writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
  2105. /* wait message ready */
  2106. for (Index = 0; Index < 2000; Index++) {
  2107. if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
  2108. writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
  2109. break;
  2110. }
  2111. udelay(10);
2112. } /* at most ~20 ms: 2000 iterations of udelay(10) */
  2113. if (Index >= 2000) {
  2114. printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
  2115. miscellaneous data' timeout \n", pACB->host->host_no);
  2116. return false;
  2117. }
  2118. count = 8;
  2119. while (count) {
  2120. *acb_firm_model = readb(iop_firm_model);
  2121. acb_firm_model++;
  2122. iop_firm_model++;
  2123. count--;
  2124. }
  2125. count = 16;
  2126. while (count) {
  2127. *acb_firm_version = readb(iop_firm_version);
  2128. acb_firm_version++;
  2129. iop_firm_version++;
  2130. count--;
  2131. }
  2132. printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
  2133. pACB->host->host_no,
  2134. pACB->firm_version,
  2135. pACB->firm_model);
  2136. pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
  2137. pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
  2138. pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
  2139. pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
  2140. pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
  2141. /*all interrupt service will be enable at arcmsr_iop_init*/
  2142. return true;
  2143. }
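/*
 * Illustration only (helper name hypothetical): the message-ready wait in
 * arcmsr_get_hbc_config() in condensed form; 2000 iterations of udelay(10)
 * bound the spin to roughly 20 ms before the timeout path is taken.
 */
static bool arcmsr_example_wait_hbc_msg_done(struct MessageUnit_C __iomem *reg)
{
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(&reg->outbound_doorbell) &
		    ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			/* acknowledge the doorbell before returning */
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
				&reg->outbound_doorbell_clear);
			return true;
		}
		udelay(10);
	}
	return false;
}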
  2144. static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
  2145. {
  2146. if (acb->adapter_type == ACB_ADAPTER_TYPE_A)
  2147. return arcmsr_get_hba_config(acb);
  2148. else if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
  2149. return arcmsr_get_hbb_config(acb);
  2150. else
  2151. return arcmsr_get_hbc_config(acb);
  2152. }
  2153. static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
  2154. struct CommandControlBlock *poll_ccb)
  2155. {
  2156. struct MessageUnit_A __iomem *reg = acb->pmuA;
  2157. struct CommandControlBlock *ccb;
  2158. struct ARCMSR_CDB *arcmsr_cdb;
  2159. uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
  2160. int rtn;
  2161. bool error;
  2162. polling_hba_ccb_retry:
  2163. poll_count++;
  2164. outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
  2165. writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
  2166. while (1) {
  2167. if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
  2168. if (poll_ccb_done){
  2169. rtn = SUCCESS;
  2170. break;
  2171. }else {
  2172. msleep(25);
  2173. if (poll_count > 100){
  2174. rtn = FAILED;
  2175. break;
  2176. }
  2177. goto polling_hba_ccb_retry;
  2178. }
  2179. }
  2180. arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
  2181. ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
  2182. poll_ccb_done = (ccb == poll_ccb) ? 1:0;
  2183. if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
  2184. if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
  2185. printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
  2186. " poll command abort successfully \n"
  2187. , acb->host->host_no
  2188. , ccb->pcmd->device->id
  2189. , ccb->pcmd->device->lun
  2190. , ccb);
  2191. ccb->pcmd->result = DID_ABORT << 16;
  2192. arcmsr_ccb_complete(ccb);
  2193. continue;
  2194. }
  2195. printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
  2196. " command done ccb = '0x%p'"
  2197. "ccboutstandingcount = %d \n"
  2198. , acb->host->host_no
  2199. , ccb
  2200. , atomic_read(&acb->ccboutstandingcount));
  2201. continue;
  2202. }
  2203. error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
  2204. arcmsr_report_ccb_state(acb, ccb, error);
  2205. }
  2206. return rtn;
  2207. }
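/*
 * Illustration only (names hypothetical): all three polling routines share
 * one bounded-wait skeleton, about 100 retries at 25 ms each (roughly 2.5
 * seconds) before reporting FAILED.  Condensed:
 */
static int arcmsr_example_bounded_poll(bool (*done)(void *arg), void *arg)
{
	int poll_count = 0;

	while (!done(arg)) {
		msleep(25);
		if (++poll_count > 100)
			return FAILED;	/* ~2.5 s elapsed */
	}
	return SUCCESS;
}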
  2208. static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
  2209. struct CommandControlBlock *poll_ccb)
  2210. {
  2211. struct MessageUnit_B *reg = acb->pmuB;
  2212. struct ARCMSR_CDB *arcmsr_cdb;
  2213. struct CommandControlBlock *ccb;
  2214. uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
  2215. int index, rtn;
  2216. bool error;
  2217. polling_hbb_ccb_retry:
  2218. poll_count++;
  2219. /* clear doorbell interrupt */
  2220. writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
  2221. while(1){
  2222. index = reg->doneq_index;
  2223. if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
  2224. if (poll_ccb_done){
  2225. rtn = SUCCESS;
  2226. break;
  2227. }else {
  2228. msleep(25);
  2229. if (poll_count > 100){
  2230. rtn = FAILED;
  2231. break;
  2232. }
  2233. goto polling_hbb_ccb_retry;
  2234. }
  2235. }
  2236. writel(0, &reg->done_qbuffer[index]);
  2237. index++;
2238. /* wrap back to 0 past the last slot */
  2239. index %= ARCMSR_MAX_HBB_POSTQUEUE;
  2240. reg->doneq_index = index;
  2241. /* check if command done with no error*/
  2242. arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
  2243. ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
  2244. poll_ccb_done = (ccb == poll_ccb) ? 1:0;
  2245. if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
  2246. if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
  2247. printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
  2248. " poll command abort successfully \n"
  2249. ,acb->host->host_no
  2250. ,ccb->pcmd->device->id
  2251. ,ccb->pcmd->device->lun
  2252. ,ccb);
  2253. ccb->pcmd->result = DID_ABORT << 16;
  2254. arcmsr_ccb_complete(ccb);
  2255. continue;
  2256. }
  2257. printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
  2258. " command done ccb = '0x%p'"
  2259. "ccboutstandingcount = %d \n"
  2260. , acb->host->host_no
  2261. , ccb
  2262. , atomic_read(&acb->ccboutstandingcount));
  2263. continue;
  2264. }
  2265. error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
  2266. arcmsr_report_ccb_state(acb, ccb, error);
  2267. }
  2268. return rtn;
  2269. }
  2270. static int arcmsr_polling_hbc_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb)
  2271. {
  2272. struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
  2273. uint32_t flag_ccb, ccb_cdb_phy;
  2274. struct ARCMSR_CDB *arcmsr_cdb;
  2275. bool error;
  2276. struct CommandControlBlock *pCCB;
  2277. uint32_t poll_ccb_done = 0, poll_count = 0;
  2278. int rtn;
  2279. polling_hbc_ccb_retry:
  2280. poll_count++;
  2281. while (1) {
  2282. if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
  2283. if (poll_ccb_done) {
  2284. rtn = SUCCESS;
  2285. break;
  2286. } else {
  2287. msleep(25);
  2288. if (poll_count > 100) {
  2289. rtn = FAILED;
  2290. break;
  2291. }
  2292. goto polling_hbc_ccb_retry;
  2293. }
  2294. }
  2295. flag_ccb = readl(&reg->outbound_queueport_low);
  2296. ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
  2297. arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
  2298. pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
  2299. poll_ccb_done = (pCCB == poll_ccb) ? 1 : 0;
2300. /* check if command done with no error */
  2301. if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
  2302. if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
  2303. printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
  2304. " poll command abort successfully \n"
  2305. , acb->host->host_no
  2306. , pCCB->pcmd->device->id
  2307. , pCCB->pcmd->device->lun
  2308. , pCCB);
  2309. pCCB->pcmd->result = DID_ABORT << 16;
  2310. arcmsr_ccb_complete(pCCB);
  2311. continue;
  2312. }
  2313. printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
  2314. " command done ccb = '0x%p'"
  2315. "ccboutstandingcount = %d \n"
  2316. , acb->host->host_no
  2317. , pCCB
  2318. , atomic_read(&acb->ccboutstandingcount));
  2319. continue;
  2320. }
  2321. error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
  2322. arcmsr_report_ccb_state(acb, pCCB, error);
  2323. }
  2324. return rtn;
  2325. }
  2326. static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
  2327. struct CommandControlBlock *poll_ccb)
  2328. {
  2329. int rtn = 0;
  2330. switch (acb->adapter_type) {
  2331. case ACB_ADAPTER_TYPE_A: {
  2332. rtn = arcmsr_polling_hba_ccbdone(acb, poll_ccb);
  2333. }
  2334. break;
  2335. case ACB_ADAPTER_TYPE_B: {
  2336. rtn = arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
  2337. }
  2338. break;
  2339. case ACB_ADAPTER_TYPE_C: {
  2340. rtn = arcmsr_polling_hbc_ccbdone(acb, poll_ccb);
  2341. }
  2342. }
  2343. return rtn;
  2344. }
  2345. static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
  2346. {
  2347. uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
  2348. dma_addr_t dma_coherent_handle;
  2349. /*
  2350. ********************************************************************
  2351. ** here we need to tell iop 331 our freeccb.HighPart
  2352. ** if freeccb.HighPart is not zero
  2353. ********************************************************************
  2354. */
  2355. dma_coherent_handle = acb->dma_coherent_handle;
  2356. cdb_phyaddr = (uint32_t)(dma_coherent_handle);
2357. cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
  2358. acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
  2359. /*
  2360. ***********************************************************************
  2361. ** if adapter type B, set window of "post command Q"
  2362. ***********************************************************************
  2363. */
  2364. switch (acb->adapter_type) {
  2365. case ACB_ADAPTER_TYPE_A: {
  2366. if (cdb_phyaddr_hi32 != 0) {
  2367. struct MessageUnit_A __iomem *reg = acb->pmuA;
  2368. uint32_t intmask_org;
  2369. intmask_org = arcmsr_disable_outbound_ints(acb);
2370. writel(ARCMSR_SIGNATURE_SET_CONFIG,
2371. &reg->message_rwbuffer[0]);
2372. writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
2373. writel(ARCMSR_INBOUND_MESG0_SET_CONFIG,
2374. &reg->inbound_msgaddr0);
  2375. if (!arcmsr_hba_wait_msgint_ready(acb)) {
  2376. printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
  2377. part physical address timeout\n",
  2378. acb->host->host_no);
  2379. return 1;
  2380. }
  2381. arcmsr_enable_outbound_ints(acb, intmask_org);
  2382. }
  2383. }
  2384. break;
  2385. case ACB_ADAPTER_TYPE_B: {
  2386. unsigned long post_queue_phyaddr;
  2387. uint32_t __iomem *rwbuffer;
  2388. struct MessageUnit_B *reg = acb->pmuB;
  2389. uint32_t intmask_org;
  2390. intmask_org = arcmsr_disable_outbound_ints(acb);
  2391. reg->postq_index = 0;
  2392. reg->doneq_index = 0;
  2393. writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
  2394. if (!arcmsr_hbb_wait_msgint_ready(acb)) {
  2395. printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
  2396. acb->host->host_no);
  2397. return 1;
  2398. }
  2399. post_queue_phyaddr = acb->dma_coherent_handle_hbb_mu;
  2400. rwbuffer = reg->message_rwbuffer;
  2401. /* driver "set config" signature */
  2402. writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
  2403. /* normal should be zero */
  2404. writel(cdb_phyaddr_hi32, rwbuffer++);
  2405. /* postQ size (256 + 8)*4 */
  2406. writel(post_queue_phyaddr, rwbuffer++);
  2407. /* doneQ size (256 + 8)*4 */
  2408. writel(post_queue_phyaddr + 1056, rwbuffer++);
  2409. /* ccb maxQ size must be --> [(256 + 8)*4]*/
  2410. writel(1056, rwbuffer);
  2411. writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
  2412. if (!arcmsr_hbb_wait_msgint_ready(acb)) {
  2413. printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
  2414. timeout \n",acb->host->host_no);
  2415. return 1;
  2416. }
  2417. arcmsr_hbb_enable_driver_mode(acb);
  2418. arcmsr_enable_outbound_ints(acb, intmask_org);
  2419. }
  2420. break;
  2421. case ACB_ADAPTER_TYPE_C: {
  2422. if (cdb_phyaddr_hi32 != 0) {
  2423. struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
  2424. printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
  2425. acb->adapter_index, cdb_phyaddr_hi32);
  2426. writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
  2427. writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
  2428. writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
  2429. writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
  2430. if (!arcmsr_hbc_wait_msgint_ready(acb)) {
  2431. printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
  2432. timeout \n", acb->host->host_no);
  2433. return 1;
  2434. }
  2435. }
  2436. }
  2437. }
  2438. return 0;
  2439. }
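/*
 * Illustration only (helper name hypothetical): how the 64-bit DMA handle is
 * split for the firmware above.  The double 16-bit shift sidesteps an
 * undefined 32-bit shift on configurations where dma_addr_t is only 32 bits
 * wide.
 */
static inline void arcmsr_example_split_dma_handle(dma_addr_t handle,
	uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)handle;
	*hi = (uint32_t)((handle >> 16) >> 16);
}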
  2440. static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
  2441. {
  2442. uint32_t firmware_state = 0;
  2443. switch (acb->adapter_type) {
  2444. case ACB_ADAPTER_TYPE_A: {
  2445. struct MessageUnit_A __iomem *reg = acb->pmuA;
  2446. do {
  2447. firmware_state = readl(&reg->outbound_msgaddr1);
  2448. } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
  2449. }
  2450. break;
  2451. case ACB_ADAPTER_TYPE_B: {
  2452. struct MessageUnit_B *reg = acb->pmuB;
  2453. do {
  2454. firmware_state = readl(reg->iop2drv_doorbell);
  2455. } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
  2456. writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
  2457. }
  2458. break;
  2459. case ACB_ADAPTER_TYPE_C: {
  2460. struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
  2461. do {
  2462. firmware_state = readl(&reg->outbound_msgaddr1);
  2463. } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
  2464. }
  2465. }
  2466. }
  2467. static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
  2468. {
  2469. struct MessageUnit_A __iomem *reg = acb->pmuA;
  2470. if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
  2471. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2472. return;
  2473. } else {
  2474. acb->fw_flag = FW_NORMAL;
  2475. if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
  2476. atomic_set(&acb->rq_map_token, 16);
  2477. }
  2478. atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
  2479. if (atomic_dec_and_test(&acb->rq_map_token)) {
  2480. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2481. return;
  2482. }
  2483. writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
  2484. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2485. }
  2486. return;
  2487. }
  2488. static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
  2489. {
2490. struct MessageUnit_B *reg = acb->pmuB;
  2491. if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
  2492. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2493. return;
  2494. } else {
  2495. acb->fw_flag = FW_NORMAL;
  2496. if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
  2497. atomic_set(&acb->rq_map_token, 16);
  2498. }
  2499. atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
  2500. if (atomic_dec_and_test(&acb->rq_map_token)) {
  2501. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2502. return;
  2503. }
  2504. writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
  2505. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2506. }
  2507. return;
  2508. }
  2509. static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
  2510. {
  2511. struct MessageUnit_C __iomem *reg = acb->pmuC;
  2512. if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
  2513. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2514. return;
  2515. } else {
  2516. acb->fw_flag = FW_NORMAL;
  2517. if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
  2518. atomic_set(&acb->rq_map_token, 16);
  2519. }
  2520. atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
  2521. if (atomic_dec_and_test(&acb->rq_map_token)) {
  2522. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2523. return;
  2524. }
  2525. writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
  2526. writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
  2527. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2528. }
  2529. return;
  2530. }
  2531. static void arcmsr_request_device_map(unsigned long pacb)
  2532. {
  2533. struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
  2534. switch (acb->adapter_type) {
  2535. case ACB_ADAPTER_TYPE_A: {
  2536. arcmsr_request_hba_device_map(acb);
  2537. }
  2538. break;
  2539. case ACB_ADAPTER_TYPE_B: {
  2540. arcmsr_request_hbb_device_map(acb);
  2541. }
  2542. break;
  2543. case ACB_ADAPTER_TYPE_C: {
  2544. arcmsr_request_hbc_device_map(acb);
  2545. }
  2546. }
  2547. }
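/*
 * Illustration only (helper name hypothetical): the token bookkeeping each
 * 6-second timer tick performs in all three variants above.  When the two
 * counters meet, the budget is re-armed at 16; once it decrements to zero
 * the tick skips posting another GET_CONFIG until it is re-armed.
 */
static bool arcmsr_example_map_token_tick(struct AdapterControlBlock *acb)
{
	if (atomic_read(&acb->ante_token_value) ==
		atomic_read(&acb->rq_map_token))
		atomic_set(&acb->rq_map_token, 16);
	atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
	/* true means the budget is exhausted and no request is posted */
	return atomic_dec_and_test(&acb->rq_map_token);
}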
  2548. static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
  2549. {
  2550. struct MessageUnit_A __iomem *reg = acb->pmuA;
  2551. acb->acb_flags |= ACB_F_MSG_START_BGRB;
  2552. writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
  2553. if (!arcmsr_hba_wait_msgint_ready(acb)) {
  2554. printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
  2555. rebulid' timeout \n", acb->host->host_no);
  2556. }
  2557. }
  2558. static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
  2559. {
  2560. struct MessageUnit_B *reg = acb->pmuB;
  2561. acb->acb_flags |= ACB_F_MSG_START_BGRB;
  2562. writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
  2563. if (!arcmsr_hbb_wait_msgint_ready(acb)) {
  2564. printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
  2565. rebulid' timeout \n",acb->host->host_no);
  2566. }
  2567. }
  2568. static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *pACB)
  2569. {
  2570. struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
  2571. pACB->acb_flags |= ACB_F_MSG_START_BGRB;
  2572. writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
  2573. writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
  2574. if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
  2575. printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
  2576. rebulid' timeout \n", pACB->host->host_no);
  2577. }
  2578. return;
  2579. }
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_start_hba_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_start_hbb_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_start_hbc_bgrb(acb);
	}
}
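/*
 * Drain any stale doorbell state left over from before initialization
 * or a reset: acknowledge (clear) whatever outbound doorbell bits are
 * pending, then tell the IOP that the driver has read its data, so the
 * message Qbuffer handshake starts from a clean slate.
 */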
static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_doorbell;
		/* empty doorbell Qbuffer if the doorbell rang */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/* clear doorbell interrupt */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/* clear interrupt and message state */
		writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		/* let the IOP know the data has been read */
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		uint32_t outbound_doorbell;
		/* empty doorbell Qbuffer if the doorbell rang */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		}
	}
}
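/*
 * End-of-interrupt mode only exists on the type B message unit; the
 * type A and C paths are no-ops.  The mode is armed through the
 * drv2iop doorbell and, as with the other messages, a missing
 * acknowledge is only logged, not retried.
 */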
static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT\n");
			return;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_C:
		return;
	}
}
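/*
 * Hard-reset the adapter.  The first 64 bytes of PCI config space are
 * saved up front because the reset wipes them, and restored afterwards.
 * The reset itself is chip specific: the ARC1680 takes a magic write
 * into a reserved register, the ARC1880 must first be unlocked with the
 * 0xF/0x4/0xB/0x2/0x7/0xD write_sequence before RESET_ADAPTER can be
 * written to host_diagnostic, and everything else is reset through PCI
 * config register 0x84.
 */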
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
{
	uint8_t value[64];
	int i, count = 0;
	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
	u32 temp = 0;
	/* backup pci config data */
	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
	for (i = 0; i < 64; i++) {
		pci_read_config_byte(acb->pdev, i, &value[i]);
	}
	/* hardware reset signal */
	if (acb->dev_id == 0x1680) {
		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
	} else if (acb->dev_id == 0x1880) {
		do {
			count++;
			writel(0xF, &pmuC->write_sequence);
			writel(0x4, &pmuC->write_sequence);
			writel(0xB, &pmuC->write_sequence);
			writel(0x2, &pmuC->write_sequence);
			writel(0x7, &pmuC->write_sequence);
			writel(0xD, &pmuC->write_sequence);
			/* retry until the diagnostic write-enable bit comes up */
		} while ((((temp = readl(&pmuC->host_diagnostic)) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
	} else {
		pci_write_config_byte(acb->pdev, 0x84, 0x20);
	}
	msleep(2000);
	/* write back pci config data */
	for (i = 0; i < 64; i++) {
		pci_write_config_byte(acb->pdev, i, value[i]);
	}
	msleep(1000);
}
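/*
 * One-time IOP bring-up: with outbound interrupts masked, wait for the
 * firmware to report ready, push the CCB layout to the IOP, start the
 * background rebuild, drain stale doorbell state, enable EOI mode
 * (type B only), then unmask interrupts and mark the ACB initialized.
 */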
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;
	/* disable all outbound interrupts */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/* start background rebuild */
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if the doorbell rang */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound post queue and outbound doorbell interrupts */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
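/*
 * Abort everything the IOP still owns.  If CCBs are outstanding, mask
 * interrupts, ask the IOP to abort all commands, flush the outbound
 * post queue, and return every in-flight CCB to the free list before
 * re-enabling interrupts.  Returns the status of the abort handshake,
 * or 0x00 when nothing was outstanding.
 */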
static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	uint8_t rtnval = 0x00;
	int i = 0;
	unsigned long flags;
	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* disable all outbound interrupts */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* tell the iop to abort all outstanding commands */
		rtnval = arcmsr_abort_allcmd(acb);
		/* clear the entire outbound posted queue */
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				scsi_dma_unmap(ccb->pcmd);
				ccb->startdone = ARCMSR_CCB_DONE;
				ccb->ccb_flags = 0;
				spin_lock_irqsave(&acb->ccblist_lock, flags);
				list_add_tail(&ccb->list, &acb->ccb_free_list);
				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
			}
		}
		atomic_set(&acb->ccboutstandingcount, 0);
		/* enable all outbound interrupts */
		arcmsr_enable_outbound_ints(acb, intmask_org);
	}
	return rtnval;
}
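/*
 * SCSI error-handler bus reset callback.  If another reset is already
 * in progress (types A and C), wait up to 220 seconds for it to finish
 * and report success.  Otherwise try a soft IOP reset; when
 * arcmsr_iop_reset() returns zero, type A and C adapters escalate to a
 * full hardware reset and poll (up to ARCMSR_RETRYCOUNT sleeps) for
 * the firmware-ready indication before re-running the bring-up
 * sequence, while the remaining paths just rearm the device-map poll
 * tokens and timer.
 */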
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb;
	uint32_t intmask_org, outbound_doorbell;
	int retry_count = 0;
	int rtn = FAILED;
	acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
	printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d\n", acb->num_resets, acb->num_aborts);
	acb->num_resets++;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		if (acb->acb_flags & ACB_F_BUS_RESET) {
			long timeout;
			printk(KERN_ERR "arcmsr: a bus reset eh is already in progress\n");
			timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220 * HZ);
			if (timeout)
				return SUCCESS;
		}
		acb->acb_flags |= ACB_F_BUS_RESET;
		if (!arcmsr_iop_reset(acb)) {
			struct MessageUnit_A __iomem *reg;
			reg = acb->pmuA;
			arcmsr_hardware_reset(acb);
			acb->acb_flags &= ~ACB_F_IOP_INITED;
sleep_again:
			ssleep(ARCMSR_SLEEPTIME);
			if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
				printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
				if (retry_count > ARCMSR_RETRYCOUNT) {
					acb->fw_flag = FW_DEADLOCK;
					printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
					return FAILED;
				}
				retry_count++;
				goto sleep_again;
			}
			acb->acb_flags |= ACB_F_IOP_INITED;
			/* disable all outbound interrupts */
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_get_firmware_spec(acb);
			arcmsr_start_adapter_bgrb(acb);
			/* empty doorbell Qbuffer if the doorbell rang */
			outbound_doorbell = readl(&reg->outbound_doorbell);
			writel(outbound_doorbell, &reg->outbound_doorbell); /* clear interrupt */
			writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
			/* enable outbound post queue and outbound doorbell interrupts */
			arcmsr_enable_outbound_ints(acb, intmask_org);
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			rtn = SUCCESS;
			printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
		} else {
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			rtn = SUCCESS;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		acb->acb_flags |= ACB_F_BUS_RESET;
		if (!arcmsr_iop_reset(acb)) {
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			rtn = FAILED;
		} else {
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			rtn = SUCCESS;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		if (acb->acb_flags & ACB_F_BUS_RESET) {
			long timeout;
			printk(KERN_ERR "arcmsr: a bus reset eh is already in progress\n");
			timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220 * HZ);
			if (timeout)
				return SUCCESS;
		}
		acb->acb_flags |= ACB_F_BUS_RESET;
		if (!arcmsr_iop_reset(acb)) {
			struct MessageUnit_C __iomem *reg;
			reg = acb->pmuC;
			arcmsr_hardware_reset(acb);
			acb->acb_flags &= ~ACB_F_IOP_INITED;
sleep:
			ssleep(ARCMSR_SLEEPTIME);
			if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
				printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
				if (retry_count > ARCMSR_RETRYCOUNT) {
					acb->fw_flag = FW_DEADLOCK;
					printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
					return FAILED;
				}
				retry_count++;
				goto sleep;
			}
			acb->acb_flags |= ACB_F_IOP_INITED;
			/* disable all outbound interrupts */
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_get_firmware_spec(acb);
			arcmsr_start_adapter_bgrb(acb);
			/* empty doorbell Qbuffer if the doorbell rang */
			outbound_doorbell = readl(&reg->outbound_doorbell);
			writel(outbound_doorbell, &reg->outbound_doorbell_clear); /* clear interrupt */
			writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
			/* enable outbound post queue and outbound doorbell interrupts */
			arcmsr_enable_outbound_ints(acb, intmask_org);
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			rtn = SUCCESS;
			printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
		} else {
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			rtn = SUCCESS;
		}
		break;
	}
	}
	return rtn;
}
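/*
 * SCSI error-handler abort callback and its helper.  A single CCB is
 * "aborted" by marking it ARCMSR_CCB_ABORTED and polling for its
 * completion; ACB_F_ABORT is held around the whole operation, which
 * also pauses the periodic device-map poll.  If the command is no
 * longer outstanding there is nothing to abort and FAILED is returned.
 */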
static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb)
{
	int rtn;
	rtn = arcmsr_polling_ccbdone(acb, ccb);
	return rtn;
}
static int arcmsr_abort(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)cmd->device->host->hostdata;
	int i = 0;
	int rtn = FAILED;
	printk(KERN_NOTICE
		"arcmsr%d: abort device command of scsi id = %d lun = %d\n",
		acb->host->host_no, cmd->device->id, cmd->device->lun);
	acb->acb_flags |= ACB_F_ABORT;
	acb->num_aborts++;
	/*
	************************************************
	** while we are here, all interrupt service routines
	** are blocked: handle the abort as soon as possible and exit
	************************************************
	*/
	if (!atomic_read(&acb->ccboutstandingcount)) {
		acb->acb_flags &= ~ACB_F_ABORT;
		return rtn;
	}
	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
		struct CommandControlBlock *ccb = acb->pccb_pool[i];
		if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
			ccb->startdone = ARCMSR_CCB_ABORTED;
			rtn = arcmsr_abort_one_cmd(acb, ccb);
			break;
		}
	}
	acb->acb_flags &= ~ACB_F_ABORT;
	return rtn;
}
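/*
 * scsi_host_template .info hook: builds a one-line controller
 * description from the PCI device ID.  The 1110/1200/1202/1210 parts
 * predate RAID6 support, so raid6 is cleared before deliberately
 * falling through into the common SATA branch.
 */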
static const char *arcmsr_info(struct Scsi_Host *host)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	static char buf[256];
	char *type;
	int raid6 = 1;
	switch (acb->pdev->device) {
	case PCI_DEVICE_ID_ARECA_1110:
	case PCI_DEVICE_ID_ARECA_1200:
	case PCI_DEVICE_ID_ARECA_1202:
	case PCI_DEVICE_ID_ARECA_1210:
		raid6 = 0;
		/*FALLTHRU*/
	case PCI_DEVICE_ID_ARECA_1120:
	case PCI_DEVICE_ID_ARECA_1130:
	case PCI_DEVICE_ID_ARECA_1160:
	case PCI_DEVICE_ID_ARECA_1170:
	case PCI_DEVICE_ID_ARECA_1201:
	case PCI_DEVICE_ID_ARECA_1220:
	case PCI_DEVICE_ID_ARECA_1230:
	case PCI_DEVICE_ID_ARECA_1260:
	case PCI_DEVICE_ID_ARECA_1270:
	case PCI_DEVICE_ID_ARECA_1280:
		type = "SATA";
		break;
	case PCI_DEVICE_ID_ARECA_1380:
	case PCI_DEVICE_ID_ARECA_1381:
	case PCI_DEVICE_ID_ARECA_1680:
	case PCI_DEVICE_ID_ARECA_1681:
	case PCI_DEVICE_ID_ARECA_1880:
		type = "SAS";
		break;
	default:
		type = "X-TYPE";
		break;
	}
	sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
		type, raid6 ? " (RAID6 capable)" : "",
		ARCMSR_DRIVER_VERSION);
	return buf;
}