arcmsr_hba.c 77 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694
  1. /*
  2. *******************************************************************************
  3. ** O.S : Linux
  4. ** FILE NAME : arcmsr_hba.c
  5. ** BY : Erich Chen
  6. ** Description: SCSI RAID Device Driver for
  7. ** ARECA RAID Host adapter
  8. *******************************************************************************
  9. ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
  10. **
  11. ** Web site: www.areca.com.tw
  12. ** E-mail: support@areca.com.tw
  13. **
  14. ** This program is free software; you can redistribute it and/or modify
  15. ** it under the terms of the GNU General Public License version 2 as
  16. ** published by the Free Software Foundation.
  17. ** This program is distributed in the hope that it will be useful,
  18. ** but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. ** GNU General Public License for more details.
  21. *******************************************************************************
  22. ** Redistribution and use in source and binary forms, with or without
  23. ** modification, are permitted provided that the following conditions
  24. ** are met:
  25. ** 1. Redistributions of source code must retain the above copyright
  26. ** notice, this list of conditions and the following disclaimer.
  27. ** 2. Redistributions in binary form must reproduce the above copyright
  28. ** notice, this list of conditions and the following disclaimer in the
  29. ** documentation and/or other materials provided with the distribution.
  30. ** 3. The name of the author may not be used to endorse or promote products
  31. ** derived from this software without specific prior written permission.
  32. **
  33. ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  34. ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  35. ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  36. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  37. ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
  38. ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  39. ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
  40. ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  41. ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
  42. ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  43. *******************************************************************************
  44. ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
  45. ** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
  46. *******************************************************************************
  47. */
  48. #include <linux/module.h>
  49. #include <linux/reboot.h>
  50. #include <linux/spinlock.h>
  51. #include <linux/pci_ids.h>
  52. #include <linux/interrupt.h>
  53. #include <linux/moduleparam.h>
  54. #include <linux/errno.h>
  55. #include <linux/types.h>
  56. #include <linux/delay.h>
  57. #include <linux/dma-mapping.h>
  58. #include <linux/timer.h>
  59. #include <linux/pci.h>
  60. #include <linux/aer.h>
  61. #include <linux/slab.h>
  62. #include <asm/dma.h>
  63. #include <asm/io.h>
  64. #include <asm/system.h>
  65. #include <asm/uaccess.h>
  66. #include <scsi/scsi_host.h>
  67. #include <scsi/scsi.h>
  68. #include <scsi/scsi_cmnd.h>
  69. #include <scsi/scsi_tcq.h>
  70. #include <scsi/scsi_device.h>
  71. #include <scsi/scsi_transport.h>
  72. #include <scsi/scsicam.h>
  73. #include "arcmsr.h"
#ifdef CONFIG_SCSI_ARCMSR_RESET
/*
 * Bus-reset tunables: how long to wait between polls and how many times
 * to retry while waiting for the firmware to report ready after a reset.
 * Both are writable at runtime via sysfs (S_IWUSR).
 */
static int sleeptime = 20;
static int retrycount = 12;
module_param(sleeptime, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(sleeptime, "The waiting period for FW ready while bus reset");
module_param(retrycount, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(retrycount, "The retry count for FW ready while bus reset");
#endif
MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Bus Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);

/* Forward declarations of the driver's internal entry points. */
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
					struct scsi_cmnd *cmd);
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct scsi_cmnd *cmd,
					void (*done) (struct scsi_cmnd *));
static int arcmsr_probe(struct pci_dev *pdev,
				const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(unsigned long pacb);
static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
  113. static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
  114. int queue_depth, int reason)
  115. {
  116. if (reason != SCSI_QDEPTH_DEFAULT)
  117. return -EOPNOTSUPP;
  118. if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
  119. queue_depth = ARCMSR_MAX_CMD_PERLUN;
  120. scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
  121. return queue_depth;
  122. }
/*
 * Host template handed to the SCSI mid-layer: wires up the driver's
 * entry points and advertises the adapter's queueing/transfer limits.
 */
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module = THIS_MODULE,
	/* adjacent string literals: version is concatenated onto the name */
	.name = "ARCMSR ARECA SATA/SAS RAID Host Bus Adapter"
							ARCMSR_DRIVER_VERSION,
	.info = arcmsr_info,
	.queuecommand = arcmsr_queue_command,
	.eh_abort_handler = arcmsr_abort,
	.eh_bus_reset_handler = arcmsr_bus_reset,
	.bios_param = arcmsr_bios_param,
	.change_queue_depth = arcmsr_adjust_disk_queue_depth,
	.can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
	.this_id = ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize = ARCMSR_MAX_SG_ENTRIES,
	.max_sectors = ARCMSR_MAX_XFER_SECTORS,
	.cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = arcmsr_host_attrs,
};
/*
 * PCI IDs of all supported Areca adapters.  Device 0x1201 is handled as
 * adapter type B by arcmsr_define_adapter_type(); all others as type A.
 */
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
/* PCI driver registration: probe/remove/shutdown hooks for the ID table above. */
static struct pci_driver arcmsr_pci_driver = {
	.name = "arcmsr",
	.id_table = arcmsr_device_id_table,
	.probe = arcmsr_probe,
	.remove = arcmsr_remove,
	.shutdown = arcmsr_shutdown,
};
  170. static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
  171. {
  172. irqreturn_t handle_state;
  173. struct AdapterControlBlock *acb = dev_id;
  174. spin_lock(acb->host->host_lock);
  175. handle_state = arcmsr_interrupt(acb);
  176. spin_unlock(acb->host->host_lock);
  177. return handle_state;
  178. }
/*
 * arcmsr_bios_param - supply BIOS disk geometry
 * @sdev: scsi device (unused)
 * @bdev: block device whose partition table may already encode a geometry
 * @capacity: device capacity in sectors
 * @geom: output: geom[0]=heads, geom[1]=sectors/track, geom[2]=cylinders
 *
 * Prefers a geometry derived from an existing partition table; otherwise
 * falls back to 64 heads / 32 sectors, switching to 255/63 when that
 * would exceed 1024 cylinders.  Returns 0 with @geom filled in.
 */
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	int ret, heads, sectors, cylinders, total_capacity;
	unsigned char *buffer;/* return copy of block device's partition table */

	buffer = scsi_bios_ptable(bdev);
	if (buffer) {
		ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
		kfree(buffer);
		if (ret != -1)
			return ret;
	}
	/* NOTE(review): sector_t is narrowed to int here, so the fallback
	 * geometry can be wrong for very large devices — confirm this is
	 * acceptable (the partition-table path above is unaffected). */
	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		/* big disk: use the conventional large-disk translation */
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}
  205. static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
  206. {
  207. struct pci_dev *pdev = acb->pdev;
  208. u16 dev_id;
  209. pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
  210. switch (dev_id) {
  211. case 0x1201 : {
  212. acb->adapter_type = ACB_ADAPTER_TYPE_B;
  213. }
  214. break;
  215. default : acb->adapter_type = ACB_ADAPTER_TYPE_A;
  216. }
  217. }
/*
 * arcmsr_alloc_ccb_pool - map controller registers and build the CCB pool
 * @acb: adapter control block being initialized
 *
 * Allocates one DMA-coherent region holding ARCMSR_MAX_FREECCB_NUM command
 * control blocks plus 0x20 bytes of slack so the pool can be rounded up to
 * a 32-byte boundary: the firmware addresses a CCB by its physical address
 * shifted right by 5 (cdb_shifted_phyaddr), so every CCB must be 32-byte
 * aligned.  All CCBs are threaded onto acb->ccb_free_list.
 *
 * Type A maps its message unit from BAR 0 into acb->pmuA.  Type B places a
 * MessageUnit_B structure behind the CCB pool in the same coherent region
 * and fills it with doorbell/buffer register pointers from BARs 0 and 2.
 *
 * Returns 0 on success, -ENOMEM on any mapping or allocation failure.
 */
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct pci_dev *pdev = acb->pdev;
		void *dma_coherent;
		dma_addr_t dma_coherent_handle, dma_addr;
		struct CommandControlBlock *ccb_tmp;
		int i, j;

		/* type A message unit lives in BAR 0 */
		acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
							acb->host->host_no);
			return -ENOMEM;
		}
		/* +0x20 slack allows the 32-byte round-up below */
		dma_coherent = dma_alloc_coherent(&pdev->dev,
			ARCMSR_MAX_FREECCB_NUM *
			sizeof (struct CommandControlBlock) + 0x20,
			&dma_coherent_handle, GFP_KERNEL);
		if (!dma_coherent) {
			iounmap(acb->pmuA);
			return -ENOMEM;
		}
		/* keep the unaligned originals for dma_free_coherent() */
		acb->dma_coherent = dma_coherent;
		acb->dma_coherent_handle = dma_coherent_handle;
		/* round both CPU and bus addresses up to 32 bytes in lockstep */
		if (((unsigned long)dma_coherent & 0x1F)) {
			dma_coherent = dma_coherent +
				(0x20 - ((unsigned long)dma_coherent & 0x1F));
			dma_coherent_handle = dma_coherent_handle +
				(0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
		}
		dma_addr = dma_coherent_handle;
		ccb_tmp = (struct CommandControlBlock *)dma_coherent;
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			/* firmware addresses CCBs in 32-byte units */
			ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
			ccb_tmp->acb = acb;
			acb->pccb_pool[i] = ccb_tmp;
			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
			dma_addr = dma_addr + sizeof(struct CommandControlBlock);
			ccb_tmp++;
		}
		/* virtual-to-bus delta: both pointers are one past the pool,
		 * so their difference is the offset valid for any CCB */
		acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
		for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
			for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
				/* NOTE(review): type A starts devices as GONE
				 * while type B below uses GOOD — confirm the
				 * asymmetry is intentional */
				acb->devstate[i][j] = ARECA_RAID_GONE;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct pci_dev *pdev = acb->pdev;
		struct MessageUnit_B *reg;
		void __iomem *mem_base0, *mem_base1;
		void *dma_coherent;
		dma_addr_t dma_coherent_handle, dma_addr;
		struct CommandControlBlock *ccb_tmp;
		int i, j;

		/* one region: aligned CCB pool followed by MessageUnit_B */
		dma_coherent = dma_alloc_coherent(&pdev->dev,
			((ARCMSR_MAX_FREECCB_NUM *
			sizeof(struct CommandControlBlock) + 0x20) +
			sizeof(struct MessageUnit_B)),
			&dma_coherent_handle, GFP_KERNEL);
		if (!dma_coherent)
			return -ENOMEM;
		acb->dma_coherent = dma_coherent;
		acb->dma_coherent_handle = dma_coherent_handle;
		/* round both CPU and bus addresses up to 32 bytes in lockstep */
		if (((unsigned long)dma_coherent & 0x1F)) {
			dma_coherent = dma_coherent +
				(0x20 - ((unsigned long)dma_coherent & 0x1F));
			dma_coherent_handle = dma_coherent_handle +
				(0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
		}
		dma_addr = dma_coherent_handle;
		ccb_tmp = (struct CommandControlBlock *)dma_coherent;
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			/* firmware addresses CCBs in 32-byte units */
			ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
			ccb_tmp->acb = acb;
			acb->pccb_pool[i] = ccb_tmp;
			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
			dma_addr = dma_addr + sizeof(struct CommandControlBlock);
			ccb_tmp++;
		}
		/* MessageUnit_B sits directly behind the CCB pool */
		reg = (struct MessageUnit_B *)(dma_coherent +
		ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
		acb->pmuB = reg;
		mem_base0 = ioremap(pci_resource_start(pdev, 0),
					pci_resource_len(pdev, 0));
		if (!mem_base0)
			goto out;
		mem_base1 = ioremap(pci_resource_start(pdev, 2),
					pci_resource_len(pdev, 2));
		if (!mem_base1) {
			iounmap(mem_base0);
			goto out;
		}
		/* doorbells in BAR 0; message buffers in BAR 2 */
		reg->drv2iop_doorbell_reg = mem_base0 + ARCMSR_DRV2IOP_DOORBELL;
		reg->drv2iop_doorbell_mask_reg = mem_base0 +
						ARCMSR_DRV2IOP_DOORBELL_MASK;
		reg->iop2drv_doorbell_reg = mem_base0 + ARCMSR_IOP2DRV_DOORBELL;
		reg->iop2drv_doorbell_mask_reg = mem_base0 +
						ARCMSR_IOP2DRV_DOORBELL_MASK;
		reg->ioctl_wbuffer_reg = mem_base1 + ARCMSR_IOCTL_WBUFFER;
		reg->ioctl_rbuffer_reg = mem_base1 + ARCMSR_IOCTL_RBUFFER;
		reg->msgcode_rwbuffer_reg = mem_base1 + ARCMSR_MSGCODE_RWBUFFER;
		/* virtual-to-bus delta, same one-past-the-end trick as type A */
		acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
		for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
			for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
				acb->devstate[i][j] = ARECA_RAID_GOOD;
		}
		break;
	}
	return 0;

out:
	/* ioremap failed for type B: release the coherent region */
	dma_free_coherent(&acb->pdev->dev,
		(ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
		sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
	return -ENOMEM;
}
  334. static void arcmsr_message_isr_bh_fn(struct work_struct *work)
  335. {
  336. struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh);
  337. switch (acb->adapter_type) {
  338. case ACB_ADAPTER_TYPE_A: {
  339. struct MessageUnit_A __iomem *reg = acb->pmuA;
  340. char *acb_dev_map = (char *)acb->device_map;
  341. uint32_t __iomem *signature = (uint32_t __iomem *) (&reg->message_rwbuffer[0]);
  342. char __iomem *devicemap = (char __iomem *) (&reg->message_rwbuffer[21]);
  343. int target, lun;
  344. struct scsi_device *psdev;
  345. char diff;
  346. atomic_inc(&acb->rq_map_token);
  347. if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
  348. for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
  349. diff = (*acb_dev_map)^readb(devicemap);
  350. if (diff != 0) {
  351. char temp;
  352. *acb_dev_map = readb(devicemap);
  353. temp = *acb_dev_map;
  354. for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
  355. if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
  356. scsi_add_device(acb->host, 0, target, lun);
  357. } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
  358. psdev = scsi_device_lookup(acb->host, 0, target, lun);
  359. if (psdev != NULL) {
  360. scsi_remove_device(psdev);
  361. scsi_device_put(psdev);
  362. }
  363. }
  364. temp >>= 1;
  365. diff >>= 1;
  366. }
  367. }
  368. devicemap++;
  369. acb_dev_map++;
  370. }
  371. }
  372. break;
  373. }
  374. case ACB_ADAPTER_TYPE_B: {
  375. struct MessageUnit_B *reg = acb->pmuB;
  376. char *acb_dev_map = (char *)acb->device_map;
  377. uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer_reg[0]);
  378. char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer_reg[21]);
  379. int target, lun;
  380. struct scsi_device *psdev;
  381. char diff;
  382. atomic_inc(&acb->rq_map_token);
  383. if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
  384. for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
  385. diff = (*acb_dev_map)^readb(devicemap);
  386. if (diff != 0) {
  387. char temp;
  388. *acb_dev_map = readb(devicemap);
  389. temp = *acb_dev_map;
  390. for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
  391. if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
  392. scsi_add_device(acb->host, 0, target, lun);
  393. } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
  394. psdev = scsi_device_lookup(acb->host, 0, target, lun);
  395. if (psdev != NULL) {
  396. scsi_remove_device(psdev);
  397. scsi_device_put(psdev);
  398. }
  399. }
  400. temp >>= 1;
  401. diff >>= 1;
  402. }
  403. }
  404. devicemap++;
  405. acb_dev_map++;
  406. }
  407. }
  408. }
  409. }
  410. }
  411. static int arcmsr_probe(struct pci_dev *pdev,
  412. const struct pci_device_id *id)
  413. {
  414. struct Scsi_Host *host;
  415. struct AdapterControlBlock *acb;
  416. uint8_t bus, dev_fun;
  417. int error;
  418. error = pci_enable_device(pdev);
  419. if (error)
  420. goto out;
  421. pci_set_master(pdev);
  422. host = scsi_host_alloc(&arcmsr_scsi_host_template,
  423. sizeof(struct AdapterControlBlock));
  424. if (!host) {
  425. error = -ENOMEM;
  426. goto out_disable_device;
  427. }
  428. acb = (struct AdapterControlBlock *)host->hostdata;
  429. memset(acb, 0, sizeof (struct AdapterControlBlock));
  430. error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  431. if (error) {
  432. error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  433. if (error) {
  434. printk(KERN_WARNING
  435. "scsi%d: No suitable DMA mask available\n",
  436. host->host_no);
  437. goto out_host_put;
  438. }
  439. }
  440. bus = pdev->bus->number;
  441. dev_fun = pdev->devfn;
  442. acb->host = host;
  443. acb->pdev = pdev;
  444. host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
  445. host->max_lun = ARCMSR_MAX_TARGETLUN;
  446. host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
  447. host->max_cmd_len = 16; /*this is issue of 64bit LBA, over 2T byte*/
  448. host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
  449. host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
  450. host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
  451. host->this_id = ARCMSR_SCSI_INITIATOR_ID;
  452. host->unique_id = (bus << 8) | dev_fun;
  453. host->irq = pdev->irq;
  454. error = pci_request_regions(pdev, "arcmsr");
  455. if (error) {
  456. goto out_host_put;
  457. }
  458. arcmsr_define_adapter_type(acb);
  459. acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
  460. ACB_F_MESSAGE_RQBUFFER_CLEARED |
  461. ACB_F_MESSAGE_WQBUFFER_READED);
  462. acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
  463. INIT_LIST_HEAD(&acb->ccb_free_list);
  464. INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
  465. error = arcmsr_alloc_ccb_pool(acb);
  466. if (error)
  467. goto out_release_regions;
  468. arcmsr_iop_init(acb);
  469. error = request_irq(pdev->irq, arcmsr_do_interrupt,
  470. IRQF_SHARED, "arcmsr", acb);
  471. if (error)
  472. goto out_free_ccb_pool;
  473. pci_set_drvdata(pdev, host);
  474. if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
  475. host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
  476. error = scsi_add_host(host, &pdev->dev);
  477. if (error)
  478. goto out_free_irq;
  479. error = arcmsr_alloc_sysfs_attr(acb);
  480. if (error)
  481. goto out_free_sysfs;
  482. scsi_scan_host(host);
  483. #ifdef CONFIG_SCSI_ARCMSR_AER
  484. pci_enable_pcie_error_reporting(pdev);
  485. #endif
  486. atomic_set(&acb->rq_map_token, 16);
  487. acb->fw_state = true;
  488. init_timer(&acb->eternal_timer);
  489. acb->eternal_timer.expires = jiffies + msecs_to_jiffies(10*HZ);
  490. acb->eternal_timer.data = (unsigned long) acb;
  491. acb->eternal_timer.function = &arcmsr_request_device_map;
  492. add_timer(&acb->eternal_timer);
  493. return 0;
  494. out_free_sysfs:
  495. out_free_irq:
  496. free_irq(pdev->irq, acb);
  497. out_free_ccb_pool:
  498. arcmsr_free_ccb_pool(acb);
  499. out_release_regions:
  500. pci_release_regions(pdev);
  501. out_host_put:
  502. scsi_host_put(host);
  503. out_disable_device:
  504. pci_disable_device(pdev);
  505. out:
  506. return error;
  507. }
  508. static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
  509. {
  510. struct MessageUnit_A __iomem *reg = acb->pmuA;
  511. uint32_t Index;
  512. uint8_t Retries = 0x00;
  513. do {
  514. for (Index = 0; Index < 100; Index++) {
  515. if (readl(&reg->outbound_intstatus) &
  516. ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
  517. writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
  518. &reg->outbound_intstatus);
  519. return 0x00;
  520. }
  521. msleep(10);
  522. }/*max 1 seconds*/
  523. } while (Retries++ < 20);/*max 20 sec*/
  524. return 0xff;
  525. }
  526. static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
  527. {
  528. struct MessageUnit_B *reg = acb->pmuB;
  529. uint32_t Index;
  530. uint8_t Retries = 0x00;
  531. do {
  532. for (Index = 0; Index < 100; Index++) {
  533. if (readl(reg->iop2drv_doorbell_reg)
  534. & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
  535. writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
  536. , reg->iop2drv_doorbell_reg);
  537. writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
  538. return 0x00;
  539. }
  540. msleep(10);
  541. }/*max 1 seconds*/
  542. } while (Retries++ < 20);/*max 20 sec*/
  543. return 0xff;
  544. }
  545. static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
  546. {
  547. struct MessageUnit_A __iomem *reg = acb->pmuA;
  548. writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
  549. if (arcmsr_hba_wait_msgint_ready(acb)) {
  550. printk(KERN_NOTICE
  551. "arcmsr%d: wait 'abort all outstanding command' timeout \n"
  552. , acb->host->host_no);
  553. return 0xff;
  554. }
  555. return 0x00;
  556. }
  557. static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
  558. {
  559. struct MessageUnit_B *reg = acb->pmuB;
  560. writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
  561. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  562. printk(KERN_NOTICE
  563. "arcmsr%d: wait 'abort all outstanding command' timeout \n"
  564. , acb->host->host_no);
  565. return 0xff;
  566. }
  567. return 0x00;
  568. }
  569. static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
  570. {
  571. uint8_t rtnval = 0;
  572. switch (acb->adapter_type) {
  573. case ACB_ADAPTER_TYPE_A: {
  574. rtnval = arcmsr_abort_hba_allcmd(acb);
  575. }
  576. break;
  577. case ACB_ADAPTER_TYPE_B: {
  578. rtnval = arcmsr_abort_hbb_allcmd(acb);
  579. }
  580. }
  581. return rtnval;
  582. }
  583. static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
  584. {
  585. struct scsi_cmnd *pcmd = ccb->pcmd;
  586. scsi_dma_unmap(pcmd);
  587. }
/* Complete a CCB back to the SCSI midlayer: release its DMA mapping,
 * decrement the outstanding count when stand_flag == 1 (i.e. the CCB was
 * counted as posted), return the CCB to the free list and call scsi_done().
 * NOTE(review): the CCB joins the free list before scsi_done() runs —
 * presumably serialized by the host lock; verify against callers. */
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
{
struct AdapterControlBlock *acb = ccb->acb;
struct scsi_cmnd *pcmd = ccb->pcmd;
arcmsr_pci_unmap_dma(ccb);
if (stand_flag == 1)
atomic_dec(&acb->ccboutstandingcount);
ccb->startdone = ARCMSR_CCB_DONE;
ccb->ccb_flags = 0;
list_add_tail(&ccb->list, &acb->ccb_free_list);
pcmd->scsi_done(pcmd);
}
  600. static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
  601. {
  602. struct MessageUnit_A __iomem *reg = acb->pmuA;
  603. int retry_count = 30;
  604. writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
  605. do {
  606. if (!arcmsr_hba_wait_msgint_ready(acb))
  607. break;
  608. else {
  609. retry_count--;
  610. printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
  611. timeout, retry count down = %d \n", acb->host->host_no, retry_count);
  612. }
  613. } while (retry_count != 0);
  614. }
  615. static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
  616. {
  617. struct MessageUnit_B *reg = acb->pmuB;
  618. int retry_count = 30;
  619. writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg);
  620. do {
  621. if (!arcmsr_hbb_wait_msgint_ready(acb))
  622. break;
  623. else {
  624. retry_count--;
  625. printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
  626. timeout,retry count down = %d \n", acb->host->host_no, retry_count);
  627. }
  628. } while (retry_count != 0);
  629. }
  630. static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
  631. {
  632. switch (acb->adapter_type) {
  633. case ACB_ADAPTER_TYPE_A: {
  634. arcmsr_flush_hba_cache(acb);
  635. }
  636. break;
  637. case ACB_ADAPTER_TYPE_B: {
  638. arcmsr_flush_hbb_cache(acb);
  639. }
  640. }
  641. }
  642. static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
  643. {
  644. struct scsi_cmnd *pcmd = ccb->pcmd;
  645. struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
  646. pcmd->result = DID_OK << 16;
  647. if (sensebuffer) {
  648. int sense_data_length =
  649. sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
  650. ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
  651. memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
  652. memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
  653. sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
  654. sensebuffer->Valid = 1;
  655. }
  656. }
  657. static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
  658. {
  659. u32 orig_mask = 0;
  660. switch (acb->adapter_type) {
  661. case ACB_ADAPTER_TYPE_A : {
  662. struct MessageUnit_A __iomem *reg = acb->pmuA;
  663. orig_mask = readl(&reg->outbound_intmask);
  664. writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
  665. &reg->outbound_intmask);
  666. }
  667. break;
  668. case ACB_ADAPTER_TYPE_B : {
  669. struct MessageUnit_B *reg = acb->pmuB;
  670. orig_mask = readl(reg->iop2drv_doorbell_mask_reg);
  671. writel(0, reg->iop2drv_doorbell_mask_reg);
  672. }
  673. break;
  674. }
  675. return orig_mask;
  676. }
  677. static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \
  678. struct CommandControlBlock *ccb, uint32_t flag_ccb)
  679. {
  680. uint8_t id, lun;
  681. id = ccb->pcmd->device->id;
  682. lun = ccb->pcmd->device->lun;
  683. if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
  684. if (acb->devstate[id][lun] == ARECA_RAID_GONE)
  685. acb->devstate[id][lun] = ARECA_RAID_GOOD;
  686. ccb->pcmd->result = DID_OK << 16;
  687. arcmsr_ccb_complete(ccb, 1);
  688. } else {
  689. switch (ccb->arcmsr_cdb.DeviceStatus) {
  690. case ARCMSR_DEV_SELECT_TIMEOUT: {
  691. acb->devstate[id][lun] = ARECA_RAID_GONE;
  692. ccb->pcmd->result = DID_NO_CONNECT << 16;
  693. arcmsr_ccb_complete(ccb, 1);
  694. }
  695. break;
  696. case ARCMSR_DEV_ABORTED:
  697. case ARCMSR_DEV_INIT_FAIL: {
  698. acb->devstate[id][lun] = ARECA_RAID_GONE;
  699. ccb->pcmd->result = DID_BAD_TARGET << 16;
  700. arcmsr_ccb_complete(ccb, 1);
  701. }
  702. break;
  703. case ARCMSR_DEV_CHECK_CONDITION: {
  704. acb->devstate[id][lun] = ARECA_RAID_GOOD;
  705. arcmsr_report_sense_info(ccb);
  706. arcmsr_ccb_complete(ccb, 1);
  707. }
  708. break;
  709. default:
  710. printk(KERN_NOTICE
  711. "arcmsr%d: scsi id = %d lun = %d"
  712. " isr get command error done, "
  713. "but got unknown DeviceStatus = 0x%x \n"
  714. , acb->host->host_no
  715. , id
  716. , lun
  717. , ccb->arcmsr_cdb.DeviceStatus);
  718. acb->devstate[id][lun] = ARECA_RAID_GONE;
  719. ccb->pcmd->result = DID_NO_CONNECT << 16;
  720. arcmsr_ccb_complete(ccb, 1);
  721. break;
  722. }
  723. }
  724. }
/* Resolve one done-queue token to its CCB and complete it.  flag_ccb holds
 * the CCB's DMA address shifted right by 5, so the virtual address is
 * recovered as vir2phy_offset + (flag_ccb << 5).  A CCB that is not in the
 * ARCMSR_CCB_START state is either an abort victim (completed with
 * DID_ABORT) or a corrupt token (logged). */
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t flag_ccb)
{
struct CommandControlBlock *ccb;
/* translate the 32-bit queue token back into a driver CCB pointer */
ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
if (ccb->startdone == ARCMSR_CCB_ABORTED) {
/* eh_abort marked this CCB; finish it as aborted */
struct scsi_cmnd *abortcmd = ccb->pcmd;
if (abortcmd) {
abortcmd->result |= DID_ABORT << 16;
arcmsr_ccb_complete(ccb, 1);
printk(KERN_NOTICE "arcmsr%d: ccb ='0x%p' \
isr got aborted command \n", acb->host->host_no, ccb);
}
}
/* anything else is an inconsistent token — log the details */
printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
done acb = '0x%p'"
"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
" ccboutstandingcount = %d \n"
, acb->host->host_no
, acb
, ccb
, ccb->acb
, ccb->startdone
, atomic_read(&acb->ccboutstandingcount));
}
else
arcmsr_report_ccb_state(acb, ccb, flag_ccb);
}
/* After an abort, drain everything the IOP has posted to its done queue(s)
 * so no stale completions are delivered later.  For type B the circular
 * queue indices are reset as well. */
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
int i = 0;
uint32_t flag_ccb;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
uint32_t outbound_intstatus;
outbound_intstatus = readl(&reg->outbound_intstatus) &
acb->outbound_int_enable;
/*clear and abort all outbound posted Q*/
writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
/* pop completions until the port reads empty (all ones) or the
 * maximum number of outstanding commands has been consumed */
while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
arcmsr_drain_donequeue(acb, flag_ccb);
}
}
break;
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
/*clear all outbound posted Q*/
for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
/* consume the slot, then complete the CCB */
writel(0, &reg->done_qbuffer[i]);
arcmsr_drain_donequeue(acb, flag_ccb);
}
/* scrub the matching inbound post-queue slot too */
writel(0, &reg->post_qbuffer[i]);
}
reg->doneq_index = 0;
reg->postq_index = 0;
}
break;
}
}
/* PCI remove hook: tear the adapter down in roughly the reverse order of
 * probe.  Outstanding commands are polled at 25 ms intervals (up to
 * ARCMSR_MAX_OUTSTANDING_CMD passes); anything still in flight afterwards
 * is aborted on the IOP and completed with DID_ABORT. */
static void arcmsr_remove(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
int poll_count = 0;
arcmsr_free_sysfs_attr(acb);
scsi_remove_host(host);
flush_scheduled_work();
del_timer_sync(&acb->eternal_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
acb->acb_flags &= ~ACB_F_IOP_INITED;
/* give in-flight commands a chance to complete normally */
for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
if (!atomic_read(&acb->ccboutstandingcount))
break;
arcmsr_interrupt(acb);/* FIXME: need spinlock */
msleep(25);
}
if (atomic_read(&acb->ccboutstandingcount)) {
int i;
/* still busy: force-abort on the IOP and fail the stragglers */
arcmsr_abort_allcmd(acb);
arcmsr_done4abort_postqueue(acb);
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
ccb->startdone = ARCMSR_CCB_ABORTED;
ccb->pcmd->result = DID_ABORT << 16;
arcmsr_ccb_complete(ccb, 1);
}
}
}
free_irq(pdev->irq, acb);
arcmsr_free_ccb_pool(acb);
pci_release_regions(pdev);
scsi_host_put(host);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
/* PCI shutdown hook: quiesce the adapter (stop background rebuild, flush
 * its cache) so a reboot/poweroff does not lose cached writes. */
static void arcmsr_shutdown(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)host->hostdata;
/* stop the periodic device-map timer and any queued bottom-half work */
del_timer_sync(&acb->eternal_timer);
arcmsr_disable_outbound_ints(acb);
flush_scheduled_work();
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
}
  839. static int arcmsr_module_init(void)
  840. {
  841. int error = 0;
  842. error = pci_register_driver(&arcmsr_pci_driver);
  843. return error;
  844. }
/* Module teardown: unregister the PCI driver (detaches all adapters). */
static void arcmsr_module_exit(void)
{
pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
/* Re-enable the outbound interrupt sources needed for normal operation,
 * starting from the mask previously saved by
 * arcmsr_disable_outbound_ints(). */
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
u32 intmask_org)
{
u32 mask;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A : {
struct MessageUnit_A __iomem *reg = acb->pmuA;
/* type A: a set mask bit disables a source (see the disable path),
 * so clear the postqueue/doorbell/message bits to enable them */
mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
writel(mask, &reg->outbound_intmask);
acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
}
break;
case ACB_ADAPTER_TYPE_B : {
struct MessageUnit_B *reg = acb->pmuB;
/* type B: a set doorbell-mask bit enables a source */
mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
ARCMSR_IOP2DRV_DATA_READ_OK |
ARCMSR_IOP2DRV_CDB_DONE |
ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
writel(mask, reg->iop2drv_doorbell_mask_reg);
/* NOTE(review): (intmask_org | mask) equals mask here, since mask is
 * already intmask_org OR'd with constants — presumably intentional */
acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
}
}
}
  876. static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
  877. struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
  878. {
  879. struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
  880. int8_t *psge = (int8_t *)&arcmsr_cdb->u;
  881. __le32 address_lo, address_hi;
  882. int arccdbsize = 0x30;
  883. int nseg;
  884. ccb->pcmd = pcmd;
  885. memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
  886. arcmsr_cdb->Bus = 0;
  887. arcmsr_cdb->TargetID = pcmd->device->id;
  888. arcmsr_cdb->LUN = pcmd->device->lun;
  889. arcmsr_cdb->Function = 1;
  890. arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
  891. arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
  892. memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
  893. nseg = scsi_dma_map(pcmd);
  894. if (nseg > ARCMSR_MAX_SG_ENTRIES)
  895. return FAILED;
  896. BUG_ON(nseg < 0);
  897. if (nseg) {
  898. __le32 length;
  899. int i, cdb_sgcount = 0;
  900. struct scatterlist *sg;
  901. /* map stor port SG list to our iop SG List. */
  902. scsi_for_each_sg(pcmd, sg, nseg, i) {
  903. /* Get the physical address of the current data pointer */
  904. length = cpu_to_le32(sg_dma_len(sg));
  905. address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
  906. address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
  907. if (address_hi == 0) {
  908. struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
  909. pdma_sg->address = address_lo;
  910. pdma_sg->length = length;
  911. psge += sizeof (struct SG32ENTRY);
  912. arccdbsize += sizeof (struct SG32ENTRY);
  913. } else {
  914. struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
  915. pdma_sg->addresshigh = address_hi;
  916. pdma_sg->address = address_lo;
  917. pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
  918. psge += sizeof (struct SG64ENTRY);
  919. arccdbsize += sizeof (struct SG64ENTRY);
  920. }
  921. cdb_sgcount++;
  922. }
  923. arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
  924. arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
  925. if ( arccdbsize > 256)
  926. arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
  927. }
  928. if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
  929. arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
  930. ccb->ccb_flags |= CCB_FLAG_WRITE;
  931. }
  932. return SUCCESS;
  933. }
/* Hand a built CCB to the IOP: write its shifted physical address to the
 * inbound queue port (type A), or into the circular post queue followed by
 * a doorbell kick (type B). */
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
atomic_inc(&acb->ccboutstandingcount);
ccb->startdone = ARCMSR_CCB_START;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
/* the low address bit tells the IOP the CDB exceeds 256 bytes
 * (set when arcmsr_build_ccb flagged ARCMSR_CDB_FLAG_SGL_BSIZE) */
if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
&reg->inbound_queueport);
else {
writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
}
}
break;
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
uint32_t ending_index, index = reg->postq_index;
/* zero the slot after ours so the IOP sees the queue's end */
ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
writel(0, &reg->post_qbuffer[ending_index]);
if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
&reg->post_qbuffer[index]);
}
else {
writel(cdb_shifted_phyaddr, &reg->post_qbuffer[index]);
}
index++;
index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
reg->postq_index = index;
/* ring the doorbell so the IOP fetches the new entry */
writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell_reg);
}
break;
}
}
  971. static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
  972. {
  973. struct MessageUnit_A __iomem *reg = acb->pmuA;
  974. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  975. writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
  976. if (arcmsr_hba_wait_msgint_ready(acb)) {
  977. printk(KERN_NOTICE
  978. "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
  979. , acb->host->host_no);
  980. }
  981. }
  982. static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
  983. {
  984. struct MessageUnit_B *reg = acb->pmuB;
  985. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  986. writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg);
  987. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  988. printk(KERN_NOTICE
  989. "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
  990. , acb->host->host_no);
  991. }
  992. }
  993. static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
  994. {
  995. switch (acb->adapter_type) {
  996. case ACB_ADAPTER_TYPE_A: {
  997. arcmsr_stop_hba_bgrb(acb);
  998. }
  999. break;
  1000. case ACB_ADAPTER_TYPE_B: {
  1001. arcmsr_stop_hbb_bgrb(acb);
  1002. }
  1003. break;
  1004. }
  1005. }
/* Undo the CCB-pool setup: unmap the MMIO region(s) and free the single
 * coherent DMA allocation that holds all CCBs (plus, for type B, the
 * MessageUnit_B bookkeeping appended to it). */
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
iounmap(acb->pmuA);
dma_free_coherent(&acb->pdev->dev,
ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
acb->dma_coherent,
acb->dma_coherent_handle);
break;
}
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
/* the stored register pointers are mapping base + fixed offset;
 * subtracting the offset recovers the address ioremap returned —
 * NOTE(review): assumes both BARs were mapped at those offsets */
iounmap((u8 *)reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
iounmap((u8 *)reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
dma_free_coherent(&acb->pdev->dev,
(ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
}
}
}
  1027. void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
  1028. {
  1029. switch (acb->adapter_type) {
  1030. case ACB_ADAPTER_TYPE_A: {
  1031. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1032. writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
  1033. }
  1034. break;
  1035. case ACB_ADAPTER_TYPE_B: {
  1036. struct MessageUnit_B *reg = acb->pmuB;
  1037. writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
  1038. }
  1039. break;
  1040. }
  1041. }
  1042. static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
  1043. {
  1044. switch (acb->adapter_type) {
  1045. case ACB_ADAPTER_TYPE_A: {
  1046. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1047. /*
  1048. ** push inbound doorbell tell iop, driver data write ok
  1049. ** and wait reply on next hwinterrupt for next Qbuffer post
  1050. */
  1051. writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
  1052. }
  1053. break;
  1054. case ACB_ADAPTER_TYPE_B: {
  1055. struct MessageUnit_B *reg = acb->pmuB;
  1056. /*
  1057. ** push inbound doorbell tell iop, driver data write ok
  1058. ** and wait reply on next hwinterrupt for next Qbuffer post
  1059. */
  1060. writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell_reg);
  1061. }
  1062. break;
  1063. }
  1064. }
  1065. struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
  1066. {
  1067. struct QBUFFER __iomem *qbuffer = NULL;
  1068. switch (acb->adapter_type) {
  1069. case ACB_ADAPTER_TYPE_A: {
  1070. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1071. qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
  1072. }
  1073. break;
  1074. case ACB_ADAPTER_TYPE_B: {
  1075. struct MessageUnit_B *reg = acb->pmuB;
  1076. qbuffer = (struct QBUFFER __iomem *)reg->ioctl_rbuffer_reg;
  1077. }
  1078. break;
  1079. }
  1080. return qbuffer;
  1081. }
  1082. static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
  1083. {
  1084. struct QBUFFER __iomem *pqbuffer = NULL;
  1085. switch (acb->adapter_type) {
  1086. case ACB_ADAPTER_TYPE_A: {
  1087. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1088. pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
  1089. }
  1090. break;
  1091. case ACB_ADAPTER_TYPE_B: {
  1092. struct MessageUnit_B *reg = acb->pmuB;
  1093. pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg;
  1094. }
  1095. break;
  1096. }
  1097. return pqbuffer;
  1098. }
/* The IOP signalled that it wrote data into its request Qbuffer: copy the
 * bytes into the driver's circular rqbuffer and ack.  If there is not
 * enough room, set ACB_F_IOPDATA_OVERFLOW so the ioctl path drains the IOP
 * buffer later. */
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
struct QBUFFER __iomem *prbuffer;
struct QBUFFER *pQbuffer;
uint8_t __iomem *iop_data;
int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
rqbuf_lastindex = acb->rqbuf_lastindex;
rqbuf_firstindex = acb->rqbuf_firstindex;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
iop_data = (uint8_t __iomem *)prbuffer->data;
iop_len = prbuffer->data_len;
/* free space in the circular buffer; relies on ARCMSR_MAX_QBUFFER
 * being a power of two */
my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1);
if (my_empty_len >= iop_len)
{
/* NOTE(review): copies from an __iomem pointer with memcpy();
 * readb() would be the strictly correct MMIO accessor — verify */
while (iop_len > 0) {
pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
memcpy(pQbuffer, iop_data,1);
rqbuf_lastindex++;
rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
iop_data++;
iop_len--;
}
acb->rqbuf_lastindex = rqbuf_lastindex;
/* tell the IOP its buffer has been consumed */
arcmsr_iop_message_read(acb);
}
else {
acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
}
}
/* The IOP read our last write Qbuffer: if more ioctl data is pending in
 * the driver's circular wqbuffer, push the next chunk (at most 124 bytes)
 * into the IOP write buffer and signal it; set WQBUFFER_CLEARED when the
 * circular buffer is fully drained. */
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
uint8_t *pQbuffer;
struct QBUFFER __iomem *pwbuffer;
uint8_t __iomem *iop_data;
int32_t allxfer_len = 0;
acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
pwbuffer = arcmsr_get_iop_wqbuffer(acb);
iop_data = (uint8_t __iomem *)pwbuffer->data;
/* NOTE(review): memcpy() onto an __iomem pointer; writeb() would be
 * the strictly correct MMIO accessor — verify */
while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \
(allxfer_len < 124)) {
pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
memcpy(iop_data, pQbuffer, 1);
acb->wqbuf_firstindex++;
acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
iop_data++;
allxfer_len++;
}
pwbuffer->data_len = allxfer_len;
/* notify the IOP that fresh data is in its write buffer */
arcmsr_iop_message_wrote(acb);
}
if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
}
}
  1155. static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
  1156. {
  1157. uint32_t outbound_doorbell;
  1158. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1159. outbound_doorbell = readl(&reg->outbound_doorbell);
  1160. writel(outbound_doorbell, &reg->outbound_doorbell);
  1161. if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
  1162. arcmsr_iop2drv_data_wrote_handle(acb);
  1163. }
  1164. if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
  1165. arcmsr_iop2drv_data_read_handle(acb);
  1166. }
  1167. }
  1168. static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
  1169. {
  1170. uint32_t flag_ccb;
  1171. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1172. while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
  1173. arcmsr_drain_donequeue(acb, flag_ccb);
  1174. }
  1175. }
  1176. static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
  1177. {
  1178. uint32_t index;
  1179. uint32_t flag_ccb;
  1180. struct MessageUnit_B *reg = acb->pmuB;
  1181. index = reg->doneq_index;
  1182. while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
  1183. writel(0, &reg->done_qbuffer[index]);
  1184. arcmsr_drain_donequeue(acb, flag_ccb);
  1185. index++;
  1186. index %= ARCMSR_MAX_HBB_POSTQUEUE;
  1187. reg->doneq_index = index;
  1188. }
  1189. }
  1190. /*
  1191. **********************************************************************************
  1192. ** Handle a message interrupt
  1193. **
  1194. ** The only message interrupt we expect is in response to a query for the current adapter config.
  1195. ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
  1196. **********************************************************************************
  1197. */
  1198. static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
  1199. {
  1200. struct MessageUnit_A *reg = acb->pmuA;
  1201. /*clear interrupt and message state*/
  1202. writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
  1203. schedule_work(&acb->arcmsr_do_message_isr_bh);
  1204. }
/* Type-B message interrupt: ack the doorbell and defer the device-map
 * refresh to the bottom-half work item. */
static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
/*clear interrupt and message state*/
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
schedule_work(&acb->arcmsr_do_message_isr_bh);
}
  1212. static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
  1213. {
  1214. uint32_t outbound_intstatus;
  1215. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1216. outbound_intstatus = readl(&reg->outbound_intstatus) &
  1217. acb->outbound_int_enable;
  1218. if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
  1219. return 1;
  1220. }
  1221. writel(outbound_intstatus, &reg->outbound_intstatus);
  1222. if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
  1223. arcmsr_hba_doorbell_isr(acb);
  1224. }
  1225. if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
  1226. arcmsr_hba_postqueue_isr(acb);
  1227. }
  1228. if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
  1229. /* messenger of "driver to iop commands" */
  1230. arcmsr_hba_message_isr(acb);
  1231. }
  1232. return 0;
  1233. }
/* Type-B interrupt handler.  Returns 1 when no enabled doorbell source is
 * asserted (interrupt not ours), 0 after servicing.  The ack/flush/EOI
 * write sequence must stay in this order. */
static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
{
uint32_t outbound_doorbell;
struct MessageUnit_B *reg = acb->pmuB;
outbound_doorbell = readl(reg->iop2drv_doorbell_reg) &
acb->outbound_int_enable;
if (!outbound_doorbell)
return 1;
/* ack: clear exactly the bits we are about to service */
writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
/*in case the last action of doorbell interrupt clearance is cached,
this action can push HW to write down the clear bit*/
readl(reg->iop2drv_doorbell_reg);
writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
arcmsr_iop2drv_data_wrote_handle(acb);
}
if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
arcmsr_iop2drv_data_read_handle(acb);
}
if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
arcmsr_hbb_postqueue_isr(acb);
}
if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
/* messenger of "driver to iop commands" */
arcmsr_hbb_message_isr(acb);
}
return 0;
}
  1262. static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
  1263. {
  1264. switch (acb->adapter_type) {
  1265. case ACB_ADAPTER_TYPE_A: {
  1266. if (arcmsr_handle_hba_isr(acb)) {
  1267. return IRQ_NONE;
  1268. }
  1269. }
  1270. break;
  1271. case ACB_ADAPTER_TYPE_B: {
  1272. if (arcmsr_handle_hbb_isr(acb)) {
  1273. return IRQ_NONE;
  1274. }
  1275. }
  1276. break;
  1277. }
  1278. return IRQ_HANDLED;
  1279. }
  1280. static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
  1281. {
  1282. if (acb) {
  1283. /* stop adapter background rebuild */
  1284. if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
  1285. uint32_t intmask_org;
  1286. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  1287. intmask_org = arcmsr_disable_outbound_ints(acb);
  1288. arcmsr_stop_adapter_bgrb(acb);
  1289. arcmsr_flush_adapter_cache(acb);
  1290. arcmsr_enable_outbound_ints(acb, intmask_org);
  1291. }
  1292. }
  1293. }
/* Push pending ioctl data from the driver's circular wqbuffer to the IOP
 * write buffer (at most 124 bytes per call), but only once the IOP has
 * read the previous chunk (ACB_F_MESSAGE_WQBUFFER_READED set). */
void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
{
int32_t wqbuf_firstindex, wqbuf_lastindex;
uint8_t *pQbuffer;
struct QBUFFER __iomem *pwbuffer;
uint8_t __iomem *iop_data;
int32_t allxfer_len = 0;
pwbuffer = arcmsr_get_iop_wqbuffer(acb);
iop_data = (uint8_t __iomem *)pwbuffer->data;
if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
wqbuf_firstindex = acb->wqbuf_firstindex;
wqbuf_lastindex = acb->wqbuf_lastindex;
/* NOTE(review): memcpy() onto an __iomem pointer; writeb() would be
 * the strictly correct MMIO accessor — verify */
while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
memcpy(iop_data, pQbuffer, 1);
wqbuf_firstindex++;
wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
iop_data++;
allxfer_len++;
}
acb->wqbuf_firstindex = wqbuf_firstindex;
pwbuffer->data_len = allxfer_len;
/* notify the IOP that fresh data is in its write buffer */
arcmsr_iop_message_wrote(acb);
}
}
  1320. static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
  1321. struct scsi_cmnd *cmd)
  1322. {
  1323. struct CMD_MESSAGE_FIELD *pcmdmessagefld;
  1324. int retvalue = 0, transfer_len = 0;
  1325. char *buffer;
  1326. struct scatterlist *sg;
  1327. uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
  1328. (uint32_t ) cmd->cmnd[6] << 16 |
  1329. (uint32_t ) cmd->cmnd[7] << 8 |
  1330. (uint32_t ) cmd->cmnd[8];
  1331. /* 4 bytes: Areca io control code */
  1332. sg = scsi_sglist(cmd);
  1333. buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
  1334. if (scsi_sg_count(cmd) > 1) {
  1335. retvalue = ARCMSR_MESSAGE_FAIL;
  1336. goto message_out;
  1337. }
  1338. transfer_len += sg->length;
  1339. if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
  1340. retvalue = ARCMSR_MESSAGE_FAIL;
  1341. goto message_out;
  1342. }
  1343. pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
  1344. switch(controlcode) {
  1345. case ARCMSR_MESSAGE_READ_RQBUFFER: {
  1346. unsigned char *ver_addr;
  1347. uint8_t *pQbuffer, *ptmpQbuffer;
  1348. int32_t allxfer_len = 0;
  1349. ver_addr = kmalloc(1032, GFP_ATOMIC);
  1350. if (!ver_addr) {
  1351. retvalue = ARCMSR_MESSAGE_FAIL;
  1352. goto message_out;
  1353. }
  1354. if (!acb->fw_state) {
  1355. pcmdmessagefld->cmdmessage.ReturnCode =
  1356. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1357. goto message_out;
  1358. }
  1359. ptmpQbuffer = ver_addr;
  1360. while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
  1361. && (allxfer_len < 1031)) {
  1362. pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
  1363. memcpy(ptmpQbuffer, pQbuffer, 1);
  1364. acb->rqbuf_firstindex++;
  1365. acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
  1366. ptmpQbuffer++;
  1367. allxfer_len++;
  1368. }
  1369. if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
  1370. struct QBUFFER __iomem *prbuffer;
  1371. uint8_t __iomem *iop_data;
  1372. int32_t iop_len;
  1373. acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
  1374. prbuffer = arcmsr_get_iop_rqbuffer(acb);
  1375. iop_data = prbuffer->data;
  1376. iop_len = readl(&prbuffer->data_len);
  1377. while (iop_len > 0) {
  1378. acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
  1379. acb->rqbuf_lastindex++;
  1380. acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
  1381. iop_data++;
  1382. iop_len--;
  1383. }
  1384. arcmsr_iop_message_read(acb);
  1385. }
  1386. memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
  1387. pcmdmessagefld->cmdmessage.Length = allxfer_len;
  1388. pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
  1389. kfree(ver_addr);
  1390. }
  1391. break;
  1392. case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
  1393. unsigned char *ver_addr;
  1394. int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
  1395. uint8_t *pQbuffer, *ptmpuserbuffer;
  1396. ver_addr = kmalloc(1032, GFP_ATOMIC);
  1397. if (!ver_addr) {
  1398. retvalue = ARCMSR_MESSAGE_FAIL;
  1399. goto message_out;
  1400. }
  1401. if (!acb->fw_state) {
  1402. pcmdmessagefld->cmdmessage.ReturnCode =
  1403. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1404. goto message_out;
  1405. }
  1406. ptmpuserbuffer = ver_addr;
  1407. user_len = pcmdmessagefld->cmdmessage.Length;
  1408. memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
  1409. wqbuf_lastindex = acb->wqbuf_lastindex;
  1410. wqbuf_firstindex = acb->wqbuf_firstindex;
  1411. if (wqbuf_lastindex != wqbuf_firstindex) {
  1412. struct SENSE_DATA *sensebuffer =
  1413. (struct SENSE_DATA *)cmd->sense_buffer;
  1414. arcmsr_post_ioctldata2iop(acb);
  1415. /* has error report sensedata */
  1416. sensebuffer->ErrorCode = 0x70;
  1417. sensebuffer->SenseKey = ILLEGAL_REQUEST;
  1418. sensebuffer->AdditionalSenseLength = 0x0A;
  1419. sensebuffer->AdditionalSenseCode = 0x20;
  1420. sensebuffer->Valid = 1;
  1421. retvalue = ARCMSR_MESSAGE_FAIL;
  1422. } else {
  1423. my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
  1424. &(ARCMSR_MAX_QBUFFER - 1);
  1425. if (my_empty_len >= user_len) {
  1426. while (user_len > 0) {
  1427. pQbuffer =
  1428. &acb->wqbuffer[acb->wqbuf_lastindex];
  1429. memcpy(pQbuffer, ptmpuserbuffer, 1);
  1430. acb->wqbuf_lastindex++;
  1431. acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
  1432. ptmpuserbuffer++;
  1433. user_len--;
  1434. }
  1435. if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
  1436. acb->acb_flags &=
  1437. ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
  1438. arcmsr_post_ioctldata2iop(acb);
  1439. }
  1440. } else {
  1441. /* has error report sensedata */
  1442. struct SENSE_DATA *sensebuffer =
  1443. (struct SENSE_DATA *)cmd->sense_buffer;
  1444. sensebuffer->ErrorCode = 0x70;
  1445. sensebuffer->SenseKey = ILLEGAL_REQUEST;
  1446. sensebuffer->AdditionalSenseLength = 0x0A;
  1447. sensebuffer->AdditionalSenseCode = 0x20;
  1448. sensebuffer->Valid = 1;
  1449. retvalue = ARCMSR_MESSAGE_FAIL;
  1450. }
  1451. }
  1452. kfree(ver_addr);
  1453. }
  1454. break;
  1455. case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
  1456. uint8_t *pQbuffer = acb->rqbuffer;
  1457. if (!acb->fw_state) {
  1458. pcmdmessagefld->cmdmessage.ReturnCode =
  1459. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1460. goto message_out;
  1461. }
  1462. if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
  1463. acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
  1464. arcmsr_iop_message_read(acb);
  1465. }
  1466. acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
  1467. acb->rqbuf_firstindex = 0;
  1468. acb->rqbuf_lastindex = 0;
  1469. memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
  1470. pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
  1471. }
  1472. break;
  1473. case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
  1474. uint8_t *pQbuffer = acb->wqbuffer;
  1475. if (!acb->fw_state) {
  1476. pcmdmessagefld->cmdmessage.ReturnCode =
  1477. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1478. goto message_out;
  1479. }
  1480. if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
  1481. acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
  1482. arcmsr_iop_message_read(acb);
  1483. }
  1484. acb->acb_flags |=
  1485. (ACB_F_MESSAGE_WQBUFFER_CLEARED |
  1486. ACB_F_MESSAGE_WQBUFFER_READED);
  1487. acb->wqbuf_firstindex = 0;
  1488. acb->wqbuf_lastindex = 0;
  1489. memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
  1490. pcmdmessagefld->cmdmessage.ReturnCode =
  1491. ARCMSR_MESSAGE_RETURNCODE_OK;
  1492. }
  1493. break;
  1494. case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
  1495. uint8_t *pQbuffer;
  1496. if (!acb->fw_state) {
  1497. pcmdmessagefld->cmdmessage.ReturnCode =
  1498. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1499. goto message_out;
  1500. }
  1501. if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
  1502. acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
  1503. arcmsr_iop_message_read(acb);
  1504. }
  1505. acb->acb_flags |=
  1506. (ACB_F_MESSAGE_WQBUFFER_CLEARED
  1507. | ACB_F_MESSAGE_RQBUFFER_CLEARED
  1508. | ACB_F_MESSAGE_WQBUFFER_READED);
  1509. acb->rqbuf_firstindex = 0;
  1510. acb->rqbuf_lastindex = 0;
  1511. acb->wqbuf_firstindex = 0;
  1512. acb->wqbuf_lastindex = 0;
  1513. pQbuffer = acb->rqbuffer;
  1514. memset(pQbuffer, 0, sizeof(struct QBUFFER));
  1515. pQbuffer = acb->wqbuffer;
  1516. memset(pQbuffer, 0, sizeof(struct QBUFFER));
  1517. pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
  1518. }
  1519. break;
  1520. case ARCMSR_MESSAGE_RETURN_CODE_3F: {
  1521. if (!acb->fw_state) {
  1522. pcmdmessagefld->cmdmessage.ReturnCode =
  1523. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1524. goto message_out;
  1525. }
  1526. pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
  1527. }
  1528. break;
  1529. case ARCMSR_MESSAGE_SAY_HELLO: {
  1530. int8_t *hello_string = "Hello! I am ARCMSR";
  1531. if (!acb->fw_state) {
  1532. pcmdmessagefld->cmdmessage.ReturnCode =
  1533. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1534. goto message_out;
  1535. }
  1536. memcpy(pcmdmessagefld->messagedatabuffer, hello_string
  1537. , (int16_t)strlen(hello_string));
  1538. pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
  1539. }
  1540. break;
  1541. case ARCMSR_MESSAGE_SAY_GOODBYE:
  1542. if (!acb->fw_state) {
  1543. pcmdmessagefld->cmdmessage.ReturnCode =
  1544. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1545. goto message_out;
  1546. }
  1547. arcmsr_iop_parking(acb);
  1548. break;
  1549. case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
  1550. if (!acb->fw_state) {
  1551. pcmdmessagefld->cmdmessage.ReturnCode =
  1552. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1553. goto message_out;
  1554. }
  1555. arcmsr_flush_adapter_cache(acb);
  1556. break;
  1557. default:
  1558. retvalue = ARCMSR_MESSAGE_FAIL;
  1559. }
  1560. message_out:
  1561. sg = scsi_sglist(cmd);
  1562. kunmap_atomic(buffer - sg->offset, KM_IRQ0);
  1563. return retvalue;
  1564. }
  1565. static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
  1566. {
  1567. struct list_head *head = &acb->ccb_free_list;
  1568. struct CommandControlBlock *ccb = NULL;
  1569. if (!list_empty(head)) {
  1570. ccb = list_entry(head->next, struct CommandControlBlock, list);
  1571. list_del(head->next);
  1572. }
  1573. return ccb;
  1574. }
  1575. static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
  1576. struct scsi_cmnd *cmd)
  1577. {
  1578. switch (cmd->cmnd[0]) {
  1579. case INQUIRY: {
  1580. unsigned char inqdata[36];
  1581. char *buffer;
  1582. struct scatterlist *sg;
  1583. if (cmd->device->lun) {
  1584. cmd->result = (DID_TIME_OUT << 16);
  1585. cmd->scsi_done(cmd);
  1586. return;
  1587. }
  1588. inqdata[0] = TYPE_PROCESSOR;
  1589. /* Periph Qualifier & Periph Dev Type */
  1590. inqdata[1] = 0;
  1591. /* rem media bit & Dev Type Modifier */
  1592. inqdata[2] = 0;
  1593. /* ISO, ECMA, & ANSI versions */
  1594. inqdata[4] = 31;
  1595. /* length of additional data */
  1596. strncpy(&inqdata[8], "Areca ", 8);
  1597. /* Vendor Identification */
  1598. strncpy(&inqdata[16], "RAID controller ", 16);
  1599. /* Product Identification */
  1600. strncpy(&inqdata[32], "R001", 4); /* Product Revision */
  1601. sg = scsi_sglist(cmd);
  1602. buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
  1603. memcpy(buffer, inqdata, sizeof(inqdata));
  1604. sg = scsi_sglist(cmd);
  1605. kunmap_atomic(buffer - sg->offset, KM_IRQ0);
  1606. cmd->scsi_done(cmd);
  1607. }
  1608. break;
  1609. case WRITE_BUFFER:
  1610. case READ_BUFFER: {
  1611. if (arcmsr_iop_message_xfer(acb, cmd))
  1612. cmd->result = (DID_ERROR << 16);
  1613. cmd->scsi_done(cmd);
  1614. }
  1615. break;
  1616. default:
  1617. cmd->scsi_done(cmd);
  1618. }
  1619. }
/*
 * SCSI midlayer queuecommand entry point.
 *
 * Completes cache-sync/diagnostic commands locally, recovers a type A
 * adapter that is mid bus-reset, routes target 16 to the virtual device
 * handler, rejects reads/writes to gone RAID volumes, and otherwise
 * builds and posts a CCB to the adapter.
 *
 * Returns 0 when the command was accepted or completed, or
 * SCSI_MLQUEUE_HOST_BUSY to ask the midlayer to retry later.
 */
static int arcmsr_queue_command(struct scsi_cmnd *cmd,
	void (* done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;
	int lun = cmd->device->lun;
	uint8_t scsicmd = cmd->cmnd[0];
	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	/* complete cache-sync/diagnostics locally; fail them if the volume is gone */
	if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) {
		if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
			cmd->result = (DID_NO_CONNECT << 16);
		}
		cmd->scsi_done(cmd);
		return 0;
	}
	/* a bus reset is in progress: try to bring the adapter back first */
	if (acb->acb_flags & ACB_F_BUS_RESET) {
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A: {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			uint32_t intmask_org, outbound_doorbell;
			/* firmware not up yet: push back to the midlayer */
			if ((readl(&reg->outbound_msgaddr1) &
				ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
				printk(KERN_NOTICE "arcmsr%d: bus reset and return busy\n",
					acb->host->host_no);
				return SCSI_MLQUEUE_HOST_BUSY;
			}
			acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
			printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok\n",
				acb->host->host_no);
			/* disable all outbound interrupt */
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_get_firmware_spec(acb, 1);
			/*start background rebuild*/
			arcmsr_start_adapter_bgrb(acb);
			/* clear Qbuffer if door bell ringed */
			outbound_doorbell = readl(&reg->outbound_doorbell);
			/*clear interrupt */
			writel(outbound_doorbell, &reg->outbound_doorbell);
			writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
				&reg->inbound_doorbell);
			/* enable outbound Post Queue,outbound doorbell Interrupt */
			arcmsr_enable_outbound_ints(acb, intmask_org);
			acb->acb_flags |= ACB_F_IOP_INITED;
			acb->acb_flags &= ~ACB_F_BUS_RESET;
		}
		break;
		case ACB_ADAPTER_TYPE_B: {
			/* NOTE(review): type B reset recovery intentionally empty here */
		}
		}
	}
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	/* reject media read/write to a RAID volume that has disappeared */
	if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
		uint8_t block_cmd;
		/* low nibble 0x08/0x0a covers the READ/WRITE opcode families */
		block_cmd = cmd->cmnd[0] & 0x0f;
		if (block_cmd == 0x08 || block_cmd == 0x0a) {
			printk(KERN_NOTICE
				"arcmsr%d: block 'read/write'"
				"command with gone raid volume"
				" Cmd = %2x, TargetId = %d, Lun = %d \n"
				, acb->host->host_no
				, cmd->cmnd[0]
				, target, lun);
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	/* throttle when the adapter already holds its maximum of commands */
	if (atomic_read(&acb->ccboutstandingcount) >=
			ARCMSR_MAX_OUTSTANDING_CMD)
		return SCSI_MLQUEUE_HOST_BUSY;
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	if ( arcmsr_build_ccb( acb, ccb, cmd ) == FAILED ) {
		cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
		cmd->scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}
/*
 * Issue GET_CONFIG to a type A adapter and, when mode == 1, copy the
 * firmware model/version strings, the device map and the firmware
 * geometry words out of the message unit into the acb.
 *
 * Returns a pointer to the message unit's rwbuffer on success, NULL if
 * the firmware does not acknowledge the message in time.
 * NOTE(review): the returned pointer is really __iomem device memory
 * cast to plain void * — callers must access it with read*/write*.
 */
static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char *acb_device_map = acb->device_map;
	/* word offsets into message_rwbuffer fixed by the firmware layout */
	char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
	char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
	char __iomem *iop_device_map = (char __iomem *) (&reg->message_rwbuffer[21]);
	int count;
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	if (arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", acb->host->host_no);
		return NULL;
	}
	if (mode == 1) {
		/* 8-byte model string, read a byte at a time from MMIO */
		count = 8;
		while (count) {
			*acb_firm_model = readb(iop_firm_model);
			acb_firm_model++;
			iop_firm_model++;
			count--;
		}
		/* 16-byte firmware version string */
		count = 16;
		while (count) {
			*acb_firm_version = readb(iop_firm_version);
			acb_firm_version++;
			iop_firm_version++;
			count--;
		}
		/* 16-byte device presence map */
		count = 16;
		while (count) {
			*acb_device_map = readb(iop_device_map);
			acb_device_map++;
			iop_device_map++;
			count--;
		}
		printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
			, acb->host->host_no
			, acb->firm_version);
		/* firmware geometry words at fixed rwbuffer offsets */
		acb->signature = readl(&reg->message_rwbuffer[0]);
		acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
		acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
		acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
		acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
	}
	return reg->message_rwbuffer;
}
/*
 * Type B counterpart of arcmsr_get_hba_config(): issue GET_CONFIG via
 * the doorbell register and, when mode == 1, copy the firmware
 * model/version strings, device map and geometry words into the acb.
 *
 * Returns the msgcode rwbuffer pointer on success, NULL on timeout.
 */
static void __iomem *arcmsr_get_hbb_config(struct AdapterControlBlock *acb, int mode)
{
	struct MessageUnit_B *reg = acb->pmuB;
	uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char *acb_device_map = acb->device_map;
	char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]);
	/*firm_model,15,60-67*/
	char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]);
	/*firm_version,17,68-83*/
	char __iomem *iop_device_map = (char __iomem *) (&lrwbuffer[21]);
	/*firm_version,21,84-99*/
	int count;
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
	if (arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", acb->host->host_no);
		return NULL;
	}
	if (mode == 1) {
		/* 8-byte model string, byte-wise MMIO copy */
		count = 8;
		while (count)
		{
			*acb_firm_model = readb(iop_firm_model);
			acb_firm_model++;
			iop_firm_model++;
			count--;
		}
		/* 16-byte firmware version string */
		count = 16;
		while (count)
		{
			*acb_firm_version = readb(iop_firm_version);
			acb_firm_version++;
			iop_firm_version++;
			count--;
		}
		/* 16-byte device presence map */
		count = 16;
		while (count) {
			*acb_device_map = readb(iop_device_map);
			acb_device_map++;
			iop_device_map++;
			count--;
		}
		printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n",
			acb->host->host_no,
			acb->firm_version);
		/* geometry words read sequentially from the rwbuffer start */
		acb->signature = readl(lrwbuffer++);
		/*firm_signature,1,00-03*/
		acb->firm_request_len = readl(lrwbuffer++);
		/*firm_request_len,1,04-07*/
		acb->firm_numbers_queue = readl(lrwbuffer++);
		/*firm_numbers_queue,2,08-11*/
		acb->firm_sdram_size = readl(lrwbuffer++);
		/*firm_sdram_size,3,12-15*/
		acb->firm_hd_channels = readl(lrwbuffer);
		/*firm_ide_channels,4,16-19*/
	}
	return reg->msgcode_rwbuffer_reg;
}
  1818. static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode)
  1819. {
  1820. void *rtnval = 0;
  1821. switch (acb->adapter_type) {
  1822. case ACB_ADAPTER_TYPE_A: {
  1823. rtnval = arcmsr_get_hba_config(acb, mode);
  1824. }
  1825. break;
  1826. case ACB_ADAPTER_TYPE_B: {
  1827. rtnval = arcmsr_get_hbb_config(acb, mode);
  1828. }
  1829. break;
  1830. }
  1831. return rtnval;
  1832. }
/*
 * Poll the type A outbound completion queue until @poll_ccb is seen
 * (completed or aborted), completing any other CCBs found on the way.
 * Gives up after roughly 100 empty polls of 25ms each.
 */
static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
polling_hba_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		/* 0xFFFFFFFF from the queueport means the queue is empty */
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done)
				break;
			else {
				msleep(25);
				if (poll_count > 100)
					break;
				goto polling_hba_ccb_retry;
			}
		}
		/* flag_ccb encodes the CCB offset in 32-byte units */
		ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			/* aborted CCB (or the one we are waiting for): complete as aborted */
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				poll_ccb_done = 1;
				continue;
			}
			/* stale or foreign CCB: log and skip it */
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
	}
}
/*
 * Type B counterpart of arcmsr_polling_hba_ccbdone(): drain the done
 * queue ring until @poll_ccb is seen, completing other CCBs on the way.
 * Gives up after roughly 100 empty polls of 25ms each.
 */
static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index;
polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
	while (1) {
		index = reg->doneq_index;
		/* zero entry means the done queue slot is empty */
		if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
			if (poll_ccb_done)
				break;
			else {
				msleep(25);
				if (poll_count > 100)
					break;
				goto polling_hbb_ccb_retry;
			}
		}
		/* consume the slot and advance the ring index */
		writel(0, &reg->done_qbuffer[index]);
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check ifcommand done with no error*/
		ccb = (struct CommandControlBlock *)\
			(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			/* aborted CCB (or the awaited one): complete as aborted */
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: \
scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n"
					,acb->host->host_no
					,ccb->pcmd->device->id
					,ccb->pcmd->device->lun
					,ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				continue;
			}
			/* stale or foreign CCB: log and skip it */
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
	} /*drain reply FIFO*/
}
  1935. static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
  1936. struct CommandControlBlock *poll_ccb)
  1937. {
  1938. switch (acb->adapter_type) {
  1939. case ACB_ADAPTER_TYPE_A: {
  1940. arcmsr_polling_hba_ccbdone(acb,poll_ccb);
  1941. }
  1942. break;
  1943. case ACB_ADAPTER_TYPE_B: {
  1944. arcmsr_polling_hbb_ccbdone(acb,poll_ccb);
  1945. }
  1946. }
  1947. }
/*
 * Tell the IOP where the driver's CCB pool lives.
 *
 * Type A only needs the high 32 bits of the DMA address (when nonzero);
 * type B additionally programs the post/done queue window addresses and
 * switches the firmware into driver mode.
 *
 * Returns 0 on success, 1 if any firmware handshake times out.
 */
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
	uint32_t cdb_phyaddr, ccb_phyaddr_hi32;
	dma_addr_t dma_coherent_handle;
	/*
	********************************************************************
	** here we need to tell iop 331 our freeccb.HighPart
	** if freeccb.HighPart is not zero
	********************************************************************
	*/
	dma_coherent_handle = acb->dma_coherent_handle;
	cdb_phyaddr = (uint32_t)(dma_coherent_handle);
	/* NOTE(review): hi32 derived from the truncated 32-bit value, so it
	 * is always 0 here — presumably intentional for 32-bit DMA; verify. */
	ccb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
	/*
	***********************************************************************
	** if adapter type B, set window of "post command Q"
	***********************************************************************
	*/
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		if (ccb_phyaddr_hi32 != 0) {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			uint32_t intmask_org;
			intmask_org = arcmsr_disable_outbound_ints(acb);
			/* signature word, then the high address, then SET_CONFIG */
			writel(ARCMSR_SIGNATURE_SET_CONFIG, \
				&reg->message_rwbuffer[0]);
			writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
				&reg->inbound_msgaddr0);
			if (arcmsr_hba_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
part physical address timeout\n",
					acb->host->host_no);
				return 1;
			}
			arcmsr_enable_outbound_ints(acb, intmask_org);
		}
	}
	break;
	case ACB_ADAPTER_TYPE_B: {
		unsigned long post_queue_phyaddr;
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t intmask_org;
		intmask_org = arcmsr_disable_outbound_ints(acb);
		reg->postq_index = 0;
		reg->doneq_index = 0;
		/* open the post-command window before writing its config */
		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell_reg);
		if (arcmsr_hbb_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
				acb->host->host_no);
			return 1;
		}
		/* post queue sits after the CCB pool inside the coherent block */
		post_queue_phyaddr = cdb_phyaddr + ARCMSR_MAX_FREECCB_NUM * \
		sizeof(struct CommandControlBlock) + offsetof(struct MessageUnit_B, post_qbuffer) ;
		rwbuffer = reg->msgcode_rwbuffer_reg;
		/* driver "set config" signature */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		/* normal should be zero */
		writel(ccb_phyaddr_hi32, rwbuffer++);
		/* postQ size (256 + 8)*4 */
		writel(post_queue_phyaddr, rwbuffer++);
		/* doneQ size (256 + 8)*4 */
		writel(post_queue_phyaddr + 1056, rwbuffer++);
		/* ccb maxQ size must be --> [(256 + 8)*4]*/
		writel(1056, rwbuffer);
		writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell_reg);
		if (arcmsr_hbb_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
timeout \n",acb->host->host_no);
			return 1;
		}
		/* switch the firmware into driver mode */
		writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell_reg);
		if (arcmsr_hbb_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'can not set diver mode \n"\
				,acb->host->host_no);
			return 1;
		}
		arcmsr_enable_outbound_ints(acb, intmask_org);
	}
	break;
	}
	return 0;
}
  2032. static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
  2033. {
  2034. uint32_t firmware_state = 0;
  2035. switch (acb->adapter_type) {
  2036. case ACB_ADAPTER_TYPE_A: {
  2037. struct MessageUnit_A __iomem *reg = acb->pmuA;
  2038. do {
  2039. firmware_state = readl(&reg->outbound_msgaddr1);
  2040. } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
  2041. }
  2042. break;
  2043. case ACB_ADAPTER_TYPE_B: {
  2044. struct MessageUnit_B *reg = acb->pmuB;
  2045. do {
  2046. firmware_state = readl(reg->iop2drv_doorbell_reg);
  2047. } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
  2048. writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
  2049. }
  2050. break;
  2051. }
  2052. }
  2053. static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
  2054. {
  2055. struct MessageUnit_A __iomem *reg = acb->pmuA;
  2056. if (unlikely(atomic_read(&acb->rq_map_token) == 0)) {
  2057. acb->fw_state = false;
  2058. } else {
  2059. /*to prevent rq_map_token from changing by other interrupt, then
  2060. avoid the dead-lock*/
  2061. acb->fw_state = true;
  2062. atomic_dec(&acb->rq_map_token);
  2063. if (!(acb->fw_state) ||
  2064. (acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
  2065. atomic_set(&acb->rq_map_token, 16);
  2066. }
  2067. acb->ante_token_value = atomic_read(&acb->rq_map_token);
  2068. writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
  2069. }
  2070. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
  2071. return;
  2072. }
  2073. static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
  2074. {
  2075. struct MessageUnit_B __iomem *reg = acb->pmuB;
  2076. if (unlikely(atomic_read(&acb->rq_map_token) == 0)) {
  2077. acb->fw_state = false;
  2078. } else {
  2079. /*to prevent rq_map_token from changing by other interrupt, then
  2080. avoid the dead-lock*/
  2081. acb->fw_state = true;
  2082. atomic_dec(&acb->rq_map_token);
  2083. if (!(acb->fw_state) ||
  2084. (acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
  2085. atomic_set(&acb->rq_map_token, 16);
  2086. }
  2087. acb->ante_token_value = atomic_read(&acb->rq_map_token);
  2088. writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
  2089. }
  2090. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
  2091. return;
  2092. }
  2093. static void arcmsr_request_device_map(unsigned long pacb)
  2094. {
  2095. struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
  2096. switch (acb->adapter_type) {
  2097. case ACB_ADAPTER_TYPE_A: {
  2098. arcmsr_request_hba_device_map(acb);
  2099. }
  2100. break;
  2101. case ACB_ADAPTER_TYPE_B: {
  2102. arcmsr_request_hbb_device_map(acb);
  2103. }
  2104. break;
  2105. }
  2106. }
  2107. static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
  2108. {
  2109. struct MessageUnit_A __iomem *reg = acb->pmuA;
  2110. acb->acb_flags |= ACB_F_MSG_START_BGRB;
  2111. writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
  2112. if (arcmsr_hba_wait_msgint_ready(acb)) {
  2113. printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
  2114. rebulid' timeout \n", acb->host->host_no);
  2115. }
  2116. }
  2117. static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
  2118. {
  2119. struct MessageUnit_B *reg = acb->pmuB;
  2120. acb->acb_flags |= ACB_F_MSG_START_BGRB;
  2121. writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg);
  2122. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  2123. printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
  2124. rebulid' timeout \n",acb->host->host_no);
  2125. }
  2126. }
  2127. static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
  2128. {
  2129. switch (acb->adapter_type) {
  2130. case ACB_ADAPTER_TYPE_A:
  2131. arcmsr_start_hba_bgrb(acb);
  2132. break;
  2133. case ACB_ADAPTER_TYPE_B:
  2134. arcmsr_start_hbb_bgrb(acb);
  2135. break;
  2136. }
  2137. }
/*
 * Flush any pending doorbell state left over from before initialization
 * and tell the IOP the driver has consumed its data, so both sides start
 * from a clean doorbell handshake.
 */
static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_doorbell;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/*clear doorbell interrupt */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear interrupt and message state*/
		writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
		/* let IOP know data has been read */
	}
	break;
	}
}
  2161. static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
  2162. {
  2163. switch (acb->adapter_type) {
  2164. case ACB_ADAPTER_TYPE_A:
  2165. return;
  2166. case ACB_ADAPTER_TYPE_B:
  2167. {
  2168. struct MessageUnit_B *reg = acb->pmuB;
  2169. writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell_reg);
  2170. if(arcmsr_hbb_wait_msgint_ready(acb)) {
  2171. printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
  2172. return;
  2173. }
  2174. }
  2175. break;
  2176. }
  2177. return;
  2178. }
  2179. static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
  2180. {
  2181. uint8_t value[64];
  2182. int i;
  2183. /* backup pci config data */
  2184. for (i = 0; i < 64; i++) {
  2185. pci_read_config_byte(acb->pdev, i, &value[i]);
  2186. }
  2187. /* hardware reset signal */
  2188. pci_write_config_byte(acb->pdev, 0x84, 0x20);
  2189. msleep(1000);
  2190. /* write back pci config data */
  2191. for (i = 0; i < 64; i++) {
  2192. pci_write_config_byte(acb->pdev, i, value[i]);
  2193. }
  2194. msleep(1000);
  2195. return;
  2196. }
  2197. /*
  2198. ****************************************************************************
  2199. ****************************************************************************
  2200. */
  2201. #ifdef CONFIG_SCSI_ARCMSR_RESET
  2202. int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
  2203. {
  2204. struct Scsi_Host *shost = NULL;
  2205. spinlock_t *host_lock = NULL;
  2206. int i, isleep;
  2207. shost = cmd->device->host;
  2208. host_lock = shost->host_lock;
  2209. printk(KERN_NOTICE "Host %d bus reset over, sleep %d seconds (busy %d, can queue %d) ...........\n",
  2210. shost->host_no, sleeptime, shost->host_busy, shost->can_queue);
  2211. isleep = sleeptime / 10;
  2212. spin_unlock_irq(host_lock);
  2213. if (isleep > 0) {
  2214. for (i = 0; i < isleep; i++) {
  2215. msleep(10000);
  2216. printk(KERN_NOTICE "^%d^\n", i);
  2217. }
  2218. }
  2219. isleep = sleeptime % 10;
  2220. if (isleep > 0) {
  2221. msleep(isleep * 1000);
  2222. printk(KERN_NOTICE "^v^\n");
  2223. }
  2224. spin_lock_irq(host_lock);
  2225. printk(KERN_NOTICE "***** wake up *****\n");
  2226. return 0;
  2227. }
  2228. #endif
/*
 * Bring the adapter to operational state: wait for the firmware,
 * publish the CCB pool address, fetch the firmware spec, start
 * background rebuild, flush stale doorbell state, enable EOI mode,
 * then unmask interrupts. Statement order follows the firmware's
 * required init handshake.
 */
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;
	/* disable all outbound interrupt */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	arcmsr_get_firmware_spec(acb, 1);
	/*start background rebuild*/
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound Post Queue,outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
  2246. static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
  2247. {
  2248. struct CommandControlBlock *ccb;
  2249. uint32_t intmask_org;
  2250. uint8_t rtnval = 0x00;
  2251. int i = 0;
  2252. if (atomic_read(&acb->ccboutstandingcount) != 0) {
  2253. /* disable all outbound interrupt */
  2254. intmask_org = arcmsr_disable_outbound_ints(acb);
  2255. /* talk to iop 331 outstanding command aborted */
  2256. rtnval = arcmsr_abort_allcmd(acb);
  2257. /* wait for 3 sec for all command aborted*/
  2258. ssleep(3);
  2259. /* clear all outbound posted Q */
  2260. arcmsr_done4abort_postqueue(acb);
  2261. for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
  2262. ccb = acb->pccb_pool[i];
  2263. if (ccb->startdone == ARCMSR_CCB_START) {
  2264. arcmsr_ccb_complete(ccb, 1);
  2265. }
  2266. }
  2267. atomic_set(&acb->ccboutstandingcount, 0);
  2268. /* enable all outbound interrupt */
  2269. arcmsr_enable_outbound_ints(acb, intmask_org);
  2270. return rtnval;
  2271. }
  2272. return rtnval;
  2273. }
/*
 * arcmsr_bus_reset - SCSI error-handler bus reset entry point.
 *
 * Polls the ISR up to four times to reap outstanding completions, then
 * attempts a firmware-level reset via arcmsr_iop_reset().  If that fails
 * on a type-A adapter, a full hardware reset is issued; when
 * CONFIG_SCSI_ARCMSR_RESET is set the handler sleeps and re-checks the
 * firmware-ready bit until the controller returns, then re-initializes
 * background rebuild, doorbells and the device-map timer.
 * Always returns SUCCESS to the SCSI midlayer.
 */
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)cmd->device->host->hostdata;
	int retry = 0;

	/* a reset is already in progress; do not start a second one */
	if (acb->acb_flags & ACB_F_BUS_RESET)
		return SUCCESS;
	printk(KERN_NOTICE "arcmsr%d: bus reset ..... \n", acb->adapter_index);
	acb->acb_flags |= ACB_F_BUS_RESET;
	acb->num_resets++;
	/* let the ISR drain completions while commands are outstanding */
	while (atomic_read(&acb->ccboutstandingcount) != 0 && retry < 4) {
		arcmsr_interrupt(acb);
		retry++;
	}
	if (arcmsr_iop_reset(acb)) {
		/* firmware abort failed: escalate to a hardware reset */
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A: {
			printk(KERN_NOTICE "arcmsr%d: do hardware bus reset, num_resets = %d num_aborts = %d \n",
				acb->adapter_index, acb->num_resets, acb->num_aborts);
			arcmsr_hardware_reset(acb);
			acb->acb_flags |= ACB_F_FIRMWARE_TRAP;
			acb->acb_flags &= ~ACB_F_IOP_INITED;
#ifdef CONFIG_SCSI_ARCMSR_RESET
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			uint32_t intmask_org, outbound_doorbell;
			int retry_count = 0;
sleep_again:
			/* block until the post-reset grace period elapses */
			arcmsr_sleep_for_bus_reset(cmd);
			if ((readl(&reg->outbound_msgaddr1) &
				ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
				printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry=%d \n",
					acb->host->host_no, retry_count);
				/* NOTE(review): retrycount is defined elsewhere in
				 * this file — presumably a module parameter;
				 * verify its bound there */
				if (retry_count > retrycount) {
					printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry aborted \n",
						acb->host->host_no);
					return SUCCESS;
				}
				retry_count++;
				goto sleep_again;
			}
			/* firmware is back: clear the trap, mark initialized */
			acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
			acb->acb_flags |= ACB_F_IOP_INITED;
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok \n",
				acb->host->host_no);
			/* disable all outbound interrupt */
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_get_firmware_spec(acb, 1);
			/* start background rebuild */
			arcmsr_start_adapter_bgrb(acb);
			/* clear Qbuffer if door bell ringed */
			outbound_doorbell = readl(&reg->outbound_doorbell);
			writel(outbound_doorbell, &reg->outbound_doorbell); /* clear interrupt */
			writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
			/* enable outbound Post Queue, outbound doorbell Interrupt */
			arcmsr_enable_outbound_ints(acb, intmask_org);
			/* re-arm the periodic device-map poll timer (~20s) */
			atomic_set(&acb->rq_map_token, 16);
			init_timer(&acb->eternal_timer);
			acb->eternal_timer.expires = jiffies + msecs_to_jiffies(20*HZ);
			acb->eternal_timer.data = (unsigned long) acb;
			acb->eternal_timer.function = &arcmsr_request_device_map;
			add_timer(&acb->eternal_timer);
#endif
		}
		break;
		case ACB_ADAPTER_TYPE_B: {
			/* no hardware-reset path implemented for type-B adapters */
		}
		}
	} else {
		/* firmware-level reset succeeded; clear in-progress flag */
		acb->acb_flags &= ~ACB_F_BUS_RESET;
	}
	return SUCCESS;
}
  2347. static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
  2348. struct CommandControlBlock *ccb)
  2349. {
  2350. u32 intmask;
  2351. ccb->startdone = ARCMSR_CCB_ABORTED;
  2352. /*
  2353. ** Wait for 3 sec for all command done.
  2354. */
  2355. ssleep(3);
  2356. intmask = arcmsr_disable_outbound_ints(acb);
  2357. arcmsr_polling_ccbdone(acb, ccb);
  2358. arcmsr_enable_outbound_ints(acb, intmask);
  2359. }
  2360. static int arcmsr_abort(struct scsi_cmnd *cmd)
  2361. {
  2362. struct AdapterControlBlock *acb =
  2363. (struct AdapterControlBlock *)cmd->device->host->hostdata;
  2364. int i = 0;
  2365. printk(KERN_NOTICE
  2366. "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
  2367. acb->host->host_no, cmd->device->id, cmd->device->lun);
  2368. acb->num_aborts++;
  2369. /*
  2370. ************************************************
  2371. ** the all interrupt service routine is locked
  2372. ** we need to handle it as soon as possible and exit
  2373. ************************************************
  2374. */
  2375. if (!atomic_read(&acb->ccboutstandingcount))
  2376. return SUCCESS;
  2377. for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
  2378. struct CommandControlBlock *ccb = acb->pccb_pool[i];
  2379. if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
  2380. arcmsr_abort_one_cmd(acb, ccb);
  2381. break;
  2382. }
  2383. }
  2384. return SUCCESS;
  2385. }
  2386. static const char *arcmsr_info(struct Scsi_Host *host)
  2387. {
  2388. struct AdapterControlBlock *acb =
  2389. (struct AdapterControlBlock *) host->hostdata;
  2390. static char buf[256];
  2391. char *type;
  2392. int raid6 = 1;
  2393. switch (acb->pdev->device) {
  2394. case PCI_DEVICE_ID_ARECA_1110:
  2395. case PCI_DEVICE_ID_ARECA_1200:
  2396. case PCI_DEVICE_ID_ARECA_1202:
  2397. case PCI_DEVICE_ID_ARECA_1210:
  2398. raid6 = 0;
  2399. /*FALLTHRU*/
  2400. case PCI_DEVICE_ID_ARECA_1120:
  2401. case PCI_DEVICE_ID_ARECA_1130:
  2402. case PCI_DEVICE_ID_ARECA_1160:
  2403. case PCI_DEVICE_ID_ARECA_1170:
  2404. case PCI_DEVICE_ID_ARECA_1201:
  2405. case PCI_DEVICE_ID_ARECA_1220:
  2406. case PCI_DEVICE_ID_ARECA_1230:
  2407. case PCI_DEVICE_ID_ARECA_1260:
  2408. case PCI_DEVICE_ID_ARECA_1270:
  2409. case PCI_DEVICE_ID_ARECA_1280:
  2410. type = "SATA";
  2411. break;
  2412. case PCI_DEVICE_ID_ARECA_1380:
  2413. case PCI_DEVICE_ID_ARECA_1381:
  2414. case PCI_DEVICE_ID_ARECA_1680:
  2415. case PCI_DEVICE_ID_ARECA_1681:
  2416. type = "SAS";
  2417. break;
  2418. default:
  2419. type = "X-TYPE";
  2420. break;
  2421. }
  2422. sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
  2423. type, raid6 ? "( RAID6 capable)" : "",
  2424. ARCMSR_DRIVER_VERSION);
  2425. return buf;
  2426. }