  1. /*
  2. *******************************************************************************
  3. ** O.S : Linux
  4. ** FILE NAME : arcmsr_hba.c
  5. ** BY : Erich Chen
  6. ** Description: SCSI RAID Device Driver for
  7. ** ARECA RAID Host adapter
  8. *******************************************************************************
  9. ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
  10. **
  11. ** Web site: www.areca.com.tw
  12. ** E-mail: support@areca.com.tw
  13. **
  14. ** This program is free software; you can redistribute it and/or modify
  15. ** it under the terms of the GNU General Public License version 2 as
  16. ** published by the Free Software Foundation.
  17. ** This program is distributed in the hope that it will be useful,
  18. ** but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. ** GNU General Public License for more details.
  21. *******************************************************************************
  22. ** Redistribution and use in source and binary forms, with or without
  23. ** modification, are permitted provided that the following conditions
  24. ** are met:
  25. ** 1. Redistributions of source code must retain the above copyright
  26. ** notice, this list of conditions and the following disclaimer.
  27. ** 2. Redistributions in binary form must reproduce the above copyright
  28. ** notice, this list of conditions and the following disclaimer in the
  29. ** documentation and/or other materials provided with the distribution.
  30. ** 3. The name of the author may not be used to endorse or promote products
  31. ** derived from this software without specific prior written permission.
  32. **
  33. ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  34. ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  35. ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  36. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  37. ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
  38. ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  39. ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
  40. ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  41. ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
  42. ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  43. *******************************************************************************
  44. ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
  45. ** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
  46. *******************************************************************************
  47. */
  48. #include <linux/module.h>
  49. #include <linux/reboot.h>
  50. #include <linux/spinlock.h>
  51. #include <linux/pci_ids.h>
  52. #include <linux/interrupt.h>
  53. #include <linux/moduleparam.h>
  54. #include <linux/errno.h>
  55. #include <linux/types.h>
  56. #include <linux/delay.h>
  57. #include <linux/dma-mapping.h>
  58. #include <linux/timer.h>
  59. #include <linux/pci.h>
  60. #include <linux/aer.h>
  61. #include <asm/dma.h>
  62. #include <asm/io.h>
  63. #include <asm/system.h>
  64. #include <asm/uaccess.h>
  65. #include <scsi/scsi_host.h>
  66. #include <scsi/scsi.h>
  67. #include <scsi/scsi_cmnd.h>
  68. #include <scsi/scsi_tcq.h>
  69. #include <scsi/scsi_device.h>
  70. #include <scsi/scsi_transport.h>
  71. #include <scsi/scsicam.h>
  72. #include "arcmsr.h"
MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);

/* Forward declarations for driver entry points and helpers defined below. */
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
	struct scsi_cmnd *cmd);
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct scsi_cmnd *cmd,
	void (*done) (struct scsi_cmnd *));
static int arcmsr_probe(struct pci_dev *pdev,
	const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
  98. static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
  99. int queue_depth)
  100. {
  101. if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
  102. queue_depth = ARCMSR_MAX_CMD_PERLUN;
  103. scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
  104. return queue_depth;
  105. }
/*
 * SCSI midlayer host template: entry points and queueing limits shared by
 * every adapter instance this driver registers.  The .name field is the
 * adapter string concatenated with ARCMSR_DRIVER_VERSION at compile time.
 */
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module = THIS_MODULE,
	.name = "ARCMSR ARECA SATA/SAS RAID HOST Adapter"
		ARCMSR_DRIVER_VERSION,
	.info = arcmsr_info,
	.queuecommand = arcmsr_queue_command,
	.eh_abort_handler = arcmsr_abort,
	.eh_bus_reset_handler = arcmsr_bus_reset,
	.bios_param = arcmsr_bios_param,
	.change_queue_depth = arcmsr_adjust_disk_queue_depth,
	.can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
	.this_id = ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize = ARCMSR_MAX_SG_ENTRIES,
	.max_sectors = ARCMSR_MAX_XFER_SECTORS,
	.cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = arcmsr_host_attrs,
};
#ifdef CONFIG_SCSI_ARCMSR_AER
/* PCIe Advanced Error Reporting callbacks: recovery hooks invoked by the
 * PCI core after a bus error is detected on this device. */
static struct pci_error_handlers arcmsr_pci_error_handlers = {
	.error_detected = arcmsr_pci_error_detected,
	.slot_reset = arcmsr_pci_slot_reset,
};
#endif
/* PCI vendor/device IDs of all supported Areca controllers; the PCI core
 * uses this table to bind the driver, and MODULE_DEVICE_TABLE exports it
 * for module autoloading. */
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
/* PCI driver glue: ties the ID table to the probe/remove/shutdown
 * callbacks (and, when AER is configured, the error handlers). */
static struct pci_driver arcmsr_pci_driver = {
	.name = "arcmsr",
	.id_table = arcmsr_device_id_table,
	.probe = arcmsr_probe,
	.remove = arcmsr_remove,
	.shutdown = arcmsr_shutdown,
#ifdef CONFIG_SCSI_ARCMSR_AER
	.err_handler = &arcmsr_pci_error_handlers,
#endif
};
  162. static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
  163. {
  164. irqreturn_t handle_state;
  165. struct AdapterControlBlock *acb = dev_id;
  166. spin_lock(acb->host->host_lock);
  167. handle_state = arcmsr_interrupt(acb);
  168. spin_unlock(acb->host->host_lock);
  169. return handle_state;
  170. }
  171. static int arcmsr_bios_param(struct scsi_device *sdev,
  172. struct block_device *bdev, sector_t capacity, int *geom)
  173. {
  174. int ret, heads, sectors, cylinders, total_capacity;
  175. unsigned char *buffer;/* return copy of block device's partition table */
  176. buffer = scsi_bios_ptable(bdev);
  177. if (buffer) {
  178. ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
  179. kfree(buffer);
  180. if (ret != -1)
  181. return ret;
  182. }
  183. total_capacity = capacity;
  184. heads = 64;
  185. sectors = 32;
  186. cylinders = total_capacity / (heads * sectors);
  187. if (cylinders > 1024) {
  188. heads = 255;
  189. sectors = 63;
  190. cylinders = total_capacity / (heads * sectors);
  191. }
  192. geom[0] = heads;
  193. geom[1] = sectors;
  194. geom[2] = cylinders;
  195. return 0;
  196. }
  197. static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
  198. {
  199. struct pci_dev *pdev = acb->pdev;
  200. u16 dev_id;
  201. pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
  202. switch (dev_id) {
  203. case 0x1201 : {
  204. acb->adapter_type = ACB_ADAPTER_TYPE_B;
  205. }
  206. break;
  207. default : acb->adapter_type = ACB_ADAPTER_TYPE_A;
  208. }
  209. }
  210. static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
  211. {
  212. switch (acb->adapter_type) {
  213. case ACB_ADAPTER_TYPE_A: {
  214. struct pci_dev *pdev = acb->pdev;
  215. void *dma_coherent;
  216. dma_addr_t dma_coherent_handle, dma_addr;
  217. struct CommandControlBlock *ccb_tmp;
  218. uint32_t intmask_org;
  219. int i, j;
  220. acb->pmu = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
  221. if (!acb->pmu) {
  222. printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
  223. acb->host->host_no);
  224. }
  225. dma_coherent = dma_alloc_coherent(&pdev->dev,
  226. ARCMSR_MAX_FREECCB_NUM *
  227. sizeof (struct CommandControlBlock) + 0x20,
  228. &dma_coherent_handle, GFP_KERNEL);
  229. if (!dma_coherent)
  230. return -ENOMEM;
  231. acb->dma_coherent = dma_coherent;
  232. acb->dma_coherent_handle = dma_coherent_handle;
  233. if (((unsigned long)dma_coherent & 0x1F)) {
  234. dma_coherent = dma_coherent +
  235. (0x20 - ((unsigned long)dma_coherent & 0x1F));
  236. dma_coherent_handle = dma_coherent_handle +
  237. (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
  238. }
  239. dma_addr = dma_coherent_handle;
  240. ccb_tmp = (struct CommandControlBlock *)dma_coherent;
  241. for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
  242. ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
  243. ccb_tmp->acb = acb;
  244. acb->pccb_pool[i] = ccb_tmp;
  245. list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
  246. dma_addr = dma_addr + sizeof(struct CommandControlBlock);
  247. ccb_tmp++;
  248. }
  249. acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
  250. for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
  251. for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
  252. acb->devstate[i][j] = ARECA_RAID_GONE;
  253. /*
  254. ** here we need to tell iop 331 our ccb_tmp.HighPart
  255. ** if ccb_tmp.HighPart is not zero
  256. */
  257. intmask_org = arcmsr_disable_outbound_ints(acb);
  258. }
  259. break;
  260. case ACB_ADAPTER_TYPE_B: {
  261. struct pci_dev *pdev = acb->pdev;
  262. struct MessageUnit_B *reg;
  263. void *mem_base0, *mem_base1;
  264. void *dma_coherent;
  265. dma_addr_t dma_coherent_handle, dma_addr;
  266. uint32_t intmask_org;
  267. struct CommandControlBlock *ccb_tmp;
  268. int i, j;
  269. dma_coherent = dma_alloc_coherent(&pdev->dev,
  270. ((ARCMSR_MAX_FREECCB_NUM *
  271. sizeof(struct CommandControlBlock) + 0x20) +
  272. sizeof(struct MessageUnit_B)),
  273. &dma_coherent_handle, GFP_KERNEL);
  274. if (!dma_coherent)
  275. return -ENOMEM;
  276. acb->dma_coherent = dma_coherent;
  277. acb->dma_coherent_handle = dma_coherent_handle;
  278. if (((unsigned long)dma_coherent & 0x1F)) {
  279. dma_coherent = dma_coherent +
  280. (0x20 - ((unsigned long)dma_coherent & 0x1F));
  281. dma_coherent_handle = dma_coherent_handle +
  282. (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
  283. }
  284. reg = (struct MessageUnit_B *)(dma_coherent +
  285. ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
  286. dma_addr = dma_coherent_handle;
  287. ccb_tmp = (struct CommandControlBlock *)dma_coherent;
  288. for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
  289. ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
  290. ccb_tmp->acb = acb;
  291. acb->pccb_pool[i] = ccb_tmp;
  292. list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
  293. dma_addr = dma_addr + sizeof(struct CommandControlBlock);
  294. ccb_tmp++;
  295. }
  296. reg = (struct MessageUnit_B *)(dma_coherent +
  297. ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
  298. acb->pmu = (struct MessageUnit *)reg;
  299. mem_base0 = ioremap(pci_resource_start(pdev, 0),
  300. pci_resource_len(pdev, 0));
  301. mem_base1 = ioremap(pci_resource_start(pdev, 2),
  302. pci_resource_len(pdev, 2));
  303. reg->drv2iop_doorbell_reg = (uint32_t *)((char *)mem_base0 +
  304. ARCMSR_DRV2IOP_DOORBELL);
  305. reg->drv2iop_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 +
  306. ARCMSR_DRV2IOP_DOORBELL_MASK);
  307. reg->iop2drv_doorbell_reg = (uint32_t *)((char *)mem_base0 +
  308. ARCMSR_IOP2DRV_DOORBELL);
  309. reg->iop2drv_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 +
  310. ARCMSR_IOP2DRV_DOORBELL_MASK);
  311. reg->ioctl_wbuffer_reg = (uint32_t *)((char *)mem_base1 +
  312. ARCMSR_IOCTL_WBUFFER);
  313. reg->ioctl_rbuffer_reg = (uint32_t *)((char *)mem_base1 +
  314. ARCMSR_IOCTL_RBUFFER);
  315. reg->msgcode_rwbuffer_reg = (uint32_t *)((char *)mem_base1 +
  316. ARCMSR_MSGCODE_RWBUFFER);
  317. acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
  318. for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
  319. for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
  320. acb->devstate[i][j] = ARECA_RAID_GOOD;
  321. /*
  322. ** here we need to tell iop 331 our ccb_tmp.HighPart
  323. ** if ccb_tmp.HighPart is not zero
  324. */
  325. intmask_org = arcmsr_disable_outbound_ints(acb);
  326. }
  327. break;
  328. }
  329. return 0;
  330. }
  331. static int arcmsr_probe(struct pci_dev *pdev,
  332. const struct pci_device_id *id)
  333. {
  334. struct Scsi_Host *host;
  335. struct AdapterControlBlock *acb;
  336. uint8_t bus, dev_fun;
  337. int error;
  338. error = pci_enable_device(pdev);
  339. if (error)
  340. goto out;
  341. pci_set_master(pdev);
  342. host = scsi_host_alloc(&arcmsr_scsi_host_template,
  343. sizeof(struct AdapterControlBlock));
  344. if (!host) {
  345. error = -ENOMEM;
  346. goto out_disable_device;
  347. }
  348. acb = (struct AdapterControlBlock *)host->hostdata;
  349. memset(acb, 0, sizeof (struct AdapterControlBlock));
  350. error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
  351. if (error) {
  352. error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
  353. if (error) {
  354. printk(KERN_WARNING
  355. "scsi%d: No suitable DMA mask available\n",
  356. host->host_no);
  357. goto out_host_put;
  358. }
  359. }
  360. bus = pdev->bus->number;
  361. dev_fun = pdev->devfn;
  362. acb->host = host;
  363. acb->pdev = pdev;
  364. host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
  365. host->max_lun = ARCMSR_MAX_TARGETLUN;
  366. host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
  367. host->max_cmd_len = 16; /*this is issue of 64bit LBA, over 2T byte*/
  368. host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
  369. host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
  370. host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
  371. host->this_id = ARCMSR_SCSI_INITIATOR_ID;
  372. host->unique_id = (bus << 8) | dev_fun;
  373. host->irq = pdev->irq;
  374. error = pci_request_regions(pdev, "arcmsr");
  375. if (error) {
  376. goto out_host_put;
  377. }
  378. arcmsr_define_adapter_type(acb);
  379. acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
  380. ACB_F_MESSAGE_RQBUFFER_CLEARED |
  381. ACB_F_MESSAGE_WQBUFFER_READED);
  382. acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
  383. INIT_LIST_HEAD(&acb->ccb_free_list);
  384. error = arcmsr_alloc_ccb_pool(acb);
  385. if (error)
  386. goto out_release_regions;
  387. error = request_irq(pdev->irq, arcmsr_do_interrupt,
  388. IRQF_SHARED, "arcmsr", acb);
  389. if (error)
  390. goto out_free_ccb_pool;
  391. arcmsr_iop_init(acb);
  392. pci_set_drvdata(pdev, host);
  393. if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
  394. host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
  395. error = scsi_add_host(host, &pdev->dev);
  396. if (error)
  397. goto out_free_irq;
  398. error = arcmsr_alloc_sysfs_attr(acb);
  399. if (error)
  400. goto out_free_sysfs;
  401. scsi_scan_host(host);
  402. #ifdef CONFIG_SCSI_ARCMSR_AER
  403. pci_enable_pcie_error_reporting(pdev);
  404. #endif
  405. return 0;
  406. out_free_sysfs:
  407. out_free_irq:
  408. free_irq(pdev->irq, acb);
  409. out_free_ccb_pool:
  410. arcmsr_free_ccb_pool(acb);
  411. iounmap(acb->pmu);
  412. out_release_regions:
  413. pci_release_regions(pdev);
  414. out_host_put:
  415. scsi_host_put(host);
  416. out_disable_device:
  417. pci_disable_device(pdev);
  418. out:
  419. return error;
  420. }
  421. static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
  422. {
  423. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  424. uint32_t Index;
  425. uint8_t Retries = 0x00;
  426. do {
  427. for (Index = 0; Index < 100; Index++) {
  428. if (readl(&reg->outbound_intstatus) &
  429. ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
  430. writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
  431. &reg->outbound_intstatus);
  432. return 0x00;
  433. }
  434. msleep(10);
  435. }/*max 1 seconds*/
  436. } while (Retries++ < 20);/*max 20 sec*/
  437. return 0xff;
  438. }
  439. static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
  440. {
  441. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  442. uint32_t Index;
  443. uint8_t Retries = 0x00;
  444. do {
  445. for (Index = 0; Index < 100; Index++) {
  446. if (readl(reg->iop2drv_doorbell_reg)
  447. & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
  448. writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
  449. , reg->iop2drv_doorbell_reg);
  450. return 0x00;
  451. }
  452. msleep(10);
  453. }/*max 1 seconds*/
  454. } while (Retries++ < 20);/*max 20 sec*/
  455. return 0xff;
  456. }
  457. static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
  458. {
  459. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  460. writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
  461. if (arcmsr_hba_wait_msgint_ready(acb))
  462. printk(KERN_NOTICE
  463. "arcmsr%d: wait 'abort all outstanding command' timeout \n"
  464. , acb->host->host_no);
  465. }
  466. static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
  467. {
  468. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  469. writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
  470. if (arcmsr_hbb_wait_msgint_ready(acb))
  471. printk(KERN_NOTICE
  472. "arcmsr%d: wait 'abort all outstanding command' timeout \n"
  473. , acb->host->host_no);
  474. }
  475. static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
  476. {
  477. switch (acb->adapter_type) {
  478. case ACB_ADAPTER_TYPE_A: {
  479. arcmsr_abort_hba_allcmd(acb);
  480. }
  481. break;
  482. case ACB_ADAPTER_TYPE_B: {
  483. arcmsr_abort_hbb_allcmd(acb);
  484. }
  485. }
  486. }
  487. static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
  488. {
  489. struct scsi_cmnd *pcmd = ccb->pcmd;
  490. scsi_dma_unmap(pcmd);
  491. }
  492. static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
  493. {
  494. struct AdapterControlBlock *acb = ccb->acb;
  495. struct scsi_cmnd *pcmd = ccb->pcmd;
  496. arcmsr_pci_unmap_dma(ccb);
  497. if (stand_flag == 1)
  498. atomic_dec(&acb->ccboutstandingcount);
  499. ccb->startdone = ARCMSR_CCB_DONE;
  500. ccb->ccb_flags = 0;
  501. list_add_tail(&ccb->list, &acb->ccb_free_list);
  502. pcmd->scsi_done(pcmd);
  503. }
  504. static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
  505. {
  506. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  507. int retry_count = 30;
  508. writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
  509. do {
  510. if (!arcmsr_hba_wait_msgint_ready(acb))
  511. break;
  512. else {
  513. retry_count--;
  514. printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
  515. timeout, retry count down = %d \n", acb->host->host_no, retry_count);
  516. }
  517. } while (retry_count != 0);
  518. }
  519. static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
  520. {
  521. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  522. int retry_count = 30;
  523. writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg);
  524. do {
  525. if (!arcmsr_hbb_wait_msgint_ready(acb))
  526. break;
  527. else {
  528. retry_count--;
  529. printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
  530. timeout,retry count down = %d \n", acb->host->host_no, retry_count);
  531. }
  532. } while (retry_count != 0);
  533. }
  534. static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
  535. {
  536. switch (acb->adapter_type) {
  537. case ACB_ADAPTER_TYPE_A: {
  538. arcmsr_flush_hba_cache(acb);
  539. }
  540. break;
  541. case ACB_ADAPTER_TYPE_B: {
  542. arcmsr_flush_hbb_cache(acb);
  543. }
  544. }
  545. }
/*
 * Copy the IOP-supplied sense data for a CHECK CONDITION completion into
 * the midlayer's sense buffer.  The command itself is reported DID_OK —
 * the sense data carries the actual error.
 */
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;
	struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
	pcmd->result = DID_OK << 16;
	if (sensebuffer) {
		/* NOTE(review): sizeof(pcmd->sense_buffer) assumes sense_buffer
		 * is an embedded array in struct scsi_cmnd; if this kernel
		 * version makes it a pointer, this measures the pointer, not
		 * the buffer — confirm against the scsi_cmnd definition. */
		int sense_data_length =
			sizeof(struct SENSE_DATA) < sizeof(pcmd->sense_buffer)
			? sizeof(struct SENSE_DATA) : sizeof(pcmd->sense_buffer);
		memset(sensebuffer, 0, sizeof(pcmd->sense_buffer));
		memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
		sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
		sensebuffer->Valid = 1;
	}
}
  561. static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
  562. {
  563. u32 orig_mask = 0;
  564. switch (acb->adapter_type) {
  565. case ACB_ADAPTER_TYPE_A : {
  566. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  567. orig_mask = readl(&reg->outbound_intmask)|\
  568. ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
  569. writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
  570. &reg->outbound_intmask);
  571. }
  572. break;
  573. case ACB_ADAPTER_TYPE_B : {
  574. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  575. orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \
  576. (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
  577. writel(0, reg->iop2drv_doorbell_mask_reg);
  578. }
  579. break;
  580. }
  581. return orig_mask;
  582. }
  583. static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \
  584. struct CommandControlBlock *ccb, uint32_t flag_ccb)
  585. {
  586. uint8_t id, lun;
  587. id = ccb->pcmd->device->id;
  588. lun = ccb->pcmd->device->lun;
  589. if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
  590. if (acb->devstate[id][lun] == ARECA_RAID_GONE)
  591. acb->devstate[id][lun] = ARECA_RAID_GOOD;
  592. ccb->pcmd->result = DID_OK << 16;
  593. arcmsr_ccb_complete(ccb, 1);
  594. } else {
  595. switch (ccb->arcmsr_cdb.DeviceStatus) {
  596. case ARCMSR_DEV_SELECT_TIMEOUT: {
  597. acb->devstate[id][lun] = ARECA_RAID_GONE;
  598. ccb->pcmd->result = DID_NO_CONNECT << 16;
  599. arcmsr_ccb_complete(ccb, 1);
  600. }
  601. break;
  602. case ARCMSR_DEV_ABORTED:
  603. case ARCMSR_DEV_INIT_FAIL: {
  604. acb->devstate[id][lun] = ARECA_RAID_GONE;
  605. ccb->pcmd->result = DID_BAD_TARGET << 16;
  606. arcmsr_ccb_complete(ccb, 1);
  607. }
  608. break;
  609. case ARCMSR_DEV_CHECK_CONDITION: {
  610. acb->devstate[id][lun] = ARECA_RAID_GOOD;
  611. arcmsr_report_sense_info(ccb);
  612. arcmsr_ccb_complete(ccb, 1);
  613. }
  614. break;
  615. default:
  616. printk(KERN_NOTICE
  617. "arcmsr%d: scsi id = %d lun = %d"
  618. " isr get command error done, "
  619. "but got unknown DeviceStatus = 0x%x \n"
  620. , acb->host->host_no
  621. , id
  622. , lun
  623. , ccb->arcmsr_cdb.DeviceStatus);
  624. acb->devstate[id][lun] = ARECA_RAID_GONE;
  625. ccb->pcmd->result = DID_NO_CONNECT << 16;
  626. arcmsr_ccb_complete(ccb, 1);
  627. break;
  628. }
  629. }
  630. }
/*
** Convert a completion token from the adapter's done queue back into a
** CCB pointer and complete it. flag_ccb carries the CCB's shifted
** physical address; (flag_ccb << 5) plus vir2phy_offset recovers the
** driver's virtual address (CCBs are 32-byte aligned, hence the shift).
*/
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t flag_ccb)
{
struct CommandControlBlock *ccb;
ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
/* sanity check: token must map to a CCB this adapter actually started */
if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
if (ccb->startdone == ARCMSR_CCB_ABORTED) {
/* command was aborted while in flight: finish it as aborted */
struct scsi_cmnd *abortcmd = ccb->pcmd;
if (abortcmd) {
abortcmd->result |= DID_ABORT << 16;
arcmsr_ccb_complete(ccb, 1);
printk(KERN_NOTICE "arcmsr%d: ccb ='0x%p' \
isr got aborted command \n", acb->host->host_no, ccb);
}
}
printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
done acb = '0x%p'"
"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
" ccboutstandingcount = %d \n"
, acb->host->host_no
, acb
, ccb
, ccb->acb
, ccb->startdone
, atomic_read(&acb->ccboutstandingcount));
}
/* report status and hand the command back to the midlayer */
arcmsr_report_ccb_state(acb, ccb, flag_ccb);
}
/*
** Drain and discard everything the adapter has posted to its done
** queue, completing each CCB found; used while aborting all I/O.
*/
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
int i = 0;
uint32_t flag_ccb;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = \
(struct MessageUnit_A *)acb->pmu;
uint32_t outbound_intstatus;
outbound_intstatus = readl(&reg->outbound_intstatus) & \
acb->outbound_int_enable;
/*clear and abort all outbound posted Q*/
writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
/* 0xFFFFFFFF means the queue is empty; i bounds the loop */
while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) \
&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
arcmsr_drain_donequeue(acb, flag_ccb);
}
}
break;
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
/*clear all outbound posted Q*/
for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
writel(0, &reg->done_qbuffer[i]);
arcmsr_drain_donequeue(acb, flag_ccb);
}
/* also clear the matching post-queue slot */
writel(0, &reg->post_qbuffer[i]);
}
/* reset both ring indices to a known state */
reg->doneq_index = 0;
reg->postq_index = 0;
}
break;
}
}
/*
** PCI remove hook: tear the adapter down. Order matters here: detach
** sysfs and the SCSI host first, quiesce the firmware, mask interrupts,
** then drain (or force-abort) outstanding CCBs before freeing the IRQ,
** mappings and the CCB pool.
*/
static void arcmsr_remove(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
int poll_count = 0;
arcmsr_free_sysfs_attr(acb);
scsi_remove_host(host);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
arcmsr_disable_outbound_ints(acb);
acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
acb->acb_flags &= ~ACB_F_IOP_INITED;
/* give in-flight commands a bounded chance to complete */
for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
if (!atomic_read(&acb->ccboutstandingcount))
break;
arcmsr_interrupt(acb);/* FIXME: need spinlock */
msleep(25);
}
if (atomic_read(&acb->ccboutstandingcount)) {
int i;
/* still busy: abort everything and complete leftovers by hand */
arcmsr_abort_allcmd(acb);
arcmsr_done4abort_postqueue(acb);
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
ccb->startdone = ARCMSR_CCB_ABORTED;
ccb->pcmd->result = DID_ABORT << 16;
arcmsr_ccb_complete(ccb, 1);
}
}
}
free_irq(pdev->irq, acb);
iounmap(acb->pmu);
arcmsr_free_ccb_pool(acb);
pci_release_regions(pdev);
scsi_host_put(host);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
  733. static void arcmsr_shutdown(struct pci_dev *pdev)
  734. {
  735. struct Scsi_Host *host = pci_get_drvdata(pdev);
  736. struct AdapterControlBlock *acb =
  737. (struct AdapterControlBlock *)host->hostdata;
  738. arcmsr_stop_adapter_bgrb(acb);
  739. arcmsr_flush_adapter_cache(acb);
  740. }
  741. static int arcmsr_module_init(void)
  742. {
  743. int error = 0;
  744. error = pci_register_driver(&arcmsr_pci_driver);
  745. return error;
  746. }
/* Module exit point: unregister the PCI driver. */
static void arcmsr_module_exit(void)
{
pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
  753. static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
  754. u32 intmask_org)
  755. {
  756. u32 mask;
  757. switch (acb->adapter_type) {
  758. case ACB_ADAPTER_TYPE_A : {
  759. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  760. mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
  761. ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
  762. writel(mask, &reg->outbound_intmask);
  763. acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
  764. }
  765. break;
  766. case ACB_ADAPTER_TYPE_B : {
  767. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  768. mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \
  769. ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
  770. writel(mask, reg->iop2drv_doorbell_mask_reg);
  771. acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
  772. }
  773. }
  774. }
/*
** Translate a scsi_cmnd into the adapter's ARCMSR_CDB format inside
** the CCB: copy the CDB bytes, then DMA-map the scatterlist into
** 32-bit or 64-bit IOP SG entries depending on each segment's address.
*/
static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
int8_t *psge = (int8_t *)&arcmsr_cdb->u;
uint32_t address_lo, address_hi;
int arccdbsize = 0x30;
int nseg;
ccb->pcmd = pcmd;
memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
arcmsr_cdb->Bus = 0;
arcmsr_cdb->TargetID = pcmd->device->id;
arcmsr_cdb->LUN = pcmd->device->lun;
arcmsr_cdb->Function = 1;
arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
nseg = scsi_dma_map(pcmd);
BUG_ON(nseg < 0);
if (nseg) {
int length, i, cdb_sgcount = 0;
struct scatterlist *sg;
/* map stor port SG list to our iop SG List. */
scsi_for_each_sg(pcmd, sg, nseg, i) {
/* Get the physical address of the current data pointer */
length = cpu_to_le32(sg_dma_len(sg));
address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
if (address_hi == 0) {
/* segment fits in 32 bits: use the compact SG entry */
struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
pdma_sg->address = address_lo;
pdma_sg->length = length;
psge += sizeof (struct SG32ENTRY);
arccdbsize += sizeof (struct SG32ENTRY);
} else {
/* 64-bit segment: IS_SG64_ADDR marks the wide entry */
struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
pdma_sg->addresshigh = address_hi;
pdma_sg->address = address_lo;
pdma_sg->length = length|IS_SG64_ADDR;
psge += sizeof (struct SG64ENTRY);
arccdbsize += sizeof (struct SG64ENTRY);
}
cdb_sgcount++;
}
arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
/* oversized SG payloads must be posted with the big-size flag */
if ( arccdbsize > 256)
arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
}
if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
ccb->ccb_flags |= CCB_FLAG_WRITE;
}
}
/*
** Hand a built CCB to the adapter. Type A writes the shifted CCB
** physical address to the inbound queue port; type B places it in the
** next post-ring slot and rings the doorbell.
*/
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
atomic_inc(&acb->ccboutstandingcount);
ccb->startdone = ARCMSR_CCB_START;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu;
if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
&reg->inbound_queueport);
else {
writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
}
}
break;
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
uint32_t ending_index, index = reg->postq_index;
/* pre-clear the slot after ours so the ring stays terminated */
ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
writel(0, &reg->post_qbuffer[ending_index]);
if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
&reg->post_qbuffer[index]);
}
else {
writel(cdb_shifted_phyaddr, &reg->post_qbuffer[index]);
}
index++;
index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
reg->postq_index = index;
/* notify the IOP that a CDB is waiting */
writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell_reg);
}
break;
}
}
  866. static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
  867. {
  868. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  869. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  870. writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
  871. if (arcmsr_hba_wait_msgint_ready(acb)) {
  872. printk(KERN_NOTICE
  873. "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
  874. , acb->host->host_no);
  875. }
  876. }
  877. static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
  878. {
  879. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  880. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  881. writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg);
  882. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  883. printk(KERN_NOTICE
  884. "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
  885. , acb->host->host_no);
  886. }
  887. }
  888. static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
  889. {
  890. switch (acb->adapter_type) {
  891. case ACB_ADAPTER_TYPE_A: {
  892. arcmsr_stop_hba_bgrb(acb);
  893. }
  894. break;
  895. case ACB_ADAPTER_TYPE_B: {
  896. arcmsr_stop_hbb_bgrb(acb);
  897. }
  898. break;
  899. }
  900. }
/*
** Release the DMA-coherent region that holds the whole CCB pool
** (allocated with 0x20 extra bytes of alignment slack).
*/
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
dma_free_coherent(&acb->pdev->dev,
ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
acb->dma_coherent,
acb->dma_coherent_handle);
}
  908. void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
  909. {
  910. switch (acb->adapter_type) {
  911. case ACB_ADAPTER_TYPE_A: {
  912. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  913. writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
  914. }
  915. break;
  916. case ACB_ADAPTER_TYPE_B: {
  917. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  918. writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
  919. }
  920. break;
  921. }
  922. }
  923. static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
  924. {
  925. switch (acb->adapter_type) {
  926. case ACB_ADAPTER_TYPE_A: {
  927. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  928. /*
  929. ** push inbound doorbell tell iop, driver data write ok
  930. ** and wait reply on next hwinterrupt for next Qbuffer post
  931. */
  932. writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
  933. }
  934. break;
  935. case ACB_ADAPTER_TYPE_B: {
  936. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  937. /*
  938. ** push inbound doorbell tell iop, driver data write ok
  939. ** and wait reply on next hwinterrupt for next Qbuffer post
  940. */
  941. writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell_reg);
  942. }
  943. break;
  944. }
  945. }
  946. struct QBUFFER *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
  947. {
  948. static struct QBUFFER *qbuffer;
  949. switch (acb->adapter_type) {
  950. case ACB_ADAPTER_TYPE_A: {
  951. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  952. qbuffer = (struct QBUFFER __iomem *) &reg->message_rbuffer;
  953. }
  954. break;
  955. case ACB_ADAPTER_TYPE_B: {
  956. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  957. qbuffer = (struct QBUFFER __iomem *) reg->ioctl_rbuffer_reg;
  958. }
  959. break;
  960. }
  961. return qbuffer;
  962. }
  963. static struct QBUFFER *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
  964. {
  965. static struct QBUFFER *pqbuffer;
  966. switch (acb->adapter_type) {
  967. case ACB_ADAPTER_TYPE_A: {
  968. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  969. pqbuffer = (struct QBUFFER *) &reg->message_wbuffer;
  970. }
  971. break;
  972. case ACB_ADAPTER_TYPE_B: {
  973. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  974. pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg;
  975. }
  976. break;
  977. }
  978. return pqbuffer;
  979. }
/*
** The IOP signalled that it wrote data into its request Qbuffer: copy
** it into the driver's circular rqbuffer if there is room, otherwise
** flag an overflow so the ioctl reader drains it later.
*/
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
struct QBUFFER *prbuffer;
struct QBUFFER *pQbuffer;
uint8_t *iop_data;
int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
rqbuf_lastindex = acb->rqbuf_lastindex;
rqbuf_firstindex = acb->rqbuf_firstindex;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
iop_data = (uint8_t *)prbuffer->data;
iop_len = prbuffer->data_len;
/* free space in the circular buffer (size is a power of two) */
my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1);
if (my_empty_len >= iop_len)
{
/* NOTE(review): iop_data points at adapter memory but is copied with
** memcpy; another path in this file uses readb() for the same buffer
** -- confirm which access style this bus requires. */
while (iop_len > 0) {
pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
memcpy(pQbuffer, iop_data,1);
rqbuf_lastindex++;
rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
iop_data++;
iop_len--;
}
acb->rqbuf_lastindex = rqbuf_lastindex;
/* ack so the IOP may post the next buffer */
arcmsr_iop_message_read(acb);
}
else {
acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
}
}
/*
** The IOP signalled it consumed our last write Qbuffer: if more
** driver-to-IOP data is queued in wqbuffer, push the next chunk (up to
** 124 bytes) and notify the IOP; otherwise mark the queue cleared.
*/
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
uint8_t *pQbuffer;
struct QBUFFER *pwbuffer;
uint8_t *iop_data;
int32_t allxfer_len = 0;
acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
pwbuffer = arcmsr_get_iop_wqbuffer(acb);
iop_data = (uint8_t __iomem *)pwbuffer->data;
/* copy byte-by-byte out of the circular buffer, 124 bytes max */
while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \
(allxfer_len < 124)) {
pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
memcpy(iop_data, pQbuffer, 1);
acb->wqbuf_firstindex++;
acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
iop_data++;
allxfer_len++;
}
pwbuffer->data_len = allxfer_len;
arcmsr_iop_message_wrote(acb);
}
if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
}
}
  1036. static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
  1037. {
  1038. uint32_t outbound_doorbell;
  1039. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  1040. outbound_doorbell = readl(&reg->outbound_doorbell);
  1041. writel(outbound_doorbell, &reg->outbound_doorbell);
  1042. if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
  1043. arcmsr_iop2drv_data_wrote_handle(acb);
  1044. }
  1045. if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
  1046. arcmsr_iop2drv_data_read_handle(acb);
  1047. }
  1048. }
  1049. static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
  1050. {
  1051. uint32_t flag_ccb;
  1052. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  1053. while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
  1054. arcmsr_drain_donequeue(acb, flag_ccb);
  1055. }
  1056. }
  1057. static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
  1058. {
  1059. uint32_t index;
  1060. uint32_t flag_ccb;
  1061. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  1062. index = reg->doneq_index;
  1063. while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
  1064. writel(0, &reg->done_qbuffer[index]);
  1065. arcmsr_drain_donequeue(acb, flag_ccb);
  1066. index++;
  1067. index %= ARCMSR_MAX_HBB_POSTQUEUE;
  1068. reg->doneq_index = index;
  1069. }
  1070. }
  1071. static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
  1072. {
  1073. uint32_t outbound_intstatus;
  1074. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  1075. outbound_intstatus = readl(&reg->outbound_intstatus) & \
  1076. acb->outbound_int_enable;
  1077. if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
  1078. return 1;
  1079. }
  1080. writel(outbound_intstatus, &reg->outbound_intstatus);
  1081. if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
  1082. arcmsr_hba_doorbell_isr(acb);
  1083. }
  1084. if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
  1085. arcmsr_hba_postqueue_isr(acb);
  1086. }
  1087. return 0;
  1088. }
  1089. static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
  1090. {
  1091. uint32_t outbound_doorbell;
  1092. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  1093. outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \
  1094. acb->outbound_int_enable;
  1095. if (!outbound_doorbell)
  1096. return 1;
  1097. writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
  1098. if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
  1099. arcmsr_iop2drv_data_wrote_handle(acb);
  1100. }
  1101. if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
  1102. arcmsr_iop2drv_data_read_handle(acb);
  1103. }
  1104. if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
  1105. arcmsr_hbb_postqueue_isr(acb);
  1106. }
  1107. return 0;
  1108. }
  1109. static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
  1110. {
  1111. switch (acb->adapter_type) {
  1112. case ACB_ADAPTER_TYPE_A: {
  1113. if (arcmsr_handle_hba_isr(acb)) {
  1114. return IRQ_NONE;
  1115. }
  1116. }
  1117. break;
  1118. case ACB_ADAPTER_TYPE_B: {
  1119. if (arcmsr_handle_hbb_isr(acb)) {
  1120. return IRQ_NONE;
  1121. }
  1122. }
  1123. break;
  1124. }
  1125. return IRQ_HANDLED;
  1126. }
  1127. static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
  1128. {
  1129. if (acb) {
  1130. /* stop adapter background rebuild */
  1131. if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
  1132. uint32_t intmask_org;
  1133. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  1134. intmask_org = arcmsr_disable_outbound_ints(acb);
  1135. arcmsr_stop_adapter_bgrb(acb);
  1136. arcmsr_flush_adapter_cache(acb);
  1137. arcmsr_enable_outbound_ints(acb, intmask_org);
  1138. }
  1139. }
  1140. }
/*
** Push queued driver-to-IOP ioctl data: if the IOP has consumed the
** previous write Qbuffer (WQBUFFER_READED set), copy up to 124 bytes
** from the circular wqbuffer into the IOP's write buffer and ring the
** doorbell.
*/
void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
{
int32_t wqbuf_firstindex, wqbuf_lastindex;
uint8_t *pQbuffer;
struct QBUFFER *pwbuffer;
uint8_t *iop_data;
int32_t allxfer_len = 0;
pwbuffer = arcmsr_get_iop_wqbuffer(acb);
iop_data = (uint8_t __iomem *)pwbuffer->data;
if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
wqbuf_firstindex = acb->wqbuf_firstindex;
wqbuf_lastindex = acb->wqbuf_lastindex;
/* byte-by-byte copy out of the circular buffer, 124 bytes max */
while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
memcpy(iop_data, pQbuffer, 1);
wqbuf_firstindex++;
wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
iop_data++;
allxfer_len++;
}
acb->wqbuf_firstindex = wqbuf_firstindex;
pwbuffer->data_len = allxfer_len;
arcmsr_iop_message_wrote(acb);
}
}
/*
** Execute an Areca ioctl-style message carried inside a SCSI
** WRITE_BUFFER/READ_BUFFER command to the virtual device. The control
** code is taken from CDB bytes 5..8 and the payload lives in the
** command's (single-segment) scatterlist. Returns 0 on success or
** ARCMSR_MESSAGE_FAIL.
*/
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
struct scsi_cmnd *cmd)
{
struct CMD_MESSAGE_FIELD *pcmdmessagefld;
int retvalue = 0, transfer_len = 0;
char *buffer;
struct scatterlist *sg;
/* control code is carried big-endian in CDB bytes 5..8 */
uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
(uint32_t ) cmd->cmnd[6] << 16 |
(uint32_t ) cmd->cmnd[7] << 8 |
(uint32_t ) cmd->cmnd[8];
/* 4 bytes: Areca io control code */
sg = scsi_sglist(cmd);
buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
/* only single-segment payloads are supported */
if (scsi_sg_count(cmd) > 1) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
transfer_len += sg->length;
if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
switch(controlcode) {
case ARCMSR_MESSAGE_READ_RQBUFFER: {
/* copy the driver's receive ring out to the caller's buffer */
unsigned long *ver_addr;
dma_addr_t buf_handle;
uint8_t *pQbuffer, *ptmpQbuffer;
int32_t allxfer_len = 0;
ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
if (!ver_addr) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
ptmpQbuffer = (uint8_t *) ver_addr;
while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
&& (allxfer_len < 1031)) {
pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
memcpy(ptmpQbuffer, pQbuffer, 1);
acb->rqbuf_firstindex++;
acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
ptmpQbuffer++;
allxfer_len++;
}
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
/* earlier data overflowed: drain the IOP buffer now */
struct QBUFFER *prbuffer;
uint8_t *iop_data;
int32_t iop_len;
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
iop_data = (uint8_t *)prbuffer->data;
iop_len = readl(&prbuffer->data_len);
while (iop_len > 0) {
acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
acb->rqbuf_lastindex++;
acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
iop_data++;
iop_len--;
}
arcmsr_iop_message_read(acb);
}
memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len);
pcmdmessagefld->cmdmessage.Length = allxfer_len;
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
}
break;
case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
/* queue caller-supplied data for transmission to the IOP */
unsigned long *ver_addr;
dma_addr_t buf_handle;
int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
uint8_t *pQbuffer, *ptmpuserbuffer;
ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
if (!ver_addr) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
ptmpuserbuffer = (uint8_t *)ver_addr;
user_len = pcmdmessagefld->cmdmessage.Length;
/* NOTE(review): user_len comes from the message and is not bounded
** against the 1032-byte staging buffer before this memcpy -- verify */
memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
wqbuf_lastindex = acb->wqbuf_lastindex;
wqbuf_firstindex = acb->wqbuf_firstindex;
if (wqbuf_lastindex != wqbuf_firstindex) {
/* previous data still pending: push it and report busy */
struct SENSE_DATA *sensebuffer =
(struct SENSE_DATA *)cmd->sense_buffer;
arcmsr_post_ioctldata2iop(acb);
/* has error report sensedata */
sensebuffer->ErrorCode = 0x70;
sensebuffer->SenseKey = ILLEGAL_REQUEST;
sensebuffer->AdditionalSenseLength = 0x0A;
sensebuffer->AdditionalSenseCode = 0x20;
sensebuffer->Valid = 1;
retvalue = ARCMSR_MESSAGE_FAIL;
} else {
my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
&(ARCMSR_MAX_QBUFFER - 1);
if (my_empty_len >= user_len) {
while (user_len > 0) {
pQbuffer =
&acb->wqbuffer[acb->wqbuf_lastindex];
memcpy(pQbuffer, ptmpuserbuffer, 1);
acb->wqbuf_lastindex++;
acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
ptmpuserbuffer++;
user_len--;
}
if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
acb->acb_flags &=
~ACB_F_MESSAGE_WQBUFFER_CLEARED;
arcmsr_post_ioctldata2iop(acb);
}
} else {
/* has error report sensedata */
struct SENSE_DATA *sensebuffer =
(struct SENSE_DATA *)cmd->sense_buffer;
sensebuffer->ErrorCode = 0x70;
sensebuffer->SenseKey = ILLEGAL_REQUEST;
sensebuffer->AdditionalSenseLength = 0x0A;
sensebuffer->AdditionalSenseCode = 0x20;
sensebuffer->Valid = 1;
retvalue = ARCMSR_MESSAGE_FAIL;
}
}
pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
}
break;
case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
/* reset the receive ring (draining any IOP overflow first) */
uint8_t *pQbuffer = acb->rqbuffer;
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
arcmsr_iop_message_read(acb);
}
acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
acb->rqbuf_firstindex = 0;
acb->rqbuf_lastindex = 0;
memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
}
break;
case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
/* reset the transmit ring */
uint8_t *pQbuffer = acb->wqbuffer;
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
arcmsr_iop_message_read(acb);
}
acb->acb_flags |=
(ACB_F_MESSAGE_WQBUFFER_CLEARED |
ACB_F_MESSAGE_WQBUFFER_READED);
acb->wqbuf_firstindex = 0;
acb->wqbuf_lastindex = 0;
memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
}
break;
case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
/* reset both rings at once */
uint8_t *pQbuffer;
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
arcmsr_iop_message_read(acb);
}
acb->acb_flags |=
(ACB_F_MESSAGE_WQBUFFER_CLEARED
| ACB_F_MESSAGE_RQBUFFER_CLEARED
| ACB_F_MESSAGE_WQBUFFER_READED);
acb->rqbuf_firstindex = 0;
acb->rqbuf_lastindex = 0;
acb->wqbuf_firstindex = 0;
acb->wqbuf_lastindex = 0;
pQbuffer = acb->rqbuffer;
memset(pQbuffer, 0, sizeof(struct QBUFFER));
pQbuffer = acb->wqbuffer;
memset(pQbuffer, 0, sizeof(struct QBUFFER));
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
}
break;
case ARCMSR_MESSAGE_RETURN_CODE_3F: {
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
}
break;
case ARCMSR_MESSAGE_SAY_HELLO: {
/* identification handshake used by the management tools */
int8_t *hello_string = "Hello! I am ARCMSR";
memcpy(pcmdmessagefld->messagedatabuffer, hello_string
, (int16_t)strlen(hello_string));
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
}
break;
case ARCMSR_MESSAGE_SAY_GOODBYE:
arcmsr_iop_parking(acb);
break;
case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
arcmsr_flush_adapter_cache(acb);
break;
default:
retvalue = ARCMSR_MESSAGE_FAIL;
}
message_out:
sg = scsi_sglist(cmd);
kunmap_atomic(buffer - sg->offset, KM_IRQ0);
return retvalue;
}
  1369. static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
  1370. {
  1371. struct list_head *head = &acb->ccb_free_list;
  1372. struct CommandControlBlock *ccb = NULL;
  1373. if (!list_empty(head)) {
  1374. ccb = list_entry(head->next, struct CommandControlBlock, list);
  1375. list_del(head->next);
  1376. }
  1377. return ccb;
  1378. }
  1379. static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
  1380. struct scsi_cmnd *cmd)
  1381. {
  1382. switch (cmd->cmnd[0]) {
  1383. case INQUIRY: {
  1384. unsigned char inqdata[36];
  1385. char *buffer;
  1386. struct scatterlist *sg;
  1387. if (cmd->device->lun) {
  1388. cmd->result = (DID_TIME_OUT << 16);
  1389. cmd->scsi_done(cmd);
  1390. return;
  1391. }
  1392. inqdata[0] = TYPE_PROCESSOR;
  1393. /* Periph Qualifier & Periph Dev Type */
  1394. inqdata[1] = 0;
  1395. /* rem media bit & Dev Type Modifier */
  1396. inqdata[2] = 0;
  1397. /* ISO, ECMA, & ANSI versions */
  1398. inqdata[4] = 31;
  1399. /* length of additional data */
  1400. strncpy(&inqdata[8], "Areca ", 8);
  1401. /* Vendor Identification */
  1402. strncpy(&inqdata[16], "RAID controller ", 16);
  1403. /* Product Identification */
  1404. strncpy(&inqdata[32], "R001", 4); /* Product Revision */
  1405. sg = scsi_sglist(cmd);
  1406. buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
  1407. memcpy(buffer, inqdata, sizeof(inqdata));
  1408. sg = scsi_sglist(cmd);
  1409. kunmap_atomic(buffer - sg->offset, KM_IRQ0);
  1410. cmd->scsi_done(cmd);
  1411. }
  1412. break;
  1413. case WRITE_BUFFER:
  1414. case READ_BUFFER: {
  1415. if (arcmsr_iop_message_xfer(acb, cmd))
  1416. cmd->result = (DID_ERROR << 16);
  1417. cmd->scsi_done(cmd);
  1418. }
  1419. break;
  1420. default:
  1421. cmd->scsi_done(cmd);
  1422. }
  1423. }
/*
** SCSI midlayer queuecommand entry point. Target 16 is the virtual
** device used for IOP messaging; all other commands are wrapped in a
** CCB and posted to the adapter. Returns 0 or SCSI_MLQUEUE_HOST_BUSY.
*/
static int arcmsr_queue_command(struct scsi_cmnd *cmd,
void (* done)(struct scsi_cmnd *))
{
struct Scsi_Host *host = cmd->device->host;
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
struct CommandControlBlock *ccb;
int target = cmd->device->id;
int lun = cmd->device->lun;
cmd->scsi_done = done;
cmd->host_scribble = NULL;
cmd->result = 0;
/* refuse new work while a bus reset is in progress */
if (acb->acb_flags & ACB_F_BUS_RESET) {
printk(KERN_NOTICE "arcmsr%d: bus reset"
" and return busy \n"
, acb->host->host_no);
return SCSI_MLQUEUE_HOST_BUSY;
}
if (target == 16) {
/* virtual device for iop message transfer */
arcmsr_handle_virtual_command(acb, cmd);
return 0;
}
if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
uint8_t block_cmd;
/* reject block read/write (opcodes x8/xA) to a vanished volume */
block_cmd = cmd->cmnd[0] & 0x0f;
if (block_cmd == 0x08 || block_cmd == 0x0a) {
printk(KERN_NOTICE
"arcmsr%d: block 'read/write'"
"command with gone raid volume"
" Cmd = %2x, TargetId = %d, Lun = %d \n"
, acb->host->host_no
, cmd->cmnd[0]
, target, lun);
cmd->result = (DID_NO_CONNECT << 16);
cmd->scsi_done(cmd);
return 0;
}
}
if (atomic_read(&acb->ccboutstandingcount) >=
ARCMSR_MAX_OUTSTANDING_CMD)
return SCSI_MLQUEUE_HOST_BUSY;
ccb = arcmsr_get_freeccb(acb);
if (!ccb)
return SCSI_MLQUEUE_HOST_BUSY;
arcmsr_build_ccb(acb, ccb, cmd);
arcmsr_post_ccb(acb, ccb);
return 0;
}
  1472. static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
  1473. {
  1474. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  1475. char *acb_firm_model = acb->firm_model;
  1476. char *acb_firm_version = acb->firm_version;
  1477. char *iop_firm_model = (char *) (&reg->message_rwbuffer[15]);
  1478. char *iop_firm_version = (char *) (&reg->message_rwbuffer[17]);
  1479. int count;
  1480. writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
  1481. if (arcmsr_hba_wait_msgint_ready(acb)) {
  1482. printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
  1483. miscellaneous data' timeout \n", acb->host->host_no);
  1484. }
  1485. count = 8;
  1486. while (count) {
  1487. *acb_firm_model = readb(iop_firm_model);
  1488. acb_firm_model++;
  1489. iop_firm_model++;
  1490. count--;
  1491. }
  1492. count = 16;
  1493. while (count) {
  1494. *acb_firm_version = readb(iop_firm_version);
  1495. acb_firm_version++;
  1496. iop_firm_version++;
  1497. count--;
  1498. }
  1499. printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
  1500. , acb->host->host_no
  1501. , acb->firm_version);
  1502. acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
  1503. acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
  1504. acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
  1505. acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
  1506. }
  1507. static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
  1508. {
  1509. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  1510. uint32_t *lrwbuffer = reg->msgcode_rwbuffer_reg;
  1511. char *acb_firm_model = acb->firm_model;
  1512. char *acb_firm_version = acb->firm_version;
  1513. char *iop_firm_model = (char *) (&lrwbuffer[15]);
  1514. /*firm_model,15,60-67*/
  1515. char *iop_firm_version = (char *) (&lrwbuffer[17]);
  1516. /*firm_version,17,68-83*/
  1517. int count;
  1518. writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
  1519. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  1520. printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
  1521. miscellaneous data' timeout \n", acb->host->host_no);
  1522. }
  1523. count = 8;
  1524. while (count)
  1525. {
  1526. *acb_firm_model = readb(iop_firm_model);
  1527. acb_firm_model++;
  1528. iop_firm_model++;
  1529. count--;
  1530. }
  1531. count = 16;
  1532. while (count)
  1533. {
  1534. *acb_firm_version = readb(iop_firm_version);
  1535. acb_firm_version++;
  1536. iop_firm_version++;
  1537. count--;
  1538. }
  1539. printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n",
  1540. acb->host->host_no,
  1541. acb->firm_version);
  1542. lrwbuffer++;
  1543. acb->firm_request_len = readl(lrwbuffer++);
  1544. /*firm_request_len,1,04-07*/
  1545. acb->firm_numbers_queue = readl(lrwbuffer++);
  1546. /*firm_numbers_queue,2,08-11*/
  1547. acb->firm_sdram_size = readl(lrwbuffer++);
  1548. /*firm_sdram_size,3,12-15*/
  1549. acb->firm_hd_channels = readl(lrwbuffer);
  1550. /*firm_ide_channels,4,16-19*/
  1551. }
  1552. static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
  1553. {
  1554. switch (acb->adapter_type) {
  1555. case ACB_ADAPTER_TYPE_A: {
  1556. arcmsr_get_hba_config(acb);
  1557. }
  1558. break;
  1559. case ACB_ADAPTER_TYPE_B: {
  1560. arcmsr_get_hbb_config(acb);
  1561. }
  1562. break;
  1563. }
  1564. }
/*
 * arcmsr_polling_hba_ccbdone - drain the type-A outbound post queue until
 * @poll_ccb is seen completed or aborted, finishing every CCB popped along
 * the way.
 *
 * Called with outbound interrupts masked (see arcmsr_abort_one_cmd).  When
 * the queue is empty the loop sleeps 25 ms per retry and gives up after
 * ~100 retries.
 */
static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
polling_hba_ccb_retry:
	poll_count++;
	/* acknowledge any pending outbound interrupt before polling */
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		/* 0xFFFFFFFF from the queue port means "queue empty" */
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done)
				break;
			else {
				msleep(25);
				if (poll_count > 100)
					break;
				goto polling_hba_ccb_retry;
			}
		}
		/* queue entry encodes the CCB offset in 32-byte units */
		ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			/* an aborted CCB (or the one polled for) completes with DID_ABORT */
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				poll_ccb_done = 1;
				continue;
			}
			/* stale or foreign entry: log it and keep draining */
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* normal completion path */
		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
	}
}
/*
 * arcmsr_polling_hbb_ccbdone - drain the type-B done-queue ring until
 * @poll_ccb is seen completed or aborted, finishing every CCB popped along
 * the way.
 *
 * Called with outbound interrupts masked (see arcmsr_abort_one_cmd).  When
 * the ring is empty the loop sleeps 25 ms per retry and gives up after
 * ~100 retries.
 */
static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index;
polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
	while (1) {
		index = reg->doneq_index;
		/* a zero entry means the done queue is empty */
		if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
			if (poll_ccb_done)
				break;
			else {
				msleep(25);
				if (poll_count > 100)
					break;
				goto polling_hbb_ccb_retry;
			}
		}
		/* consume the slot, then advance the ring index */
		writel(0, &reg->done_qbuffer[index]);
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check ifcommand done with no error*/
		ccb = (struct CommandControlBlock *)\
(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			/* an aborted CCB is completed here with DID_ABORT */
			if (ccb->startdone == ARCMSR_CCB_ABORTED) {
				printk(KERN_NOTICE "arcmsr%d: \
scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n"
					,acb->host->host_no
					,ccb->pcmd->device->id
					,ccb->pcmd->device->lun
					,ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				continue;
			}
			/* stale or foreign entry: log it and keep draining */
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* normal completion path */
		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
	} /*drain reply FIFO*/
}
  1667. static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, \
  1668. struct CommandControlBlock *poll_ccb)
  1669. {
  1670. switch (acb->adapter_type) {
  1671. case ACB_ADAPTER_TYPE_A: {
  1672. arcmsr_polling_hba_ccbdone(acb,poll_ccb);
  1673. }
  1674. break;
  1675. case ACB_ADAPTER_TYPE_B: {
  1676. arcmsr_polling_hbb_ccbdone(acb,poll_ccb);
  1677. }
  1678. }
  1679. }
  1680. static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
  1681. {
  1682. uint32_t cdb_phyaddr, ccb_phyaddr_hi32;
  1683. dma_addr_t dma_coherent_handle;
  1684. /*
  1685. ********************************************************************
  1686. ** here we need to tell iop 331 our freeccb.HighPart
  1687. ** if freeccb.HighPart is not zero
  1688. ********************************************************************
  1689. */
  1690. dma_coherent_handle = acb->dma_coherent_handle;
  1691. cdb_phyaddr = (uint32_t)(dma_coherent_handle);
  1692. ccb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
  1693. /*
  1694. ***********************************************************************
  1695. ** if adapter type B, set window of "post command Q"
  1696. ***********************************************************************
  1697. */
  1698. switch (acb->adapter_type) {
  1699. case ACB_ADAPTER_TYPE_A: {
  1700. if (ccb_phyaddr_hi32 != 0) {
  1701. struct MessageUnit_A __iomem *reg = \
  1702. (struct MessageUnit_A *)acb->pmu;
  1703. uint32_t intmask_org;
  1704. intmask_org = arcmsr_disable_outbound_ints(acb);
  1705. writel(ARCMSR_SIGNATURE_SET_CONFIG, \
  1706. &reg->message_rwbuffer[0]);
  1707. writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
  1708. writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
  1709. &reg->inbound_msgaddr0);
  1710. if (arcmsr_hba_wait_msgint_ready(acb)) {
  1711. printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
  1712. part physical address timeout\n",
  1713. acb->host->host_no);
  1714. return 1;
  1715. }
  1716. arcmsr_enable_outbound_ints(acb, intmask_org);
  1717. }
  1718. }
  1719. break;
  1720. case ACB_ADAPTER_TYPE_B: {
  1721. unsigned long post_queue_phyaddr;
  1722. uint32_t *rwbuffer;
  1723. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  1724. uint32_t intmask_org;
  1725. intmask_org = arcmsr_disable_outbound_ints(acb);
  1726. reg->postq_index = 0;
  1727. reg->doneq_index = 0;
  1728. writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell_reg);
  1729. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  1730. printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
  1731. acb->host->host_no);
  1732. return 1;
  1733. }
  1734. post_queue_phyaddr = cdb_phyaddr + ARCMSR_MAX_FREECCB_NUM * \
  1735. sizeof(struct CommandControlBlock) + offsetof(struct MessageUnit_B, post_qbuffer) ;
  1736. rwbuffer = reg->msgcode_rwbuffer_reg;
  1737. /* driver "set config" signature */
  1738. writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
  1739. /* normal should be zero */
  1740. writel(ccb_phyaddr_hi32, rwbuffer++);
  1741. /* postQ size (256 + 8)*4 */
  1742. writel(post_queue_phyaddr, rwbuffer++);
  1743. /* doneQ size (256 + 8)*4 */
  1744. writel(post_queue_phyaddr + 1056, rwbuffer++);
  1745. /* ccb maxQ size must be --> [(256 + 8)*4]*/
  1746. writel(1056, rwbuffer);
  1747. writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell_reg);
  1748. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  1749. printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
  1750. timeout \n",acb->host->host_no);
  1751. return 1;
  1752. }
  1753. writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell_reg);
  1754. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  1755. printk(KERN_NOTICE "arcmsr%d: 'can not set diver mode \n"\
  1756. ,acb->host->host_no);
  1757. return 1;
  1758. }
  1759. arcmsr_enable_outbound_ints(acb, intmask_org);
  1760. }
  1761. break;
  1762. }
  1763. return 0;
  1764. }
/*
 * arcmsr_wait_firmware_ready - spin until the IOP firmware reports ready.
 *
 * NOTE(review): this is an unbounded busy-wait with no timeout; if the
 * firmware never raises its FIRMWARE_OK bit this loop never returns.
 */
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
{
	uint32_t firmware_state = 0;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		/* type A: ready bit lives in outbound message register 1 */
		struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
		do {
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
	}
	break;
	case ACB_ADAPTER_TYPE_B: {
		/* type B: ready bit lives in the iop-to-driver doorbell */
		struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
		do {
			firmware_state = readl(reg->iop2drv_doorbell_reg);
		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
	}
	break;
	}
}
  1785. static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
  1786. {
  1787. struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
  1788. acb->acb_flags |= ACB_F_MSG_START_BGRB;
  1789. writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
  1790. if (arcmsr_hba_wait_msgint_ready(acb)) {
  1791. printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
  1792. rebulid' timeout \n", acb->host->host_no);
  1793. }
  1794. }
  1795. static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
  1796. {
  1797. struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
  1798. acb->acb_flags |= ACB_F_MSG_START_BGRB;
  1799. writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg);
  1800. if (arcmsr_hbb_wait_msgint_ready(acb)) {
  1801. printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
  1802. rebulid' timeout \n",acb->host->host_no);
  1803. }
  1804. }
  1805. static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
  1806. {
  1807. switch (acb->adapter_type) {
  1808. case ACB_ADAPTER_TYPE_A:
  1809. arcmsr_start_hba_bgrb(acb);
  1810. break;
  1811. case ACB_ADAPTER_TYPE_B:
  1812. arcmsr_start_hbb_bgrb(acb);
  1813. break;
  1814. }
  1815. }
/*
 * arcmsr_clear_doorbell_queue_buffer - flush stale doorbell state.
 *
 * Acknowledges any pending doorbell from the IOP and tells it the driver
 * has consumed the data, so both sides start from an empty doorbell queue.
 * The register write order is part of the handshake; do not reorder.
 */
static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu;
		uint32_t outbound_doorbell;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/*clear doorbell interrupt */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		/* tell the IOP the driver has read the data */
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
		/*clear interrupt and message state*/
		writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
		/* let IOP know data has been read */
	}
	break;
	}
}
/*
 * arcmsr_iop_init - bring the IOP to its fully initialized state.
 *
 * The sequence is order-sensitive: wait for firmware ready, confirm the
 * CCB window, read the firmware spec with outbound interrupts masked,
 * start background rebuild, flush stale doorbells, and only then
 * re-enable interrupts and mark the IOP initialized.
 */
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/* disable all outbound interrupt */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_get_firmware_spec(acb);
	/*start background rebuild*/
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	/* enable outbound Post Queue,outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
/*
 * arcmsr_iop_reset - abort and reap everything the IOP still owns.
 *
 * When commands are outstanding: ask the firmware to abort them all,
 * give it 3 s, mask outbound interrupts, drain the posted queues, then
 * complete every CCB still marked in-flight with ARCMSR_CCB_ABORTED
 * before unmasking interrupts again.
 */
static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	int i = 0;
	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* talk to iop 331 outstanding command aborted */
		arcmsr_abort_allcmd(acb);
		/* wait for 3 sec for all command aborted*/
		ssleep(3);
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* clear all outbound posted Q */
		arcmsr_done4abort_postqueue(acb);
		/* fail any CCB the firmware never reported back */
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				arcmsr_ccb_complete(ccb, 1);
			}
		}
		/* enable all outbound interrupt */
		arcmsr_enable_outbound_ints(acb, intmask_org);
	}
}
  1880. static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
  1881. {
  1882. struct AdapterControlBlock *acb =
  1883. (struct AdapterControlBlock *)cmd->device->host->hostdata;
  1884. int i;
  1885. acb->num_resets++;
  1886. acb->acb_flags |= ACB_F_BUS_RESET;
  1887. for (i = 0; i < 400; i++) {
  1888. if (!atomic_read(&acb->ccboutstandingcount))
  1889. break;
  1890. arcmsr_interrupt(acb);/* FIXME: need spinlock */
  1891. msleep(25);
  1892. }
  1893. arcmsr_iop_reset(acb);
  1894. acb->acb_flags &= ~ACB_F_BUS_RESET;
  1895. return SUCCESS;
  1896. }
  1897. static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
  1898. struct CommandControlBlock *ccb)
  1899. {
  1900. u32 intmask;
  1901. ccb->startdone = ARCMSR_CCB_ABORTED;
  1902. /*
  1903. ** Wait for 3 sec for all command done.
  1904. */
  1905. ssleep(3);
  1906. intmask = arcmsr_disable_outbound_ints(acb);
  1907. arcmsr_polling_ccbdone(acb, ccb);
  1908. arcmsr_enable_outbound_ints(acb, intmask);
  1909. }
  1910. static int arcmsr_abort(struct scsi_cmnd *cmd)
  1911. {
  1912. struct AdapterControlBlock *acb =
  1913. (struct AdapterControlBlock *)cmd->device->host->hostdata;
  1914. int i = 0;
  1915. printk(KERN_NOTICE
  1916. "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
  1917. acb->host->host_no, cmd->device->id, cmd->device->lun);
  1918. acb->num_aborts++;
  1919. /*
  1920. ************************************************
  1921. ** the all interrupt service routine is locked
  1922. ** we need to handle it as soon as possible and exit
  1923. ************************************************
  1924. */
  1925. if (!atomic_read(&acb->ccboutstandingcount))
  1926. return SUCCESS;
  1927. for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
  1928. struct CommandControlBlock *ccb = acb->pccb_pool[i];
  1929. if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
  1930. arcmsr_abort_one_cmd(acb, ccb);
  1931. break;
  1932. }
  1933. }
  1934. return SUCCESS;
  1935. }
  1936. static const char *arcmsr_info(struct Scsi_Host *host)
  1937. {
  1938. struct AdapterControlBlock *acb =
  1939. (struct AdapterControlBlock *) host->hostdata;
  1940. static char buf[256];
  1941. char *type;
  1942. int raid6 = 1;
  1943. switch (acb->pdev->device) {
  1944. case PCI_DEVICE_ID_ARECA_1110:
  1945. case PCI_DEVICE_ID_ARECA_1200:
  1946. case PCI_DEVICE_ID_ARECA_1202:
  1947. case PCI_DEVICE_ID_ARECA_1210:
  1948. raid6 = 0;
  1949. /*FALLTHRU*/
  1950. case PCI_DEVICE_ID_ARECA_1120:
  1951. case PCI_DEVICE_ID_ARECA_1130:
  1952. case PCI_DEVICE_ID_ARECA_1160:
  1953. case PCI_DEVICE_ID_ARECA_1170:
  1954. case PCI_DEVICE_ID_ARECA_1201:
  1955. case PCI_DEVICE_ID_ARECA_1220:
  1956. case PCI_DEVICE_ID_ARECA_1230:
  1957. case PCI_DEVICE_ID_ARECA_1260:
  1958. case PCI_DEVICE_ID_ARECA_1270:
  1959. case PCI_DEVICE_ID_ARECA_1280:
  1960. type = "SATA";
  1961. break;
  1962. case PCI_DEVICE_ID_ARECA_1380:
  1963. case PCI_DEVICE_ID_ARECA_1381:
  1964. case PCI_DEVICE_ID_ARECA_1680:
  1965. case PCI_DEVICE_ID_ARECA_1681:
  1966. type = "SAS";
  1967. break;
  1968. default:
  1969. type = "X-TYPE";
  1970. break;
  1971. }
  1972. sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
  1973. type, raid6 ? "( RAID6 capable)" : "",
  1974. ARCMSR_DRIVER_VERSION);
  1975. return buf;
  1976. }
  1977. #ifdef CONFIG_SCSI_ARCMSR_AER
/*
 * arcmsr_pci_slot_reset - PCI error-recovery: re-initialize after a reset.
 *
 * Re-enables the device, marks every target/lun ARECA_RAID_GONE so they
 * are rediscovered, then reruns the IOP bring-up sequence (mirrors
 * arcmsr_iop_init).  Returns DISCONNECT if the device will not enable.
 */
static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	uint32_t intmask_org;
	int i, j;
	if (pci_enable_device(pdev)) {
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	/* mask outbound interrupts for the whole re-init sequence */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
		ACB_F_MESSAGE_RQBUFFER_CLEARED |
		ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/* interrupts already masked above; read the firmware spec */
	arcmsr_get_firmware_spec(acb);
	/*start background rebuild*/
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	/* enable outbound Post Queue,outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
	pci_enable_pcie_error_reporting(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
/*
 * arcmsr_pci_ers_need_reset_forepart - quiesce before a PCI slot reset.
 *
 * Aborts and reaps all outstanding commands (same teardown as
 * arcmsr_iop_reset), then disables the PCI device so the core can reset
 * the slot.
 */
static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata;
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	int i = 0;
	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* talk to iop 331 outstanding command aborted */
		arcmsr_abort_allcmd(acb);
		/* wait for 3 sec for all command aborted*/
		ssleep(3);
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* clear all outbound posted Q */
		arcmsr_done4abort_postqueue(acb);
		/* fail any CCB the firmware never reported back */
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				arcmsr_ccb_complete(ccb, 1);
			}
		}
		/* enable all outbound interrupt */
		arcmsr_enable_outbound_ints(acb, intmask_org);
	}
	pci_disable_device(pdev);
}
  2039. static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
  2040. {
  2041. struct Scsi_Host *host = pci_get_drvdata(pdev);
  2042. struct AdapterControlBlock *acb = \
  2043. (struct AdapterControlBlock *)host->hostdata;
  2044. arcmsr_stop_adapter_bgrb(acb);
  2045. arcmsr_flush_adapter_cache(acb);
  2046. }
  2047. static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
  2048. pci_channel_state_t state)
  2049. {
  2050. switch (state) {
  2051. case pci_channel_io_frozen:
  2052. arcmsr_pci_ers_need_reset_forepart(pdev);
  2053. return PCI_ERS_RESULT_NEED_RESET;
  2054. case pci_channel_io_perm_failure:
  2055. arcmsr_pci_ers_disconnect_forepart(pdev);
  2056. return PCI_ERS_RESULT_DISCONNECT;
  2057. break;
  2058. default:
  2059. return PCI_ERS_RESULT_NEED_RESET;
  2060. }
  2061. }
  2062. #endif