/*
*******************************************************************************
**        O.S   : Linux
**   FILE NAME  : arcmsr_hba.c
**        BY    : Erich Chen
**   Description: SCSI RAID Device Driver for
**                ARECA RAID Host adapter
*******************************************************************************
** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
**
**     Web site: www.areca.com.tw
**       E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
*******************************************************************************
*/
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"

MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
    struct scsi_cmnd *cmd);
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
    struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct scsi_cmnd *cmd,
    void (*done) (struct scsi_cmnd *));
static int arcmsr_probe(struct pci_dev *pdev,
    const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);

static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
    int queue_depth, int reason)
{
    if (reason != SCSI_QDEPTH_DEFAULT)
        return -EOPNOTSUPP;

    if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
        queue_depth = ARCMSR_MAX_CMD_PERLUN;
    scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
    return queue_depth;
}
static struct scsi_host_template arcmsr_scsi_host_template = {
    .module = THIS_MODULE,
    .name = "ARCMSR ARECA SATA/SAS RAID HOST Adapter"
        ARCMSR_DRIVER_VERSION,
    .info = arcmsr_info,
    .queuecommand = arcmsr_queue_command,
    .eh_abort_handler = arcmsr_abort,
    .eh_bus_reset_handler = arcmsr_bus_reset,
    .bios_param = arcmsr_bios_param,
    .change_queue_depth = arcmsr_adjust_disk_queue_depth,
    .can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
    .this_id = ARCMSR_SCSI_INITIATOR_ID,
    .sg_tablesize = ARCMSR_MAX_SG_ENTRIES,
    .max_sectors = ARCMSR_MAX_XFER_SECTORS,
    .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
    .use_clustering = ENABLE_CLUSTERING,
    .shost_attrs = arcmsr_host_attrs,
};
#ifdef CONFIG_SCSI_ARCMSR_AER
static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
    pci_channel_state_t state);

static struct pci_error_handlers arcmsr_pci_error_handlers = {
    .error_detected = arcmsr_pci_error_detected,
    .slot_reset = arcmsr_pci_slot_reset,
};
#endif

static struct pci_device_id arcmsr_device_id_table[] = {
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
    {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
    {0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);

static struct pci_driver arcmsr_pci_driver = {
    .name = "arcmsr",
    .id_table = arcmsr_device_id_table,
    .probe = arcmsr_probe,
    .remove = arcmsr_remove,
    .shutdown = arcmsr_shutdown,
#ifdef CONFIG_SCSI_ARCMSR_AER
    .err_handler = &arcmsr_pci_error_handlers,
#endif
};
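
/*
** Interrupt entry point registered through request_irq().  It takes the
** SCSI host lock before dispatching, so the adapter-specific handlers
** below run serialized against the midlayer.
*/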
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
    irqreturn_t handle_state;
    struct AdapterControlBlock *acb = dev_id;

    spin_lock(acb->host->host_lock);
    handle_state = arcmsr_interrupt(acb);
    spin_unlock(acb->host->host_lock);
    return handle_state;
}
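
/*
** Report a legacy CHS geometry for the BIOS.  If the block device carries
** a valid partition table, reuse the geometry recorded there; otherwise
** fall back to 64 heads/32 sectors, or 255 heads/63 sectors once the
** cylinder count would exceed 1024.
*/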
static int arcmsr_bios_param(struct scsi_device *sdev,
    struct block_device *bdev, sector_t capacity, int *geom)
{
    int ret, heads, sectors, cylinders, total_capacity;
    unsigned char *buffer; /* return copy of block device's partition table */

    buffer = scsi_bios_ptable(bdev);
    if (buffer) {
        ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
        kfree(buffer);
        if (ret != -1)
            return ret;
    }
    total_capacity = capacity;
    heads = 64;
    sectors = 32;
    cylinders = total_capacity / (heads * sectors);
    if (cylinders > 1024) {
        heads = 255;
        sectors = 63;
        cylinders = total_capacity / (heads * sectors);
    }
    geom[0] = heads;
    geom[1] = sectors;
    geom[2] = cylinders;
    return 0;
}
static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
{
    struct pci_dev *pdev = acb->pdev;
    u16 dev_id;

    pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
    switch (dev_id) {
    case 0x1201:
        acb->adapter_type = ACB_ADAPTER_TYPE_B;
        break;
    default:
        acb->adapter_type = ACB_ADAPTER_TYPE_A;
    }
}
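
/*
** Allocate every CommandControlBlock from one DMA-coherent region.  The
** region is over-allocated by 0x20 bytes so the first CCB can be rounded
** up to a 32-byte boundary: the IOP addresses a CCB by its physical
** address shifted right five bits (cdb_shifted_phyaddr), so each CCB
** must start 32-byte aligned.
*/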
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {

    case ACB_ADAPTER_TYPE_A: {
        struct pci_dev *pdev = acb->pdev;
        void *dma_coherent;
        dma_addr_t dma_coherent_handle, dma_addr;
        struct CommandControlBlock *ccb_tmp;
        uint32_t intmask_org;
        int i, j;

        acb->pmuA = pci_ioremap_bar(pdev, 0);
        if (!acb->pmuA) {
            printk(KERN_NOTICE "arcmsr%d: memory mapping region fail\n",
                acb->host->host_no);
            return -ENOMEM;
        }

        dma_coherent = dma_alloc_coherent(&pdev->dev,
            ARCMSR_MAX_FREECCB_NUM *
            sizeof(struct CommandControlBlock) + 0x20,
            &dma_coherent_handle, GFP_KERNEL);
        if (!dma_coherent) {
            iounmap(acb->pmuA);
            return -ENOMEM;
        }

        acb->dma_coherent = dma_coherent;
        acb->dma_coherent_handle = dma_coherent_handle;

        if (((unsigned long)dma_coherent & 0x1F)) {
            dma_coherent = dma_coherent +
                (0x20 - ((unsigned long)dma_coherent & 0x1F));
            dma_coherent_handle = dma_coherent_handle +
                (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
        }

        dma_addr = dma_coherent_handle;
        ccb_tmp = (struct CommandControlBlock *)dma_coherent;
        for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
            ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
            ccb_tmp->acb = acb;
            acb->pccb_pool[i] = ccb_tmp;
            list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
            dma_addr = dma_addr + sizeof(struct CommandControlBlock);
            ccb_tmp++;
        }

        acb->vir2phy_offset = (unsigned long)ccb_tmp - (unsigned long)dma_addr;
        for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
            for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
                acb->devstate[i][j] = ARECA_RAID_GONE;

        /*
        ** here we need to tell iop 331 our ccb_tmp.HighPart
        ** if ccb_tmp.HighPart is not zero
        */
        intmask_org = arcmsr_disable_outbound_ints(acb);
        }
        break;

    case ACB_ADAPTER_TYPE_B: {
        struct pci_dev *pdev = acb->pdev;
        struct MessageUnit_B *reg;
        void __iomem *mem_base0, *mem_base1;
        void *dma_coherent;
        dma_addr_t dma_coherent_handle, dma_addr;
        uint32_t intmask_org;
        struct CommandControlBlock *ccb_tmp;
        int i, j;

        dma_coherent = dma_alloc_coherent(&pdev->dev,
            ((ARCMSR_MAX_FREECCB_NUM *
            sizeof(struct CommandControlBlock) + 0x20) +
            sizeof(struct MessageUnit_B)),
            &dma_coherent_handle, GFP_KERNEL);
        if (!dma_coherent)
            return -ENOMEM;

        acb->dma_coherent = dma_coherent;
        acb->dma_coherent_handle = dma_coherent_handle;

        if (((unsigned long)dma_coherent & 0x1F)) {
            dma_coherent = dma_coherent +
                (0x20 - ((unsigned long)dma_coherent & 0x1F));
            dma_coherent_handle = dma_coherent_handle +
                (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
        }

        dma_addr = dma_coherent_handle;
        ccb_tmp = (struct CommandControlBlock *)dma_coherent;
        for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
            ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
            ccb_tmp->acb = acb;
            acb->pccb_pool[i] = ccb_tmp;
            list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
            dma_addr = dma_addr + sizeof(struct CommandControlBlock);
            ccb_tmp++;
        }

        reg = (struct MessageUnit_B *)(dma_coherent +
            ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
        acb->pmuB = reg;
        mem_base0 = pci_ioremap_bar(pdev, 0);
        if (!mem_base0)
            goto out;

        mem_base1 = pci_ioremap_bar(pdev, 2);
        if (!mem_base1) {
            iounmap(mem_base0);
            goto out;
        }

        reg->drv2iop_doorbell_reg = mem_base0 + ARCMSR_DRV2IOP_DOORBELL;
        reg->drv2iop_doorbell_mask_reg = mem_base0 +
            ARCMSR_DRV2IOP_DOORBELL_MASK;
        reg->iop2drv_doorbell_reg = mem_base0 + ARCMSR_IOP2DRV_DOORBELL;
        reg->iop2drv_doorbell_mask_reg = mem_base0 +
            ARCMSR_IOP2DRV_DOORBELL_MASK;
        reg->ioctl_wbuffer_reg = mem_base1 + ARCMSR_IOCTL_WBUFFER;
        reg->ioctl_rbuffer_reg = mem_base1 + ARCMSR_IOCTL_RBUFFER;
        reg->msgcode_rwbuffer_reg = mem_base1 + ARCMSR_MSGCODE_RWBUFFER;

        acb->vir2phy_offset = (unsigned long)ccb_tmp - (unsigned long)dma_addr;
        for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
            for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
                acb->devstate[i][j] = ARECA_RAID_GOOD;

        /*
        ** here we need to tell iop 331 our ccb_tmp.HighPart
        ** if ccb_tmp.HighPart is not zero
        */
        intmask_org = arcmsr_disable_outbound_ints(acb);
        }
        break;
    }
    return 0;

out:
    dma_free_coherent(&acb->pdev->dev,
        (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
        sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
    return -ENOMEM;
}
static int arcmsr_probe(struct pci_dev *pdev,
    const struct pci_device_id *id)
{
    struct Scsi_Host *host;
    struct AdapterControlBlock *acb;
    uint8_t bus, dev_fun;
    int error;

    error = pci_enable_device(pdev);
    if (error)
        goto out;
    pci_set_master(pdev);

    host = scsi_host_alloc(&arcmsr_scsi_host_template,
            sizeof(struct AdapterControlBlock));
    if (!host) {
        error = -ENOMEM;
        goto out_disable_device;
    }
    acb = (struct AdapterControlBlock *)host->hostdata;
    memset(acb, 0, sizeof(struct AdapterControlBlock));

    error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
    if (error) {
        error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (error) {
            printk(KERN_WARNING
                "scsi%d: No suitable DMA mask available\n",
                host->host_no);
            goto out_host_put;
        }
    }
    bus = pdev->bus->number;
    dev_fun = pdev->devfn;
    acb->host = host;
    acb->pdev = pdev;
    host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
    host->max_lun = ARCMSR_MAX_TARGETLUN;
    host->max_id = ARCMSR_MAX_TARGETID; /* 16:8 */
    host->max_cmd_len = 16; /* this is issue of 64bit LBA, over 2T byte */
    host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
    host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
    host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
    host->this_id = ARCMSR_SCSI_INITIATOR_ID;
    host->unique_id = (bus << 8) | dev_fun;
    host->irq = pdev->irq;

    error = pci_request_regions(pdev, "arcmsr");
    if (error)
        goto out_host_put;
    arcmsr_define_adapter_type(acb);

    acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
            ACB_F_MESSAGE_RQBUFFER_CLEARED |
            ACB_F_MESSAGE_WQBUFFER_READED);
    acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
    INIT_LIST_HEAD(&acb->ccb_free_list);

    error = arcmsr_alloc_ccb_pool(acb);
    if (error)
        goto out_release_regions;

    error = request_irq(pdev->irq, arcmsr_do_interrupt,
            IRQF_SHARED, "arcmsr", acb);
    if (error)
        goto out_free_ccb_pool;

    arcmsr_iop_init(acb);
    pci_set_drvdata(pdev, host);
    if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
        host->max_sectors = ARCMSR_MAX_XFER_SECTORS_B;

    error = scsi_add_host(host, &pdev->dev);
    if (error)
        goto out_free_irq;

    error = arcmsr_alloc_sysfs_attr(acb);
    if (error)
        goto out_free_sysfs;

    scsi_scan_host(host);
#ifdef CONFIG_SCSI_ARCMSR_AER
    pci_enable_pcie_error_reporting(pdev);
#endif
    return 0;

out_free_sysfs:
out_free_irq:
    free_irq(pdev->irq, acb);
out_free_ccb_pool:
    arcmsr_free_ccb_pool(acb);
out_release_regions:
    pci_release_regions(pdev);
out_host_put:
    scsi_host_put(host);
out_disable_device:
    pci_disable_device(pdev);
out:
    return error;
}
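
/*
** The two wait helpers below poll the IOP for message completion:
** 100 polls of 10 ms per pass, up to 20 passes, roughly 20 seconds in
** total.  They return 0x00 on success and 0xff on timeout.
*/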
static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
{
    struct MessageUnit_A __iomem *reg = acb->pmuA;
    uint32_t Index;
    uint8_t Retries = 0x00;

    do {
        for (Index = 0; Index < 100; Index++) {
            if (readl(&reg->outbound_intstatus) &
                ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
                writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
                    &reg->outbound_intstatus);
                return 0x00;
            }
            msleep(10);
        } /* max 1 second */
    } while (Retries++ < 20); /* max 20 sec */
    return 0xff;
}

static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
{
    struct MessageUnit_B *reg = acb->pmuB;
    uint32_t Index;
    uint8_t Retries = 0x00;

    do {
        for (Index = 0; Index < 100; Index++) {
            if (readl(reg->iop2drv_doorbell_reg) &
                ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
                writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
                    reg->iop2drv_doorbell_reg);
                writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
                    reg->drv2iop_doorbell_reg);
                return 0x00;
            }
            msleep(10);
        } /* max 1 second */
    } while (Retries++ < 20); /* max 20 sec */
    return 0xff;
}
static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
{
    struct MessageUnit_A __iomem *reg = acb->pmuA;

    writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
    if (arcmsr_hba_wait_msgint_ready(acb))
        printk(KERN_NOTICE
            "arcmsr%d: wait 'abort all outstanding command' timeout\n",
            acb->host->host_no);
}

static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
{
    struct MessageUnit_B *reg = acb->pmuB;

    writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
    if (arcmsr_hbb_wait_msgint_ready(acb))
        printk(KERN_NOTICE
            "arcmsr%d: wait 'abort all outstanding command' timeout\n",
            acb->host->host_no);
}

static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A:
        arcmsr_abort_hba_allcmd(acb);
        break;
    case ACB_ADAPTER_TYPE_B:
        arcmsr_abort_hbb_allcmd(acb);
    }
}
static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
{
    struct scsi_cmnd *pcmd = ccb->pcmd;

    scsi_dma_unmap(pcmd);
}
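
/*
** Return a completed CCB to the free list and hand the scsi_cmnd back to
** the midlayer.  stand_flag == 1 means the CCB was counted in
** ccboutstandingcount and the counter must be dropped here.
*/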
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
{
    struct AdapterControlBlock *acb = ccb->acb;
    struct scsi_cmnd *pcmd = ccb->pcmd;

    arcmsr_pci_unmap_dma(ccb);
    if (stand_flag == 1)
        atomic_dec(&acb->ccboutstandingcount);
    ccb->startdone = ARCMSR_CCB_DONE;
    ccb->ccb_flags = 0;
    list_add_tail(&ccb->list, &acb->ccb_free_list);
    pcmd->scsi_done(pcmd);
}
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
{
    struct MessageUnit_A __iomem *reg = acb->pmuA;
    int retry_count = 30;

    writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
    do {
        if (!arcmsr_hba_wait_msgint_ready(acb))
            break;
        else {
            retry_count--;
            printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
                "timeout, retry count down = %d\n",
                acb->host->host_no, retry_count);
        }
    } while (retry_count != 0);
}

static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
{
    struct MessageUnit_B *reg = acb->pmuB;
    int retry_count = 30;

    writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg);
    do {
        if (!arcmsr_hbb_wait_msgint_ready(acb))
            break;
        else {
            retry_count--;
            printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
                "timeout, retry count down = %d\n",
                acb->host->host_no, retry_count);
        }
    } while (retry_count != 0);
}

static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A:
        arcmsr_flush_hba_cache(acb);
        break;
    case ACB_ADAPTER_TYPE_B:
        arcmsr_flush_hbb_cache(acb);
    }
}
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
    struct scsi_cmnd *pcmd = ccb->pcmd;
    struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;

    pcmd->result = DID_OK << 16;
    if (sensebuffer) {
        int sense_data_length =
            sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
            ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
        memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
        memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
        sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
        sensebuffer->Valid = 1;
    }
}
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
    u32 orig_mask = 0;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        struct MessageUnit_A __iomem *reg = acb->pmuA;
        orig_mask = readl(&reg->outbound_intmask) |
            ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
        writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
            &reg->outbound_intmask);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        struct MessageUnit_B *reg = acb->pmuB;
        orig_mask = readl(reg->iop2drv_doorbell_mask_reg) &
            (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
        writel(0, reg->iop2drv_doorbell_mask_reg);
        }
        break;
    }
    return orig_mask;
}
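
/*
** Translate the firmware completion status of a CCB into a SCSI midlayer
** result code, updating the per-target/per-LUN RAID state table as a
** side effect (a failed device is marked ARECA_RAID_GONE).
*/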
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
    struct CommandControlBlock *ccb, uint32_t flag_ccb)
{
    uint8_t id, lun;

    id = ccb->pcmd->device->id;
    lun = ccb->pcmd->device->lun;
    if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
        if (acb->devstate[id][lun] == ARECA_RAID_GONE)
            acb->devstate[id][lun] = ARECA_RAID_GOOD;
        ccb->pcmd->result = DID_OK << 16;
        arcmsr_ccb_complete(ccb, 1);
    } else {
        switch (ccb->arcmsr_cdb.DeviceStatus) {
        case ARCMSR_DEV_SELECT_TIMEOUT:
            acb->devstate[id][lun] = ARECA_RAID_GONE;
            ccb->pcmd->result = DID_NO_CONNECT << 16;
            arcmsr_ccb_complete(ccb, 1);
            break;
        case ARCMSR_DEV_ABORTED:
        case ARCMSR_DEV_INIT_FAIL:
            acb->devstate[id][lun] = ARECA_RAID_GONE;
            ccb->pcmd->result = DID_BAD_TARGET << 16;
            arcmsr_ccb_complete(ccb, 1);
            break;
        case ARCMSR_DEV_CHECK_CONDITION:
            acb->devstate[id][lun] = ARECA_RAID_GOOD;
            arcmsr_report_sense_info(ccb);
            arcmsr_ccb_complete(ccb, 1);
            break;
        default:
            printk(KERN_NOTICE
                "arcmsr%d: scsi id = %d lun = %d "
                "isr got command error done, "
                "but got unknown DeviceStatus = 0x%x\n",
                acb->host->host_no, id, lun,
                ccb->arcmsr_cdb.DeviceStatus);
            acb->devstate[id][lun] = ARECA_RAID_GONE;
            ccb->pcmd->result = DID_NO_CONNECT << 16;
            arcmsr_ccb_complete(ccb, 1);
            break;
        }
    }
}
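
/*
** Convert a completion token read from the done queue back into a CCB
** pointer: flag_ccb holds the CCB physical address shifted right five
** bits, and vir2phy_offset translates the physical address back into a
** kernel virtual address within the coherent pool.
*/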
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t flag_ccb)
{
    struct CommandControlBlock *ccb;

    ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
    if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
        if (ccb->startdone == ARCMSR_CCB_ABORTED) {
            struct scsi_cmnd *abortcmd = ccb->pcmd;
            if (abortcmd) {
                abortcmd->result |= DID_ABORT << 16;
                arcmsr_ccb_complete(ccb, 1);
                printk(KERN_NOTICE "arcmsr%d: ccb = '0x%p' "
                    "isr got aborted command\n",
                    acb->host->host_no, ccb);
            }
        }
        printk(KERN_NOTICE "arcmsr%d: isr got an illegal ccb command "
            "done acb = '0x%p' ccb = '0x%p' ccbacb = '0x%p' "
            "startdone = 0x%x ccboutstandingcount = %d\n",
            acb->host->host_no, acb, ccb, ccb->acb,
            ccb->startdone, atomic_read(&acb->ccboutstandingcount));
    } else
        arcmsr_report_ccb_state(acb, ccb, flag_ccb);
}
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
    int i = 0;
    uint32_t flag_ccb;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        struct MessageUnit_A __iomem *reg = acb->pmuA;
        uint32_t outbound_intstatus;

        outbound_intstatus = readl(&reg->outbound_intstatus) &
            acb->outbound_int_enable;
        /* clear and abort all outbound posted Q */
        writel(outbound_intstatus, &reg->outbound_intstatus); /* clear interrupt */
        while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
            && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
            arcmsr_drain_donequeue(acb, flag_ccb);
        }
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        struct MessageUnit_B *reg = acb->pmuB;

        /* clear all outbound posted Q */
        for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
            if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
                writel(0, &reg->done_qbuffer[i]);
                arcmsr_drain_donequeue(acb, flag_ccb);
            }
            writel(0, &reg->post_qbuffer[i]);
        }
        reg->doneq_index = 0;
        reg->postq_index = 0;
        }
        break;
    }
}
static void arcmsr_remove(struct pci_dev *pdev)
{
    struct Scsi_Host *host = pci_get_drvdata(pdev);
    struct AdapterControlBlock *acb =
        (struct AdapterControlBlock *)host->hostdata;
    int poll_count = 0;

    arcmsr_free_sysfs_attr(acb);
    scsi_remove_host(host);
    arcmsr_stop_adapter_bgrb(acb);
    arcmsr_flush_adapter_cache(acb);
    arcmsr_disable_outbound_ints(acb);
    acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
    acb->acb_flags &= ~ACB_F_IOP_INITED;

    for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
        if (!atomic_read(&acb->ccboutstandingcount))
            break;
        arcmsr_interrupt(acb); /* FIXME: need spinlock */
        msleep(25);
    }

    if (atomic_read(&acb->ccboutstandingcount)) {
        int i;

        arcmsr_abort_allcmd(acb);
        arcmsr_done4abort_postqueue(acb);
        for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
            struct CommandControlBlock *ccb = acb->pccb_pool[i];
            if (ccb->startdone == ARCMSR_CCB_START) {
                ccb->startdone = ARCMSR_CCB_ABORTED;
                ccb->pcmd->result = DID_ABORT << 16;
                arcmsr_ccb_complete(ccb, 1);
            }
        }
    }

    free_irq(pdev->irq, acb);
    arcmsr_free_ccb_pool(acb);
    pci_release_regions(pdev);
    scsi_host_put(host);
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);
}
static void arcmsr_shutdown(struct pci_dev *pdev)
{
    struct Scsi_Host *host = pci_get_drvdata(pdev);
    struct AdapterControlBlock *acb =
        (struct AdapterControlBlock *)host->hostdata;

    arcmsr_stop_adapter_bgrb(acb);
    arcmsr_flush_adapter_cache(acb);
}

static int arcmsr_module_init(void)
{
    int error = 0;

    error = pci_register_driver(&arcmsr_pci_driver);
    return error;
}

static void arcmsr_module_exit(void)
{
    pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
    u32 intmask_org)
{
    u32 mask;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        struct MessageUnit_A __iomem *reg = acb->pmuA;
        mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
            ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
        writel(mask, &reg->outbound_intmask);
        acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        struct MessageUnit_B *reg = acb->pmuB;
        mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
            ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
        writel(mask, reg->iop2drv_doorbell_mask_reg);
        acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
        }
    }
}
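
/*
** Translate a scsi_cmnd into the firmware's ARCMSR_CDB layout: copy the
** CDB bytes, then map the scatter-gather list into IOP SG entries, using
** a 32-bit entry when the segment address fits in 32 bits and a 64-bit
** entry otherwise.  When the descriptor grows past 256 bytes it is
** flagged with ARCMSR_CDB_FLAG_SGL_BSIZE for the IOP.
*/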
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
    struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
    struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
    int8_t *psge = (int8_t *)&arcmsr_cdb->u;
    __le32 address_lo, address_hi;
    int arccdbsize = 0x30;
    int nseg;

    ccb->pcmd = pcmd;
    memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
    arcmsr_cdb->Bus = 0;
    arcmsr_cdb->TargetID = pcmd->device->id;
    arcmsr_cdb->LUN = pcmd->device->lun;
    arcmsr_cdb->Function = 1;
    arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
    arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
    memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

    nseg = scsi_dma_map(pcmd);
    if (nseg > ARCMSR_MAX_SG_ENTRIES)
        return FAILED;
    BUG_ON(nseg < 0);

    if (nseg) {
        __le32 length;
        int i, cdb_sgcount = 0;
        struct scatterlist *sg;

        /* map stor port SG list to our iop SG List. */
        scsi_for_each_sg(pcmd, sg, nseg, i) {
            /* Get the physical address of the current data pointer */
            length = cpu_to_le32(sg_dma_len(sg));
            address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
            address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
            if (address_hi == 0) {
                struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

                pdma_sg->address = address_lo;
                pdma_sg->length = length;
                psge += sizeof(struct SG32ENTRY);
                arccdbsize += sizeof(struct SG32ENTRY);
            } else {
                struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

                pdma_sg->addresshigh = address_hi;
                pdma_sg->address = address_lo;
                pdma_sg->length = length | cpu_to_le32(IS_SG64_ADDR);
                psge += sizeof(struct SG64ENTRY);
                arccdbsize += sizeof(struct SG64ENTRY);
            }
            cdb_sgcount++;
        }
        arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
        arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
        if (arccdbsize > 256)
            arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
    }
    if (pcmd->sc_data_direction == DMA_TO_DEVICE) {
        arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
        ccb->ccb_flags |= CCB_FLAG_WRITE;
    }
    return SUCCESS;
}
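
/*
** Hand a built CCB to the IOP.  Type A adapters take the shifted CCB
** address directly in the inbound queue port register; type B adapters
** place it in the circular post_qbuffer ring and then ring the drv2iop
** doorbell.
*/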
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
    uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
    struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;

    atomic_inc(&acb->ccboutstandingcount);
    ccb->startdone = ARCMSR_CCB_START;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        struct MessageUnit_A __iomem *reg = acb->pmuA;

        if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
            writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
                &reg->inbound_queueport);
        else
            writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        struct MessageUnit_B *reg = acb->pmuB;
        uint32_t ending_index, index = reg->postq_index;

        ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
        writel(0, &reg->post_qbuffer[ending_index]);
        if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
            writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
                &reg->post_qbuffer[index]);
        else
            writel(cdb_shifted_phyaddr, &reg->post_qbuffer[index]);
        index++;
        index %= ARCMSR_MAX_HBB_POSTQUEUE; /* if last index number set it to 0 */
        reg->postq_index = index;
        writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell_reg);
        }
        break;
    }
}
static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
    struct MessageUnit_A __iomem *reg = acb->pmuA;

    acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
    writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
    if (arcmsr_hba_wait_msgint_ready(acb)) {
        printk(KERN_NOTICE
            "arcmsr%d: wait 'stop adapter background rebuild' timeout\n",
            acb->host->host_no);
    }
}

static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
{
    struct MessageUnit_B *reg = acb->pmuB;

    acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
    writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg);
    if (arcmsr_hbb_wait_msgint_ready(acb)) {
        printk(KERN_NOTICE
            "arcmsr%d: wait 'stop adapter background rebuild' timeout\n",
            acb->host->host_no);
    }
}

static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A:
        arcmsr_stop_hba_bgrb(acb);
        break;
    case ACB_ADAPTER_TYPE_B:
        arcmsr_stop_hbb_bgrb(acb);
        break;
    }
}
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        iounmap(acb->pmuA);
        dma_free_coherent(&acb->pdev->dev,
            ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20,
            acb->dma_coherent,
            acb->dma_coherent_handle);
        break;
    }
    case ACB_ADAPTER_TYPE_B: {
        struct MessageUnit_B *reg = acb->pmuB;
        iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
        iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
        dma_free_coherent(&acb->pdev->dev,
            (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
            sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
    }
    }
}
void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        struct MessageUnit_A __iomem *reg = acb->pmuA;
        writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        struct MessageUnit_B *reg = acb->pmuB;
        writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
        }
        break;
    }
}

static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        struct MessageUnit_A __iomem *reg = acb->pmuA;
        /*
        ** push inbound doorbell tell iop, driver data write ok
        ** and wait reply on next hwinterrupt for next Qbuffer post
        */
        writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        struct MessageUnit_B *reg = acb->pmuB;
        /*
        ** push inbound doorbell tell iop, driver data write ok
        ** and wait reply on next hwinterrupt for next Qbuffer post
        */
        writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell_reg);
        }
        break;
    }
}
struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
{
    struct QBUFFER __iomem *qbuffer = NULL;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        struct MessageUnit_A __iomem *reg = acb->pmuA;
        qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        struct MessageUnit_B *reg = acb->pmuB;
        qbuffer = (struct QBUFFER __iomem *)reg->ioctl_rbuffer_reg;
        }
        break;
    }
    return qbuffer;
}

static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
{
    struct QBUFFER __iomem *pqbuffer = NULL;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        struct MessageUnit_A __iomem *reg = acb->pmuA;
        pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        struct MessageUnit_B *reg = acb->pmuB;
        pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg;
        }
        break;
    }
    return pqbuffer;
}
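
/*
** The IOP has posted data into its rqbuffer window.  Copy it byte by
** byte into the driver's circular rqbuffer if it fits (the free-space
** test assumes ARCMSR_MAX_QBUFFER is a power of two); otherwise flag an
** overflow and leave the data on the IOP side for later.
*/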
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
    struct QBUFFER __iomem *prbuffer;
    struct QBUFFER *pQbuffer;
    uint8_t __iomem *iop_data;
    int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;

    rqbuf_lastindex = acb->rqbuf_lastindex;
    rqbuf_firstindex = acb->rqbuf_firstindex;
    prbuffer = arcmsr_get_iop_rqbuffer(acb);
    iop_data = (uint8_t __iomem *)prbuffer->data;
    iop_len = prbuffer->data_len;
    my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);

    if (my_empty_len >= iop_len) {
        while (iop_len > 0) {
            pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
            memcpy(pQbuffer, iop_data, 1);
            rqbuf_lastindex++;
            rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
            iop_data++;
            iop_len--;
        }
        acb->rqbuf_lastindex = rqbuf_lastindex;
        arcmsr_iop_message_read(acb);
    } else {
        acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
    }
}
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
    acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
    if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
        uint8_t *pQbuffer;
        struct QBUFFER __iomem *pwbuffer;
        uint8_t __iomem *iop_data;
        int32_t allxfer_len = 0;

        acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
        pwbuffer = arcmsr_get_iop_wqbuffer(acb);
        iop_data = (uint8_t __iomem *)pwbuffer->data;

        while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) &&
            (allxfer_len < 124)) {
            pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
            memcpy(iop_data, pQbuffer, 1);
            acb->wqbuf_firstindex++;
            acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
            iop_data++;
            allxfer_len++;
        }
        pwbuffer->data_len = allxfer_len;
        arcmsr_iop_message_wrote(acb);
    }
    if (acb->wqbuf_firstindex == acb->wqbuf_lastindex)
        acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
}
static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
{
    uint32_t outbound_doorbell;
    struct MessageUnit_A __iomem *reg = acb->pmuA;

    outbound_doorbell = readl(&reg->outbound_doorbell);
    writel(outbound_doorbell, &reg->outbound_doorbell);
    if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
        arcmsr_iop2drv_data_wrote_handle(acb);
    if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
        arcmsr_iop2drv_data_read_handle(acb);
}

static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
{
    uint32_t flag_ccb;
    struct MessageUnit_A __iomem *reg = acb->pmuA;

    while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
        arcmsr_drain_donequeue(acb, flag_ccb);
    }
}

static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
{
    uint32_t index;
    uint32_t flag_ccb;
    struct MessageUnit_B *reg = acb->pmuB;

    index = reg->doneq_index;
    while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
        writel(0, &reg->done_qbuffer[index]);
        arcmsr_drain_donequeue(acb, flag_ccb);
        index++;
        index %= ARCMSR_MAX_HBB_POSTQUEUE;
        reg->doneq_index = index;
    }
}
static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
{
    uint32_t outbound_intstatus;
    struct MessageUnit_A __iomem *reg = acb->pmuA;

    outbound_intstatus = readl(&reg->outbound_intstatus) &
        acb->outbound_int_enable;
    if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
        return 1;
    writel(outbound_intstatus, &reg->outbound_intstatus);
    if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
        arcmsr_hba_doorbell_isr(acb);
    if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
        arcmsr_hba_postqueue_isr(acb);
    return 0;
}

static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
{
    uint32_t outbound_doorbell;
    struct MessageUnit_B *reg = acb->pmuB;

    outbound_doorbell = readl(reg->iop2drv_doorbell_reg) &
        acb->outbound_int_enable;
    if (!outbound_doorbell)
        return 1;
    writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
    /*
    ** in case the last action of doorbell interrupt clearance is cached,
    ** this read pushes the HW to write down the clear bit
    */
    readl(reg->iop2drv_doorbell_reg);
    writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
    if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
        arcmsr_iop2drv_data_wrote_handle(acb);
    if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
        arcmsr_iop2drv_data_read_handle(acb);
    if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
        arcmsr_hbb_postqueue_isr(acb);
    return 0;
}
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A:
        if (arcmsr_handle_hba_isr(acb))
            return IRQ_NONE;
        break;
    case ACB_ADAPTER_TYPE_B:
        if (arcmsr_handle_hbb_isr(acb))
            return IRQ_NONE;
        break;
    }
    return IRQ_HANDLED;
}
static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
    if (acb) {
        /* stop adapter background rebuild */
        if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
            uint32_t intmask_org;
            acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
            intmask_org = arcmsr_disable_outbound_ints(acb);
            arcmsr_stop_adapter_bgrb(acb);
            arcmsr_flush_adapter_cache(acb);
            arcmsr_enable_outbound_ints(acb, intmask_org);
        }
    }
}
void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
{
    int32_t wqbuf_firstindex, wqbuf_lastindex;
    uint8_t *pQbuffer;
    struct QBUFFER __iomem *pwbuffer;
    uint8_t __iomem *iop_data;
    int32_t allxfer_len = 0;

    pwbuffer = arcmsr_get_iop_wqbuffer(acb);
    iop_data = (uint8_t __iomem *)pwbuffer->data;
    if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
        acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
        wqbuf_firstindex = acb->wqbuf_firstindex;
        wqbuf_lastindex = acb->wqbuf_lastindex;
        while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
            pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
            memcpy(iop_data, pQbuffer, 1);
            wqbuf_firstindex++;
            wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
            iop_data++;
            allxfer_len++;
        }
        acb->wqbuf_firstindex = wqbuf_firstindex;
        pwbuffer->data_len = allxfer_len;
        arcmsr_iop_message_wrote(acb);
    }
}
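
/*
** Service an Areca pass-through request delivered as a vendor-specific
** SCSI command: CDB bytes 5..8 carry a 32-bit io control code, and the
** single scatter-gather segment holds a CMD_MESSAGE_FIELD that is read
** or filled in place.
*/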
  1198. static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
  1199. struct scsi_cmnd *cmd)
  1200. {
  1201. struct CMD_MESSAGE_FIELD *pcmdmessagefld;
  1202. int retvalue = 0, transfer_len = 0;
  1203. char *buffer;
  1204. struct scatterlist *sg;
  1205. uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
  1206. (uint32_t ) cmd->cmnd[6] << 16 |
  1207. (uint32_t ) cmd->cmnd[7] << 8 |
  1208. (uint32_t ) cmd->cmnd[8];
  1209. /* 4 bytes: Areca io control code */
  1210. sg = scsi_sglist(cmd);
  1211. buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
  1212. if (scsi_sg_count(cmd) > 1) {
  1213. retvalue = ARCMSR_MESSAGE_FAIL;
  1214. goto message_out;
  1215. }
  1216. transfer_len += sg->length;
  1217. if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
  1218. retvalue = ARCMSR_MESSAGE_FAIL;
  1219. goto message_out;
  1220. }
  1221. pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
	switch (controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		unsigned char *ver_addr;
		uint8_t *pQbuffer, *ptmpQbuffer;
		int32_t allxfer_len = 0;

		ver_addr = kmalloc(1032, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpQbuffer = ver_addr;
		while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
			&& (allxfer_len < 1031)) {
			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
			memcpy(ptmpQbuffer, pQbuffer, 1);
			acb->rqbuf_firstindex++;
			acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			ptmpQbuffer++;
			allxfer_len++;
		}
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			struct QBUFFER __iomem *prbuffer;
			uint8_t __iomem *iop_data;
			int32_t iop_len;

			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			iop_data = prbuffer->data;
			iop_len = readl(&prbuffer->data_len);
			while (iop_len > 0) {
				acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
				acb->rqbuf_lastindex++;
				acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
				iop_data++;
				iop_len--;
			}
			arcmsr_iop_message_read(acb);
		}
		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
		kfree(ver_addr);
	}
	break;
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		unsigned char *ver_addr;
		int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		ver_addr = kmalloc(1032, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpuserbuffer = ver_addr;
		user_len = pcmdmessagefld->cmdmessage.Length;
		memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
		wqbuf_lastindex = acb->wqbuf_lastindex;
		wqbuf_firstindex = acb->wqbuf_firstindex;
		if (wqbuf_lastindex != wqbuf_firstindex) {
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_post_ioctldata2iop(acb);
			/* fail the request and report sense data */
			sensebuffer->ErrorCode = 0x70;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) &
				(ARCMSR_MAX_QBUFFER - 1);
			if (my_empty_len >= user_len) {
				while (user_len > 0) {
					pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
					memcpy(pQbuffer, ptmpuserbuffer, 1);
					acb->wqbuf_lastindex++;
					acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
					ptmpuserbuffer++;
					user_len--;
				}
				if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
					acb->acb_flags &=
						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
					arcmsr_post_ioctldata2iop(acb);
				}
			} else {
				/* no room: fail the request and report sense data */
				struct SENSE_DATA *sensebuffer =
					(struct SENSE_DATA *)cmd->sense_buffer;
				sensebuffer->ErrorCode = 0x70;
				sensebuffer->SenseKey = ILLEGAL_REQUEST;
				sensebuffer->AdditionalSenseLength = 0x0A;
				sensebuffer->AdditionalSenseCode = 0x20;
				sensebuffer->Valid = 1;
				retvalue = ARCMSR_MESSAGE_FAIL;
			}
		}
		kfree(ver_addr);
	}
	break;
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		uint8_t *pQbuffer = acb->rqbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_firstindex = 0;
		acb->rqbuf_lastindex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
	}
	break;
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		uint8_t *pQbuffer = acb->wqbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |=
			(ACB_F_MESSAGE_WQBUFFER_CLEARED |
			 ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_firstindex = 0;
		acb->wqbuf_lastindex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
	}
	break;
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		uint8_t *pQbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |=
			(ACB_F_MESSAGE_WQBUFFER_CLEARED
			| ACB_F_MESSAGE_RQBUFFER_CLEARED
			| ACB_F_MESSAGE_WQBUFFER_READED);
		acb->rqbuf_firstindex = 0;
		acb->rqbuf_lastindex = 0;
		acb->wqbuf_firstindex = 0;
		acb->wqbuf_lastindex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
	}
	break;
	case ARCMSR_MESSAGE_RETURN_CODE_3F:
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
		break;
	case ARCMSR_MESSAGE_SAY_HELLO: {
		/* plain char avoids a signedness mismatch with strlen() */
		char *hello_string = "Hello! I am ARCMSR";

		memcpy(pcmdmessagefld->messagedatabuffer, hello_string,
			(int16_t)strlen(hello_string));
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
	}
	break;
	case ARCMSR_MESSAGE_SAY_GOODBYE:
		arcmsr_iop_parking(acb);
		break;
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
		arcmsr_flush_adapter_cache(acb);
		break;
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
	}
message_out:
	sg = scsi_sglist(cmd);
	kunmap_atomic(buffer - sg->offset, KM_IRQ0);
	return retvalue;
}
static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
{
	struct list_head *head = &acb->ccb_free_list;
	struct CommandControlBlock *ccb = NULL;

	if (!list_empty(head)) {
		ccb = list_entry(head->next, struct CommandControlBlock, list);
		list_del(head->next);
	}
	return ccb;
}
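/*
 * Target 16 does not exist on the bus; it is a virtual processor device
 * the driver emulates so user-space tools can reach the IOP message
 * interface through ordinary INQUIRY/READ_BUFFER/WRITE_BUFFER commands.
 */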
static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
					struct scsi_cmnd *cmd)
{
	switch (cmd->cmnd[0]) {
	case INQUIRY: {
		unsigned char inqdata[36];
		char *buffer;
		struct scatterlist *sg;

		if (cmd->device->lun) {
			cmd->result = (DID_TIME_OUT << 16);
			cmd->scsi_done(cmd);
			return;
		}
		/* zero-fill so no uninitialized stack bytes reach userspace */
		memset(inqdata, 0, sizeof(inqdata));
		inqdata[0] = TYPE_PROCESSOR;	/* Periph Qualifier & Periph Dev Type */
		inqdata[1] = 0;			/* rem media bit & Dev Type Modifier */
		inqdata[2] = 0;			/* ISO, ECMA, & ANSI versions */
		inqdata[4] = 31;		/* length of additional data */
		strncpy(&inqdata[8], "Areca ", 8);	/* Vendor Identification */
		strncpy(&inqdata[16], "RAID controller ", 16);	/* Product Identification */
		strncpy(&inqdata[32], "R001", 4);	/* Product Revision */
		sg = scsi_sglist(cmd);
		buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
		memcpy(buffer, inqdata, sizeof(inqdata));
		sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset, KM_IRQ0);
		cmd->scsi_done(cmd);
	}
	break;
	case WRITE_BUFFER:
	case READ_BUFFER:
		if (arcmsr_iop_message_xfer(acb, cmd))
			cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
		break;
	default:
		cmd->scsi_done(cmd);
	}
}
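/*
 * queuecommand: fail fast while a bus reset is in flight, route target
 * 16 to the virtual device, reject reads/writes aimed at a vanished
 * volume with DID_NO_CONNECT, and push everything else to the IOP
 * through a free CCB.  Masking the opcode with 0x0f maps the whole
 * READ/WRITE command family onto 0x08/0x0a.
 */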
static int arcmsr_queue_command(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;
	int lun = cmd->device->lun;

	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if (acb->acb_flags & ACB_F_BUS_RESET) {
		printk(KERN_NOTICE "arcmsr%d: bus reset and return busy\n",
			acb->host->host_no);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
		uint8_t block_cmd;

		block_cmd = cmd->cmnd[0] & 0x0f;
		if (block_cmd == 0x08 || block_cmd == 0x0a) {
			printk(KERN_NOTICE
				"arcmsr%d: block 'read/write' command with gone "
				"raid volume Cmd = %2x, TargetId = %d, Lun = %d\n",
				acb->host->host_no, cmd->cmnd[0], target, lun);
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	if (atomic_read(&acb->ccboutstandingcount) >=
		ARCMSR_MAX_OUTSTANDING_CMD)
		return SCSI_MLQUEUE_HOST_BUSY;
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (arcmsr_build_ccb(acb, ccb, cmd) == FAILED) {
		cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
		cmd->scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}
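/*
 * After a GET_CONFIG message the firmware fills message_rwbuffer:
 * word 1 = max request length, word 2 = reply queue depth, word 3 =
 * SDRAM size, word 4 = drive channels, with the 8-byte model string at
 * word 15 and the 16-byte firmware version at word 17.  The byte
 * offsets are documented in the type B variant below; the type A layout
 * appears to be the same.
 */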
static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
	char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
	int count;

	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	if (arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
			"miscellaneous data' timeout\n", acb->host->host_no);
	}
	count = 8;
	while (count) {
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s\n",
		acb->host->host_no, acb->firm_version);
	acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
	acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
	acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
}
static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]);
	/* firm_model, word 15, bytes 60-67 */
	char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]);
	/* firm_version, word 17, bytes 68-83 */
	int count;

	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
	if (arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
			"miscellaneous data' timeout\n", acb->host->host_no);
	}
	count = 8;
	while (count) {
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s\n",
		acb->host->host_no, acb->firm_version);
	lrwbuffer++;
	acb->firm_request_len = readl(lrwbuffer++);	/* word 1, bytes 04-07 */
	acb->firm_numbers_queue = readl(lrwbuffer++);	/* word 2, bytes 08-11 */
	acb->firm_sdram_size = readl(lrwbuffer++);	/* word 3, bytes 12-15 */
	acb->firm_hd_channels = readl(lrwbuffer);	/* word 4, bytes 16-19 */
}
static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_get_hba_config(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_get_hbb_config(acb);
		break;
	}
}
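/*
 * The two polling helpers below reap completed CCBs by hand, for paths
 * (such as command abort) that run with outbound interrupts disabled.
 * Each gives up after roughly 100 * 25 ms if the reply queue stays
 * empty and no completion for poll_ccb has been seen.
 */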
static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;

polling_hba_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus) &
		acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);	/* clear interrupt */
	while (1) {
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done)
				break;
			msleep(25);
			if (poll_count > 100)
				break;
			goto polling_hba_ccb_retry;
		}
		/* frames are 32-byte aligned, so the flag carries address >> 5 */
		ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
			(flag_ccb << 5));
		poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n",
					acb->host->host_no,
					ccb->pcmd->device->id,
					ccb->pcmd->device->lun, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				poll_ccb_done = 1;
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling got an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n",
				acb->host->host_no, ccb,
				atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
	}
}
static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index;

polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
	while (1) {
		index = reg->doneq_index;
		if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
			if (poll_ccb_done)
				break;
			msleep(25);
			if (poll_count > 100)
				break;
			goto polling_hbb_ccb_retry;
		}
		writel(0, &reg->done_qbuffer[index]);
		index++;
		/* if it was the last index, wrap back to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check if the command finished with no error */
		ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
			(flag_ccb << 5));	/* frame must be 32 bytes aligned */
		poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n",
					acb->host->host_no,
					ccb->pcmd->device->id,
					ccb->pcmd->device->lun, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling got an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n",
				acb->host->host_no, ccb,
				atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
	}	/* drain reply FIFO */
}
static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_polling_hba_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
		break;
	}
}
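/*
 * A type B adapter keeps its post/done queues in host memory right
 * after the CCB pool, so the driver must hand the IOP the physical
 * window addresses before the first command is posted.  A type A
 * adapter only needs to learn the upper 32 bits of the CCB pool
 * address, and only when they are nonzero.
 */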
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
	uint32_t cdb_phyaddr, ccb_phyaddr_hi32;
	dma_addr_t dma_coherent_handle;
	/*
	********************************************************************
	** here we need to tell iop 331 our freeccb.HighPart
	** if freeccb.HighPart is not zero
	********************************************************************
	*/
	dma_coherent_handle = acb->dma_coherent_handle;
	cdb_phyaddr = (uint32_t)(dma_coherent_handle);
	/* derive the high half from the full dma handle, not the truncated
	   low word; the double shift stays defined on 32-bit dma_addr_t */
	ccb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
	/*
	***********************************************************************
	** if adapter type B, set window of "post command Q"
	***********************************************************************
	*/
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		if (ccb_phyaddr_hi32 != 0) {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			uint32_t intmask_org;

			intmask_org = arcmsr_disable_outbound_ints(acb);
			writel(ARCMSR_SIGNATURE_SET_CONFIG,
				&reg->message_rwbuffer[0]);
			writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG,
				&reg->inbound_msgaddr0);
			if (arcmsr_hba_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: 'set ccb high "
					"part physical address' timeout\n",
					acb->host->host_no);
				return 1;
			}
			arcmsr_enable_outbound_ints(acb, intmask_org);
		}
	}
	break;
	case ACB_ADAPTER_TYPE_B: {
		unsigned long post_queue_phyaddr;
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t intmask_org;

		intmask_org = arcmsr_disable_outbound_ints(acb);
		reg->postq_index = 0;
		reg->doneq_index = 0;
		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell_reg);
		if (arcmsr_hbb_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: wait 'set post command "
				"Q window' timeout\n", acb->host->host_no);
			return 1;
		}
		post_queue_phyaddr = cdb_phyaddr + ARCMSR_MAX_FREECCB_NUM *
			sizeof(struct CommandControlBlock) +
			offsetof(struct MessageUnit_B, post_qbuffer);
		rwbuffer = reg->msgcode_rwbuffer_reg;
		/* driver "set config" signature */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		/* normally zero: high 32 bits of the CCB pool address */
		writel(ccb_phyaddr_hi32, rwbuffer++);
		/* post Q base address */
		writel(post_queue_phyaddr, rwbuffer++);
		/* done Q base address: post Q base + (256 + 8) * 4 bytes */
		writel(post_queue_phyaddr + 1056, rwbuffer++);
		/* each queue window is (256 + 8) * 4 = 1056 bytes */
		writel(1056, rwbuffer);
		writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell_reg);
		if (arcmsr_hbb_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' "
				"timeout\n", acb->host->host_no);
			return 1;
		}
		writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell_reg);
		if (arcmsr_hbb_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'start driver mode' "
				"timeout\n", acb->host->host_no);
			return 1;
		}
		arcmsr_enable_outbound_ints(acb, intmask_org);
	}
	break;
	}
	return 0;
}
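/*
 * Note: arcmsr_wait_firmware_ready spins unbounded; it polls until the
 * firmware raises its FIRMWARE_OK flag, so a board that never comes up
 * would hang the caller here.
 */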
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
{
	uint32_t firmware_state = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		do {
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
	}
	break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		do {
			firmware_state = readl(reg->iop2drv_doorbell_reg);
		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
	}
	break;
	}
}
static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
	if (arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
			"rebuild' timeout\n", acb->host->host_no);
	}
}
static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg);
	if (arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
			"rebuild' timeout\n", acb->host->host_no);
	}
}
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_start_hba_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_start_hbb_bgrb(acb);
		break;
	}
}
static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_doorbell;
		/* empty the doorbell Qbuffer if the doorbell rang */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/* clear doorbell interrupt */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/* clear interrupt and message state */
		writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
		/* let the IOP know the data has been read */
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
	}
	break;
	}
}
static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell_reg);
		if (arcmsr_hbb_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'enable EOI mode' timeout\n",
				acb->host->host_no);
			return;
		}
	}
	break;
	}
}
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;

	/* disable all outbound interrupts */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	arcmsr_get_firmware_spec(acb);
	/* start background rebuild */
	arcmsr_start_adapter_bgrb(acb);
	/* empty the doorbell Qbuffer if the doorbell rang */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound Post Queue and outbound doorbell interrupts */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
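/*
 * Recovery path shared by bus reset: ask the IOP to abort everything,
 * give it three seconds, drain the posted queues with interrupts off,
 * then force-complete any CCB still marked in flight.
 */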
static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	int i = 0;

	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* tell the iop 331 to abort all outstanding commands */
		arcmsr_abort_allcmd(acb);
		/* wait 3 sec for all commands to be aborted */
		ssleep(3);
		/* disable all outbound interrupts */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* clear all outbound posted Qs */
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				arcmsr_ccb_complete(ccb, 1);
			}
		}
		/* enable all outbound interrupts */
		arcmsr_enable_outbound_ints(acb, intmask_org);
	}
}
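/*
 * eh_bus_reset_handler: poll the interrupt handler by hand for up to
 * 400 * 25 ms = 10 seconds so outstanding commands can drain before
 * falling back to the heavier arcmsr_iop_reset() sequence.
 */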
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)cmd->device->host->hostdata;
	int i;

	acb->num_resets++;
	acb->acb_flags |= ACB_F_BUS_RESET;
	for (i = 0; i < 400; i++) {
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);	/* FIXME: need spinlock */
		msleep(25);
	}
	arcmsr_iop_reset(acb);
	acb->acb_flags &= ~ACB_F_BUS_RESET;
	return SUCCESS;
}
static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
					struct CommandControlBlock *ccb)
{
	u32 intmask;

	ccb->startdone = ARCMSR_CCB_ABORTED;
	/* wait 3 sec for all commands to be done */
	ssleep(3);
	intmask = arcmsr_disable_outbound_ints(acb);
	arcmsr_polling_ccbdone(acb, ccb);
	arcmsr_enable_outbound_ints(acb, intmask);
}
static int arcmsr_abort(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)cmd->device->host->hostdata;
	int i = 0;

	printk(KERN_NOTICE
		"arcmsr%d: abort device command of scsi id = %d lun = %d\n",
		acb->host->host_no, cmd->device->id, cmd->device->lun);
	acb->num_aborts++;
	/*
	************************************************
	** the interrupt service routine is blocked here;
	** handle the abort as quickly as possible and exit
	************************************************
	*/
	if (!atomic_read(&acb->ccboutstandingcount))
		return SUCCESS;
	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
		struct CommandControlBlock *ccb = acb->pccb_pool[i];
		if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
			arcmsr_abort_one_cmd(acb, ccb);
			break;
		}
	}
	return SUCCESS;
}
static const char *arcmsr_info(struct Scsi_Host *host)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;
	static char buf[256];
	char *type;
	int raid6 = 1;

	switch (acb->pdev->device) {
	case PCI_DEVICE_ID_ARECA_1110:
	case PCI_DEVICE_ID_ARECA_1200:
	case PCI_DEVICE_ID_ARECA_1202:
	case PCI_DEVICE_ID_ARECA_1210:
		raid6 = 0;
		/*FALLTHRU*/
	case PCI_DEVICE_ID_ARECA_1120:
	case PCI_DEVICE_ID_ARECA_1130:
	case PCI_DEVICE_ID_ARECA_1160:
	case PCI_DEVICE_ID_ARECA_1170:
	case PCI_DEVICE_ID_ARECA_1201:
	case PCI_DEVICE_ID_ARECA_1220:
	case PCI_DEVICE_ID_ARECA_1230:
	case PCI_DEVICE_ID_ARECA_1260:
	case PCI_DEVICE_ID_ARECA_1270:
	case PCI_DEVICE_ID_ARECA_1280:
		type = "SATA";
		break;
	case PCI_DEVICE_ID_ARECA_1380:
	case PCI_DEVICE_ID_ARECA_1381:
	case PCI_DEVICE_ID_ARECA_1680:
	case PCI_DEVICE_ID_ARECA_1681:
		type = "SAS";
		break;
	default:
		type = "X-TYPE";
		break;
	}
	sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
		type, raid6 ? " (RAID6 capable)" : "",
		ARCMSR_DRIVER_VERSION);
	return buf;
}
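/*
 * PCI error recovery (AER) hooks: error_detected quiesces the adapter
 * according to the reported channel state, and slot_reset re-runs
 * essentially the same bring-up sequence as arcmsr_iop_init() on the
 * re-enabled device.
 */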
#ifdef CONFIG_SCSI_ARCMSR_AER
static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;
	uint32_t intmask_org;
	int i, j;

	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	/* disable all outbound interrupts */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	arcmsr_get_firmware_spec(acb);
	/* start background rebuild */
	arcmsr_start_adapter_bgrb(acb);
	/* empty the doorbell Qbuffer if the doorbell rang */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound Post Queue and outbound doorbell interrupts */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
	pci_enable_pcie_error_reporting(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata;
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	int i = 0;

	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* tell the iop 331 to abort all outstanding commands */
		arcmsr_abort_allcmd(acb);
		/* wait 3 sec for all commands to be aborted */
		ssleep(3);
		/* disable all outbound interrupts */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* clear all outbound posted Qs */
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				arcmsr_ccb_complete(ccb, 1);
			}
		}
		/* enable all outbound interrupts */
		arcmsr_enable_outbound_ints(acb, intmask_org);
	}
	pci_disable_device(pdev);
}
static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}
static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		arcmsr_pci_ers_need_reset_forepart(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		arcmsr_pci_ers_disconnect_forepart(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
#endif