esas2r_init.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773
  1. /*
  2. * linux/drivers/scsi/esas2r/esas2r_init.c
  3. * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
  4. *
  5. * Copyright (c) 2001-2013 ATTO Technology, Inc.
* (mailto:linuxdrivers@attotech.com)
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License
  10. * as published by the Free Software Foundation; either version 2
  11. * of the License, or (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * NO WARRANTY
  19. * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  20. * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  21. * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  22. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  23. * solely responsible for determining the appropriateness of using and
  24. * distributing the Program and assumes all risks associated with its
  25. * exercise of rights under this Agreement, including but not limited to
  26. * the risks and costs of program errors, damage to or loss of data,
  27. * programs or equipment, and unavailability or interruption of operations.
  28. *
  29. * DISCLAIMER OF LIABILITY
  30. * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  31. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  32. * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  33. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  34. * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  35. * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  36. * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  37. *
  38. * You should have received a copy of the GNU General Public License
  39. * along with this program; if not, write to the Free Software
  40. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
  41. * USA.
  42. */
  43. #include "esas2r.h"
/*
 * Allocate a DMA-coherent buffer of mem_desc->size bytes, over-allocating
 * by 'align' bytes so that both the virtual and the physical address can
 * be rounded up to the requested alignment.  The original (unaligned)
 * pointer is preserved in esas2r_data and the padded length in
 * esas2r_param so esas2r_initmem_free() can undo the adjustment.
 * Returns true on success, false if the allocation failed.
 */
static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
				 struct esas2r_mem_desc *mem_desc,
				 u32 align)
{
	/* pad the request so an aligned block of 'size' bytes always fits */
	mem_desc->esas2r_param = mem_desc->size + align;
	mem_desc->virt_addr = NULL;
	mem_desc->phys_addr = 0;
	mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
						   (size_t)mem_desc->
						   esas2r_param,
						   (dma_addr_t *)&mem_desc->
						   phys_addr,
						   GFP_KERNEL);

	if (mem_desc->esas2r_data == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate %lu bytes of consistent memory!",
			   (long
			    unsigned
			    int)mem_desc->esas2r_param);
		return false;
	}

	/*
	 * Round both addresses up to 'align'.  NOTE(review): this assumes
	 * the virtual and physical addresses are misaligned by the same
	 * offset, which holds for dma_alloc_coherent() allocations.
	 */
	mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
	mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
	/* zero only the caller-visible (aligned) region */
	memset(mem_desc->virt_addr, 0, mem_desc->size);
	return true;
}
/*
 * Release a buffer previously obtained from esas2r_initmem_alloc().
 * A descriptor with no live mapping (virt_addr == NULL) is ignored.
 */
static void esas2r_initmem_free(struct esas2r_adapter *a,
				struct esas2r_mem_desc *mem_desc)
{
	if (mem_desc->virt_addr == NULL)
		return;

	/*
	 * Careful!  phys_addr and virt_addr may have been adjusted from the
	 * original allocation in order to return the desired alignment.  That
	 * means we have to use the original address (in esas2r_data) and size
	 * (esas2r_param) and calculate the original physical address based on
	 * the difference between the requested and actual allocation size.
	 */
	if (mem_desc->phys_addr) {
		/* number of bytes the pointers were advanced for alignment */
		int unalign = ((u8 *)mem_desc->virt_addr) -
			      ((u8 *)mem_desc->esas2r_data);

		dma_free_coherent(&a->pcid->dev,
				  (size_t)mem_desc->esas2r_param,
				  mem_desc->esas2r_data,
				  (dma_addr_t)(mem_desc->phys_addr - unalign));
	} else {
		/*
		 * NOTE(review): this path assumes esas2r_data came from a
		 * kmalloc-family allocator when no DMA address was recorded
		 * -- confirm against the descriptor's other producers.
		 */
		kfree(mem_desc->esas2r_data);
	}

	mem_desc->virt_addr = NULL;
}
/*
 * Allocate one DMA-able VDA request (plus its data buffer) for 'rq'.
 * The backing memory descriptor is tracked on a->vrq_mds_head so
 * esas2r_adapter_power_down() can free it, and the request is given a
 * 1-based handle taken from the running a->num_vrqs count.
 * Returns false if either allocation fails.
 */
static bool alloc_vda_req(struct esas2r_adapter *a,
			  struct esas2r_request *rq)
{
	struct esas2r_mem_desc *memdesc = kzalloc(
		sizeof(struct esas2r_mem_desc), GFP_KERNEL);

	if (memdesc == NULL) {
		esas2r_hdebug("could not alloc mem for vda request memdesc\n");
		return false;
	}

	/* room for the VDA request itself plus its data buffer */
	memdesc->size = sizeof(union atto_vda_req) +
			ESAS2R_DATA_BUF_LEN;

	/* allocate with 256-byte alignment */
	if (!esas2r_initmem_alloc(a, memdesc, 256)) {
		esas2r_hdebug("could not alloc mem for vda request\n");
		kfree(memdesc);
		return false;
	}

	a->num_vrqs++;
	list_add(&memdesc->next_desc, &a->vrq_mds_head);

	rq->vrq_md = memdesc;
	rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
	/* the handle doubles as a 1-based identifier for this request */
	rq->vrq->scsi.handle = a->num_vrqs;

	return true;
}
/*
 * Undo esas2r_map_regions(): unmap and release BAR 2 (register space)
 * and BAR 0 (data window).  Safe to call with only some of the mappings
 * established; the iounmap calls are guarded by NULL checks.
 */
static void esas2r_unmap_regions(struct esas2r_adapter *a)
{
	if (a->regs)
		iounmap((void __iomem *)a->regs);

	a->regs = NULL;

	pci_release_region(a->pcid, 2);

	if (a->data_window)
		iounmap((void __iomem *)a->data_window);

	a->data_window = NULL;

	pci_release_region(a->pcid, 0);
}
  128. static int esas2r_map_regions(struct esas2r_adapter *a)
  129. {
  130. int error;
  131. a->regs = NULL;
  132. a->data_window = NULL;
  133. error = pci_request_region(a->pcid, 2, a->name);
  134. if (error != 0) {
  135. esas2r_log(ESAS2R_LOG_CRIT,
  136. "pci_request_region(2) failed, error %d",
  137. error);
  138. return error;
  139. }
  140. a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
  141. pci_resource_len(a->pcid, 2));
  142. if (a->regs == NULL) {
  143. esas2r_log(ESAS2R_LOG_CRIT,
  144. "ioremap failed for regs mem region\n");
  145. pci_release_region(a->pcid, 2);
  146. return -EFAULT;
  147. }
  148. error = pci_request_region(a->pcid, 0, a->name);
  149. if (error != 0) {
  150. esas2r_log(ESAS2R_LOG_CRIT,
  151. "pci_request_region(2) failed, error %d",
  152. error);
  153. esas2r_unmap_regions(a);
  154. return error;
  155. }
  156. a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
  157. 0),
  158. pci_resource_len(a->pcid, 0));
  159. if (a->data_window == NULL) {
  160. esas2r_log(ESAS2R_LOG_CRIT,
  161. "ioremap failed for data_window mem region\n");
  162. esas2r_unmap_regions(a);
  163. return -EFAULT;
  164. }
  165. return 0;
  166. }
  167. static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
  168. {
  169. int i;
  170. /* Set up interrupt mode based on the requested value */
  171. switch (intr_mode) {
  172. case INTR_MODE_LEGACY:
  173. use_legacy_interrupts:
  174. a->intr_mode = INTR_MODE_LEGACY;
  175. break;
  176. case INTR_MODE_MSI:
  177. i = pci_enable_msi(a->pcid);
  178. if (i != 0) {
  179. esas2r_log(ESAS2R_LOG_WARN,
  180. "failed to enable MSI for adapter %d, "
  181. "falling back to legacy interrupts "
  182. "(err=%d)", a->index,
  183. i);
  184. goto use_legacy_interrupts;
  185. }
  186. a->intr_mode = INTR_MODE_MSI;
  187. esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED);
  188. break;
  189. default:
  190. esas2r_log(ESAS2R_LOG_WARN,
  191. "unknown interrupt_mode %d requested, "
  192. "falling back to legacy interrupt",
  193. interrupt_mode);
  194. goto use_legacy_interrupts;
  195. }
  196. }
/*
 * Request the adapter's IRQ line.  Legacy INTx registration is marked
 * shared (the line may be shared with other devices); MSI gets its own
 * handler.  Sets AF2_IRQ_CLAIMED on success; on failure this only logs,
 * and callers test AF2_IRQ_CLAIMED to decide how to proceed.
 */
static void esas2r_claim_interrupts(struct esas2r_adapter *a)
{
	unsigned long flags = IRQF_DISABLED;

	if (a->intr_mode == INTR_MODE_LEGACY)
		flags |= IRQF_SHARED;

	esas2r_log(ESAS2R_LOG_INFO,
		   "esas2r_claim_interrupts irq=%d (%p, %s, %x)",
		   a->pcid->irq, a, a->name, flags);

	/* choose the handler that matches the configured interrupt mode */
	if (request_irq(a->pcid->irq,
			(a->intr_mode ==
			 INTR_MODE_LEGACY) ? esas2r_interrupt :
			esas2r_msi_interrupt,
			flags,
			a->name,
			a)) {
		esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
			   a->pcid->irq);
		return;
	}

	esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED);
	esas2r_log(ESAS2R_LOG_INFO,
		   "claimed IRQ %d flags: 0x%lx",
		   a->pcid->irq, flags);
}
  221. int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
  222. int index)
  223. {
  224. struct esas2r_adapter *a;
  225. u64 bus_addr = 0;
  226. int i;
  227. void *next_uncached;
  228. struct esas2r_request *first_request, *last_request;
  229. if (index >= MAX_ADAPTERS) {
  230. esas2r_log(ESAS2R_LOG_CRIT,
  231. "tried to init invalid adapter index %u!",
  232. index);
  233. return 0;
  234. }
  235. if (esas2r_adapters[index]) {
  236. esas2r_log(ESAS2R_LOG_CRIT,
  237. "tried to init existing adapter index %u!",
  238. index);
  239. return 0;
  240. }
  241. a = (struct esas2r_adapter *)host->hostdata;
  242. memset(a, 0, sizeof(struct esas2r_adapter));
  243. a->pcid = pcid;
  244. a->host = host;
  245. if (sizeof(dma_addr_t) > 4) {
  246. const uint64_t required_mask = dma_get_required_mask
  247. (&pcid->dev);
  248. if (required_mask > DMA_BIT_MASK(32)
  249. && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
  250. && !pci_set_consistent_dma_mask(pcid,
  251. DMA_BIT_MASK(64))) {
  252. esas2r_log_dev(ESAS2R_LOG_INFO,
  253. &(a->pcid->dev),
  254. "64-bit PCI addressing enabled\n");
  255. } else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
  256. && !pci_set_consistent_dma_mask(pcid,
  257. DMA_BIT_MASK(32))) {
  258. esas2r_log_dev(ESAS2R_LOG_INFO,
  259. &(a->pcid->dev),
  260. "32-bit PCI addressing enabled\n");
  261. } else {
  262. esas2r_log(ESAS2R_LOG_CRIT,
  263. "failed to set DMA mask");
  264. esas2r_kill_adapter(index);
  265. return 0;
  266. }
  267. } else {
  268. if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
  269. && !pci_set_consistent_dma_mask(pcid,
  270. DMA_BIT_MASK(32))) {
  271. esas2r_log_dev(ESAS2R_LOG_INFO,
  272. &(a->pcid->dev),
  273. "32-bit PCI addressing enabled\n");
  274. } else {
  275. esas2r_log(ESAS2R_LOG_CRIT,
  276. "failed to set DMA mask");
  277. esas2r_kill_adapter(index);
  278. return 0;
  279. }
  280. }
  281. esas2r_adapters[index] = a;
  282. sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
  283. esas2r_debug("new adapter %p, name %s", a, a->name);
  284. spin_lock_init(&a->request_lock);
  285. spin_lock_init(&a->fw_event_lock);
  286. sema_init(&a->fm_api_semaphore, 1);
  287. sema_init(&a->fs_api_semaphore, 1);
  288. sema_init(&a->nvram_semaphore, 1);
  289. esas2r_fw_event_off(a);
  290. snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
  291. a->index);
  292. a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
  293. init_waitqueue_head(&a->buffered_ioctl_waiter);
  294. init_waitqueue_head(&a->nvram_waiter);
  295. init_waitqueue_head(&a->fm_api_waiter);
  296. init_waitqueue_head(&a->fs_api_waiter);
  297. init_waitqueue_head(&a->vda_waiter);
  298. INIT_LIST_HEAD(&a->general_req.req_list);
  299. INIT_LIST_HEAD(&a->active_list);
  300. INIT_LIST_HEAD(&a->defer_list);
  301. INIT_LIST_HEAD(&a->free_sg_list_head);
  302. INIT_LIST_HEAD(&a->avail_request);
  303. INIT_LIST_HEAD(&a->vrq_mds_head);
  304. INIT_LIST_HEAD(&a->fw_event_list);
  305. first_request = (struct esas2r_request *)((u8 *)(a + 1));
  306. for (last_request = first_request, i = 1; i < num_requests;
  307. last_request++, i++) {
  308. INIT_LIST_HEAD(&last_request->req_list);
  309. list_add_tail(&last_request->comp_list, &a->avail_request);
  310. if (!alloc_vda_req(a, last_request)) {
  311. esas2r_log(ESAS2R_LOG_CRIT,
  312. "failed to allocate a VDA request!");
  313. esas2r_kill_adapter(index);
  314. return 0;
  315. }
  316. }
  317. esas2r_debug("requests: %p to %p (%d, %d)", first_request,
  318. last_request,
  319. sizeof(*first_request),
  320. num_requests);
  321. if (esas2r_map_regions(a) != 0) {
  322. esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
  323. esas2r_kill_adapter(index);
  324. return 0;
  325. }
  326. a->index = index;
  327. /* interrupts will be disabled until we are done with init */
  328. atomic_inc(&a->dis_ints_cnt);
  329. atomic_inc(&a->disable_cnt);
  330. a->flags |= AF_CHPRST_PENDING
  331. | AF_DISC_PENDING
  332. | AF_FIRST_INIT
  333. | AF_LEGACY_SGE_MODE;
  334. a->init_msg = ESAS2R_INIT_MSG_START;
  335. a->max_vdareq_size = 128;
  336. a->build_sgl = esas2r_build_sg_list_sge;
  337. esas2r_setup_interrupts(a, interrupt_mode);
  338. a->uncached_size = esas2r_get_uncached_size(a);
  339. a->uncached = dma_alloc_coherent(&pcid->dev,
  340. (size_t)a->uncached_size,
  341. (dma_addr_t *)&bus_addr,
  342. GFP_KERNEL);
  343. if (a->uncached == NULL) {
  344. esas2r_log(ESAS2R_LOG_CRIT,
  345. "failed to allocate %d bytes of consistent memory!",
  346. a->uncached_size);
  347. esas2r_kill_adapter(index);
  348. return 0;
  349. }
  350. a->uncached_phys = bus_addr;
  351. esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
  352. a->uncached_size,
  353. a->uncached,
  354. upper_32_bits(bus_addr),
  355. lower_32_bits(bus_addr));
  356. memset(a->uncached, 0, a->uncached_size);
  357. next_uncached = a->uncached;
  358. if (!esas2r_init_adapter_struct(a,
  359. &next_uncached)) {
  360. esas2r_log(ESAS2R_LOG_CRIT,
  361. "failed to initialize adapter structure (2)!");
  362. esas2r_kill_adapter(index);
  363. return 0;
  364. }
  365. tasklet_init(&a->tasklet,
  366. esas2r_adapter_tasklet,
  367. (unsigned long)a);
  368. /*
  369. * Disable chip interrupts to prevent spurious interrupts
  370. * until we claim the IRQ.
  371. */
  372. esas2r_disable_chip_interrupts(a);
  373. esas2r_check_adapter(a);
  374. if (!esas2r_init_adapter_hw(a, true))
  375. esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
  376. else
  377. esas2r_debug("esas2r_init_adapter ok");
  378. esas2r_claim_interrupts(a);
  379. if (a->flags2 & AF2_IRQ_CLAIMED)
  380. esas2r_enable_chip_interrupts(a);
  381. esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE);
  382. if (!(a->flags & AF_DEGRADED_MODE))
  383. esas2r_kickoff_timer(a);
  384. esas2r_debug("esas2r_init_adapter done for %p (%d)",
  385. a, a->disable_cnt);
  386. return 1;
  387. }
/*
 * Quiesce and tear down an adapter: halt the chip (keeping the timer and
 * tasklet alive when suspending), remove sysfs binary files, release the
 * IRQ/MSI resources and free all firmware-visible memory.
 *
 * @power_management: non-zero when called from the suspend path; in that
 *                    case the timer/tasklet are left running for resume.
 */
static void esas2r_adapter_power_down(struct esas2r_adapter *a,
				      int power_management)
{
	struct esas2r_mem_desc *memdesc, *next;

	/* only power down hardware that finished init and is not degraded */
	if ((a->flags2 & AF2_INIT_DONE)
	    && (!(a->flags & AF_DEGRADED_MODE))) {
		if (!power_management) {
			del_timer_sync(&a->timer);
			tasklet_kill(&a->tasklet);
		}
		esas2r_power_down(a);

		/*
		 * There are versions of firmware that do not handle the sync
		 * cache command correctly.  Stall here to ensure that the
		 * cache is lazily flushed.
		 */
		mdelay(500);
		esas2r_debug("chip halted");
	}

	/* Remove sysfs binary files */
	if (a->sysfs_fw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
		a->sysfs_fw_created = 0;
	}

	if (a->sysfs_fs_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
		a->sysfs_fs_created = 0;
	}

	if (a->sysfs_vda_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
		a->sysfs_vda_created = 0;
	}

	if (a->sysfs_hw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
		a->sysfs_hw_created = 0;
	}

	if (a->sysfs_live_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_live_nvram);
		a->sysfs_live_nvram_created = 0;
	}

	if (a->sysfs_default_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_default_nvram);
		a->sysfs_default_nvram_created = 0;
	}

	/* Clean up interrupts */
	if (a->flags2 & AF2_IRQ_CLAIMED) {
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "free_irq(%d) called", a->pcid->irq);

		free_irq(a->pcid->irq, a);
		esas2r_debug("IRQ released");
		esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED);
	}

	if (a->flags2 & AF2_MSI_ENABLED) {
		pci_disable_msi(a->pcid);
		esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED);
		esas2r_debug("MSI disabled");
	}

	if (a->inbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->inbound_list_md);

	if (a->outbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->outbound_list_md);

	/*
	 * Free the S/G list pages; the descriptors themselves live in the
	 * sg_list_mds array freed below, so no kfree/list_del here.
	 */
	list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
				 next_desc) {
		esas2r_initmem_free(a, memdesc);
	}

	/* Following frees everything allocated via alloc_vda_req */
	list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
		esas2r_initmem_free(a, memdesc);
		list_del(&memdesc->next_desc);
		kfree(memdesc);
	}

	kfree(a->first_ae_req);
	a->first_ae_req = NULL;

	kfree(a->sg_list_mds);
	a->sg_list_mds = NULL;

	kfree(a->req_table);
	a->req_table = NULL;

	if (a->regs) {
		esas2r_unmap_regions(a);
		a->regs = NULL;
		a->data_window = NULL;
		esas2r_debug("regions unmapped");
	}
}
/*
 * Release/free all resources for adapter slot 'i' and clear its entry in
 * esas2r_adapters[].  Powers the hardware down, frees shared and
 * per-adapter DMA buffers, destroys the firmware event workqueue, and
 * (for adapters that completed init) unregisters the SCSI host.
 * Safe to call on an empty slot.
 */
void esas2r_kill_adapter(int i)
{
	struct esas2r_adapter *a = esas2r_adapters[i];

	if (a) {
		unsigned long flags;
		struct workqueue_struct *wq;
		esas2r_debug("killing adapter %p [%d] ", a, i);
		esas2r_fw_event_off(a);
		esas2r_adapter_power_down(a, 0);

		/* release the shared buffered-ioctl area if this adapter owns it */
		if (esas2r_buffered_ioctl &&
		    (a->pcid == esas2r_buffered_ioctl_pcid)) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);
			esas2r_buffered_ioctl = NULL;
		}

		if (a->vda_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)VDA_MAX_BUFFER_SIZE,
					  a->vda_buffer,
					  (dma_addr_t)a->ppvda_buffer);
			a->vda_buffer = NULL;
		}
		if (a->fs_api_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->fs_api_buffer_size,
					  a->fs_api_buffer,
					  (dma_addr_t)a->ppfs_api_buffer);
			a->fs_api_buffer = NULL;
		}

		kfree(a->local_atto_ioctl);
		a->local_atto_ioctl = NULL;

		/* detach the workqueue under the lock, destroy it outside */
		spin_lock_irqsave(&a->fw_event_lock, flags);
		wq = a->fw_event_q;
		a->fw_event_q = NULL;
		spin_unlock_irqrestore(&a->fw_event_lock, flags);
		if (wq)
			destroy_workqueue(wq);

		if (a->uncached) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->uncached_size,
					  a->uncached,
					  (dma_addr_t)a->uncached_phys);
			a->uncached = NULL;
			esas2r_debug("uncached area freed");
		}

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_disable_device() called. msix_enabled: %d "
			       "msi_enabled: %d irq: %d pin: %d",
			       a->pcid->msix_enabled,
			       a->pcid->msi_enabled,
			       a->pcid->irq,
			       a->pcid->pin);

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "before pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		pci_disable_device(a->pcid);
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "after pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_set_drv_data(%p, NULL) called",
			       a->pcid);

		pci_set_drvdata(a->pcid, NULL);
		esas2r_adapters[i] = NULL;

		/* only fully-initialized adapters registered a SCSI host */
		if (a->flags2 & AF2_INIT_DONE) {
			esas2r_lock_clear_flags(&a->flags2,
						AF2_INIT_DONE);

			esas2r_lock_set_flags(&a->flags,
					      AF_DEGRADED_MODE);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_remove_host() called");

			scsi_remove_host(a->host);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_host_put() called");

			scsi_host_put(a->host);
		}
	}
}
  562. int esas2r_cleanup(struct Scsi_Host *host)
  563. {
  564. struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
  565. int index;
  566. if (host == NULL) {
  567. int i;
  568. esas2r_debug("esas2r_cleanup everything");
  569. for (i = 0; i < MAX_ADAPTERS; i++)
  570. esas2r_kill_adapter(i);
  571. return -1;
  572. }
  573. esas2r_debug("esas2r_cleanup called for host %p", host);
  574. index = a->index;
  575. esas2r_kill_adapter(index);
  576. return index;
  577. }
/*
 * PCI suspend hook: quiesce the adapter (power-management variant of
 * power-down, so the timer/tasklet stay initialized for resume), save
 * PCI config state, disable the device and drop it to the power state
 * chosen by the PCI core.  Returns 0 on success, -ENODEV if the host
 * has no adapter attached.
 */
int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	u32 device_state;
	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;

	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
	if (!a)
		return -ENODEV;

	esas2r_adapter_power_down(a, 1);
	device_state = pci_choose_state(pdev, state);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_save_state() called");
	pci_save_state(pdev);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_disable_device() called");
	pci_disable_device(pdev);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_set_power_state() called");
	pci_set_power_state(pdev, device_state);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
	return 0;
}
  600. int esas2r_resume(struct pci_dev *pdev)
  601. {
  602. struct Scsi_Host *host = pci_get_drvdata(pdev);
  603. struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
  604. int rez;
  605. esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
  606. esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
  607. "pci_set_power_state(PCI_D0) "
  608. "called");
  609. pci_set_power_state(pdev, PCI_D0);
  610. esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
  611. "pci_enable_wake(PCI_D0, 0) "
  612. "called");
  613. pci_enable_wake(pdev, PCI_D0, 0);
  614. esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
  615. "pci_restore_state() called");
  616. pci_restore_state(pdev);
  617. esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
  618. "pci_enable_device() called");
  619. rez = pci_enable_device(pdev);
  620. pci_set_master(pdev);
  621. if (!a) {
  622. rez = -ENODEV;
  623. goto error_exit;
  624. }
  625. if (esas2r_map_regions(a) != 0) {
  626. esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
  627. rez = -ENOMEM;
  628. goto error_exit;
  629. }
  630. /* Set up interupt mode */
  631. esas2r_setup_interrupts(a, a->intr_mode);
  632. /*
  633. * Disable chip interrupts to prevent spurious interrupts until we
  634. * claim the IRQ.
  635. */
  636. esas2r_disable_chip_interrupts(a);
  637. if (!esas2r_power_up(a, true)) {
  638. esas2r_debug("yikes, esas2r_power_up failed");
  639. rez = -ENOMEM;
  640. goto error_exit;
  641. }
  642. esas2r_claim_interrupts(a);
  643. if (a->flags2 & AF2_IRQ_CLAIMED) {
  644. /*
  645. * Now that system interrupt(s) are claimed, we can enable
  646. * chip interrupts.
  647. */
  648. esas2r_enable_chip_interrupts(a);
  649. esas2r_kickoff_timer(a);
  650. } else {
  651. esas2r_debug("yikes, unable to claim IRQ");
  652. esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
  653. rez = -ENOMEM;
  654. goto error_exit;
  655. }
  656. error_exit:
  657. esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
  658. rez);
  659. return rez;
  660. }
  661. bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
  662. {
  663. esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
  664. esas2r_log(ESAS2R_LOG_CRIT,
  665. "setting adapter to degraded mode: %s\n", error_str);
  666. return false;
  667. }
  668. u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
  669. {
  670. return sizeof(struct esas2r_sas_nvram)
  671. + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
  672. + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
  673. + 8
  674. + (num_sg_lists * (u16)sgl_page_size)
  675. + ALIGN((num_requests + num_ae_requests + 1 +
  676. ESAS2R_LIST_EXTRA) *
  677. sizeof(struct esas2r_inbound_list_source_entry),
  678. 8)
  679. + ALIGN((num_requests + num_ae_requests + 1 +
  680. ESAS2R_LIST_EXTRA) *
  681. sizeof(struct atto_vda_ob_rsp), 8)
  682. + 256; /* VDA request and buffer align */
  683. }
  684. static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
  685. {
  686. int pcie_cap_reg;
  687. pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
  688. if (0xffff && pcie_cap_reg) {
  689. u16 devcontrol;
  690. pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
  691. &devcontrol);
  692. if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
  693. esas2r_log(ESAS2R_LOG_INFO,
  694. "max read request size > 512B");
  695. devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
  696. devcontrol |= 0x2000;
  697. pci_write_config_word(a->pcid,
  698. pcie_cap_reg + PCI_EXP_DEVCTL,
  699. devcontrol);
  700. }
  701. }
  702. }
  703. /*
  704. * Determine the organization of the uncached data area and
  705. * finish initializing the adapter structure
  706. */
  707. bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
  708. void **uncached_area)
  709. {
  710. u32 i;
  711. u8 *high;
  712. struct esas2r_inbound_list_source_entry *element;
  713. struct esas2r_request *rq;
  714. struct esas2r_mem_desc *sgl;
  715. spin_lock_init(&a->sg_list_lock);
  716. spin_lock_init(&a->mem_lock);
  717. spin_lock_init(&a->queue_lock);
  718. a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
  719. if (!alloc_vda_req(a, &a->general_req)) {
  720. esas2r_hdebug(
  721. "failed to allocate a VDA request for the general req!");
  722. return false;
  723. }
  724. /* allocate requests for asynchronous events */
  725. a->first_ae_req =
  726. kzalloc(num_ae_requests * sizeof(struct esas2r_request),
  727. GFP_KERNEL);
  728. if (a->first_ae_req == NULL) {
  729. esas2r_log(ESAS2R_LOG_CRIT,
  730. "failed to allocate memory for asynchronous events");
  731. return false;
  732. }
  733. /* allocate the S/G list memory descriptors */
  734. a->sg_list_mds = kzalloc(
  735. num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL);
  736. if (a->sg_list_mds == NULL) {
  737. esas2r_log(ESAS2R_LOG_CRIT,
  738. "failed to allocate memory for s/g list descriptors");
  739. return false;
  740. }
  741. /* allocate the request table */
  742. a->req_table =
  743. kzalloc((num_requests + num_ae_requests +
  744. 1) * sizeof(struct esas2r_request *), GFP_KERNEL);
  745. if (a->req_table == NULL) {
  746. esas2r_log(ESAS2R_LOG_CRIT,
  747. "failed to allocate memory for the request table");
  748. return false;
  749. }
  750. /* initialize PCI configuration space */
  751. esas2r_init_pci_cfg_space(a);
  752. /*
  753. * the thunder_stream boards all have a serial flash part that has a
  754. * different base address on the AHB bus.
  755. */
  756. if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
  757. && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
  758. a->flags2 |= AF2_THUNDERBOLT;
  759. if (a->flags2 & AF2_THUNDERBOLT)
  760. a->flags2 |= AF2_SERIAL_FLASH;
  761. if (a->pcid->subsystem_device == ATTO_TLSH_1068)
  762. a->flags2 |= AF2_THUNDERLINK;
  763. /* Uncached Area */
  764. high = (u8 *)*uncached_area;
  765. /* initialize the scatter/gather table pages */
  766. for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
  767. sgl->size = sgl_page_size;
  768. list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
  769. if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
  770. /* Allow the driver to load if the minimum count met. */
  771. if (i < NUM_SGL_MIN)
  772. return false;
  773. break;
  774. }
  775. }
  776. /* compute the size of the lists */
  777. a->list_size = num_requests + ESAS2R_LIST_EXTRA;
  778. /* allocate the inbound list */
  779. a->inbound_list_md.size = a->list_size *
  780. sizeof(struct
  781. esas2r_inbound_list_source_entry);
  782. if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
  783. esas2r_hdebug("failed to allocate IB list");
  784. return false;
  785. }
  786. /* allocate the outbound list */
  787. a->outbound_list_md.size = a->list_size *
  788. sizeof(struct atto_vda_ob_rsp);
  789. if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
  790. ESAS2R_LIST_ALIGN)) {
  791. esas2r_hdebug("failed to allocate IB list");
  792. return false;
  793. }
  794. /* allocate the NVRAM structure */
  795. a->nvram = (struct esas2r_sas_nvram *)high;
  796. high += sizeof(struct esas2r_sas_nvram);
  797. /* allocate the discovery buffer */
  798. a->disc_buffer = high;
  799. high += ESAS2R_DISC_BUF_LEN;
  800. high = PTR_ALIGN(high, 8);
  801. /* allocate the outbound list copy pointer */
  802. a->outbound_copy = (u32 volatile *)high;
  803. high += sizeof(u32);
  804. if (!(a->flags & AF_NVR_VALID))
  805. esas2r_nvram_set_defaults(a);
  806. /* update the caller's uncached memory area pointer */
  807. *uncached_area = (void *)high;
  808. /* initialize the allocated memory */
  809. if (a->flags & AF_FIRST_INIT) {
  810. memset(a->req_table, 0,
  811. (num_requests + num_ae_requests +
  812. 1) * sizeof(struct esas2r_request *));
  813. esas2r_targ_db_initialize(a);
  814. /* prime parts of the inbound list */
  815. element =
  816. (struct esas2r_inbound_list_source_entry *)a->
  817. inbound_list_md.
  818. virt_addr;
  819. for (i = 0; i < a->list_size; i++) {
  820. element->address = 0;
  821. element->reserved = 0;
  822. element->length = cpu_to_le32(HWILSE_INTERFACE_F0
  823. | (sizeof(union
  824. atto_vda_req)
  825. /
  826. sizeof(u32)));
  827. element++;
  828. }
  829. /* init the AE requests */
  830. for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
  831. i++) {
  832. INIT_LIST_HEAD(&rq->req_list);
  833. if (!alloc_vda_req(a, rq)) {
  834. esas2r_hdebug(
  835. "failed to allocate a VDA request!");
  836. return false;
  837. }
  838. esas2r_rq_init_request(rq, a);
  839. /* override the completion function */
  840. rq->comp_cb = esas2r_ae_complete;
  841. }
  842. }
  843. return true;
  844. }
/*
 * This code will verify that the chip is operational.
 *
 * Brings the adapter firmware to a ready state: waits for register
 * access and firmware readiness, selects the S/G list format (SGE vs
 * PRD) based on the firmware API version, programs the inbound/outbound
 * communication list registers, and completes the interface-init
 * doorbell handshake.  On unrecoverable failure the adapter is placed
 * in degraded mode via esas2r_set_degraded_mode() (whose result is
 * returned).
 */
bool esas2r_check_adapter(struct esas2r_adapter *a)
{
	u32 starttime;
	u32 doorbell;
	u64 ppaddr;
	u32 dw;

	/*
	 * if the chip reset detected flag is set, we can bypass a bunch of
	 * stuff.
	 */
	if (a->flags & AF_CHPRST_DETECTED)
		goto skip_chip_reset;

	/*
	 * BEFORE WE DO ANYTHING, disable the chip interrupts!  the boot driver
	 * may have left them enabled or we may be recovering from a fault.
	 */
	esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
	esas2r_flush_register_dword(a, MU_INT_MASK_OUT);

	/*
	 * wait for the firmware to become ready by forcing an interrupt and
	 * waiting for a response.
	 */
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		esas2r_force_interrupt(a);
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell == 0xFFFFFFFF) {
			/*
			 * Give the firmware up to two seconds to enable
			 * register access after a reset.  An all-ones read
			 * means the chip is not decoding register cycles yet.
			 */
			if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
				return esas2r_set_degraded_mode(a,
								"unable to access registers");
		} else if (doorbell & DRBL_FORCE_INT) {
			u32 ver = (doorbell & DRBL_FW_VER_MSK);

			/*
			 * This driver supports version 0 and version 1 of
			 * the API
			 */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);

			if (ver == DRBL_FW_VER_0) {
				/* v0 firmware: legacy SGE-format S/G lists */
				esas2r_lock_set_flags(&a->flags,
						      AF_LEGACY_SGE_MODE);

				a->max_vdareq_size = 128;
				a->build_sgl = esas2r_build_sg_list_sge;
			} else if (ver == DRBL_FW_VER_1) {
				/* v1 firmware: PRD-format S/G lists */
				esas2r_lock_clear_flags(&a->flags,
							AF_LEGACY_SGE_MODE);

				a->max_vdareq_size = 1024;
				a->build_sgl = esas2r_build_sg_list_prd;
			} else {
				return esas2r_set_degraded_mode(a,
								"unknown firmware version");
			}
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		/* overall limit of 3 minutes for the firmware to start */
		if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
			esas2r_hdebug("FW ready TMO");
			esas2r_bugon();

			return esas2r_set_degraded_mode(a,
							"firmware start has timed out");
		}
	}

	/* purge any asynchronous events since we will repost them later */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_MSG_IFC_DOWN) {
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(50));

		/* best-effort: proceed anyway after 3 seconds */
		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
			esas2r_hdebug("timeout waiting for interface down");
			break;
		}
	}
skip_chip_reset:
	/*
	 * first things first, before we go changing any of these registers
	 * disable the communication lists.
	 */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
	dw &= ~MU_ILC_ENABLE;
	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
	dw &= ~MU_OLC_ENABLE;
	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);

	/* configure the communication list addresses */
	ppaddr = a->inbound_list_md.phys_addr;
	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
				    upper_32_bits(ppaddr));
	ppaddr = a->outbound_list_md.phys_addr;
	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
				    upper_32_bits(ppaddr));
	/* physical address of outbound_copy within the uncached area */
	ppaddr = a->uncached_phys +
		 ((u8 *)a->outbound_copy - a->uncached);
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
				    upper_32_bits(ppaddr));

	/* reset the read and write pointers */
	*a->outbound_copy =
		a->last_write =
			a->last_read = a->list_size - 1;
	esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
				    MU_OLW_TOGGLE | a->last_write);

	/* configure the interface select fields */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
	dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
	esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
				    (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
	dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
	esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
				    (dw | MU_OLIC_LIST_F0 |
				     MU_OLIC_SOURCE_DDR));

	/* finish configuring the communication lists */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
	dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
	dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
	      | (a->list_size << MU_ILC_NUMBER_SHIFT);
	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
	dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
	dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);

	/*
	 * notify the firmware that we're done setting up the communication
	 * list registers.  wait here until the firmware is done configuring
	 * its lists.  it will signal that it is done by enabling the lists.
	 */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_MSG_IFC_INIT) {
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
			esas2r_hdebug(
				"timeout waiting for communication list init");
			esas2r_bugon();
			return esas2r_set_degraded_mode(a,
							"timeout waiting for communication list init");
		}
	}

	/*
	 * flag whether the firmware supports the power down doorbell.  we
	 * determine this by reading the inbound doorbell enable mask.
	 */
	doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
	if (doorbell & DRBL_POWER_DOWN)
		esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN);
	else
		esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN);

	/*
	 * enable assertion of outbound queue and doorbell interrupts in the
	 * main interrupt cause register.
	 */
	esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
	esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
	return true;
}
/*
 * Process the initialization message just completed and format the next one.
 *
 * Implements the init-message state machine driven by esas2r_init_msgs():
 * a->init_msg selects the current state; this function consumes the
 * completed request, formats the next one in *rq, and advances
 * a->init_msg.  Returns true when a new request was formatted and should
 * be issued, false when the sequence is complete (default case).
 */
static bool esas2r_format_init_msg(struct esas2r_adapter *a,
				   struct esas2r_request *rq)
{
	u32 msg = a->init_msg;
	struct atto_vda_cfg_init *ci;

	a->init_msg = 0;

	switch (msg) {
	case ESAS2R_INIT_MSG_START:
	case ESAS2R_INIT_MSG_REINIT:
	{
		struct timeval now;
		do_gettimeofday(&now);
		esas2r_hdebug("CFG init");
		esas2r_build_cfg_req(a,
				     rq,
				     VDA_CFG_INIT,
				     0,
				     NULL);
		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
		ci->sgl_page_size = sgl_page_size;
		/* pass the current wall-clock time to the firmware */
		ci->epoch_time = now.tv_sec;
		rq->flags |= RF_FAILURE_OK;
		a->init_msg = ESAS2R_INIT_MSG_INIT;
		break;
	}

	case ESAS2R_INIT_MSG_INIT:
		if (rq->req_stat == RS_SUCCESS) {
			u32 major;
			u32 minor;

			a->fw_version = le16_to_cpu(
				rq->func_rsp.cfg_rsp.vda_version);
			a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
			major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release);
			minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release);
			/* pack release major/minor into the upper bytes */
			a->fw_version += (major << 16) + (minor << 24);
		} else {
			esas2r_hdebug("FAILED");
		}

		/*
		 * the 2.71 and earlier releases of R6xx firmware did not error
		 * unsupported config requests correctly.
		 *
		 * NOTE(review): be32_to_cpu() applied to both sides of this
		 * comparison looks like a byte-order normalization trick for
		 * the version compare — confirm against firmware docs.
		 */
		if ((a->flags2 & AF2_THUNDERBOLT)
		    || (be32_to_cpu(a->fw_version) >
			be32_to_cpu(0x47020052))) {
			esas2r_hdebug("CFG get init");
			esas2r_build_cfg_req(a,
					     rq,
					     VDA_CFG_GET_INIT2,
					     sizeof(struct atto_vda_cfg_init),
					     NULL);

			rq->vrq->cfg.sg_list_offset = offsetof(
				struct atto_vda_cfg_req,
				data.sge);
			rq->vrq->cfg.data.prde.ctl_len =
				cpu_to_le32(sizeof(struct atto_vda_cfg_init));
			rq->vrq->cfg.data.prde.address = cpu_to_le64(
				rq->vrq_md->phys_addr +
				sizeof(union atto_vda_req));
			rq->flags |= RF_FAILURE_OK;
			a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
			break;
		}
		/* fall through — firmware too old for GET_INIT2; finish up */

	case ESAS2R_INIT_MSG_GET_INIT:
		/* guard: only consume the response when we really sent it */
		if (msg == ESAS2R_INIT_MSG_GET_INIT) {
			ci = (struct atto_vda_cfg_init *)rq->data_buf;
			if (rq->req_stat == RS_SUCCESS) {
				a->num_targets_backend =
					le32_to_cpu(ci->num_targets_backend);
				a->ioctl_tunnel =
					le32_to_cpu(ci->ioctl_tunnel);
			} else {
				esas2r_hdebug("FAILED");
			}
		}
	/* fall through */

	default:
		rq->req_stat = RS_SUCCESS;
		return false;
	}
	return true;
}
  1112. /*
  1113. * Perform initialization messages via the request queue. Messages are
  1114. * performed with interrupts disabled.
  1115. */
  1116. bool esas2r_init_msgs(struct esas2r_adapter *a)
  1117. {
  1118. bool success = true;
  1119. struct esas2r_request *rq = &a->general_req;
  1120. esas2r_rq_init_request(rq, a);
  1121. rq->comp_cb = esas2r_dummy_complete;
  1122. if (a->init_msg == 0)
  1123. a->init_msg = ESAS2R_INIT_MSG_REINIT;
  1124. while (a->init_msg) {
  1125. if (esas2r_format_init_msg(a, rq)) {
  1126. unsigned long flags;
  1127. while (true) {
  1128. spin_lock_irqsave(&a->queue_lock, flags);
  1129. esas2r_start_vda_request(a, rq);
  1130. spin_unlock_irqrestore(&a->queue_lock, flags);
  1131. esas2r_wait_request(a, rq);
  1132. if (rq->req_stat != RS_PENDING)
  1133. break;
  1134. }
  1135. }
  1136. if (rq->req_stat == RS_SUCCESS
  1137. || ((rq->flags & RF_FAILURE_OK)
  1138. && rq->req_stat != RS_TIMEOUT))
  1139. continue;
  1140. esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
  1141. a->init_msg, rq->req_stat, rq->flags);
  1142. a->init_msg = ESAS2R_INIT_MSG_START;
  1143. success = false;
  1144. break;
  1145. }
  1146. esas2r_rq_destroy_request(rq, a);
  1147. return success;
  1148. }
/*
 * Initialize the adapter chip.
 *
 * Runs the init-message sequence, posts the async-event requests, reads
 * firmware/flash identity, and starts discovery.  When init_poll is true,
 * device discovery is polled inline here (simulating timer ticks); when
 * false, discovery is left to interrupt-driven deferred processing.
 * Returns false on failure, after which the exit path still performs the
 * mandatory flag cleanup.
 */
bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
{
	bool rslt = false;
	struct esas2r_request *rq;
	u32 i;

	if (a->flags & AF_DEGRADED_MODE)
		goto exit;

	if (!(a->flags & AF_NVR_VALID)) {
		if (!esas2r_nvram_read_direct(a))
			esas2r_log(ESAS2R_LOG_WARN,
				   "invalid/missing NVRAM parameters");
	}

	if (!esas2r_init_msgs(a)) {
		esas2r_set_degraded_mode(a, "init messages failed");
		goto exit;
	}

	/* The firmware is ready. */
	esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE);
	esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);

	/* Post all the async event requests */
	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
		esas2r_start_ae_request(a, rq);

	/* read flash/image identity only once (cached thereafter) */
	if (!a->flash_rev[0])
		esas2r_read_flash_rev(a);

	if (!a->image_type[0])
		esas2r_read_image_type(a);

	if (a->fw_version == 0)
		a->fw_rev[0] = 0;
	else
		sprintf(a->fw_rev, "%1d.%02d",
			(int)LOBYTE(HIWORD(a->fw_version)),
			(int)HIBYTE(HIWORD(a->fw_version)));

	esas2r_hdebug("firmware revision: %s", a->fw_rev);

	/* chip-reset recovery during first init: just re-enable and leave */
	if ((a->flags & AF_CHPRST_DETECTED)
	    && (a->flags & AF_FIRST_INIT)) {
		esas2r_enable_chip_interrupts(a);
		return true;
	}

	/* initialize discovery */
	esas2r_disc_initialize(a);

	/*
	 * wait for the device wait time to expire here if requested.  this is
	 * usually requested during initial driver load and possibly when
	 * resuming from a low power state.  deferred device waiting will use
	 * interrupts.  chip reset recovery always defers device waiting to
	 * avoid being in a TASKLET too long.
	 */
	if (init_poll) {
		u32 currtime = a->disc_start_time;
		u32 nexttick = 100;
		u32 deltatime;

		/*
		 * Block Tasklets from getting scheduled and indicate this is
		 * polled discovery.
		 */
		esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED);
		esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED);

		/*
		 * Temporarily bring the disable count to zero to enable
		 * deferred processing.  Note that the count is already zero
		 * after the first initialization.
		 */
		if (a->flags & AF_FIRST_INIT)
			atomic_dec(&a->disable_cnt);

		while (a->flags & AF_DISC_PENDING) {
			schedule_timeout_interruptible(msecs_to_jiffies(100));

			/*
			 * Determine the need for a timer tick based on the
			 * delta time between this and the last iteration of
			 * this loop.  We don't use the absolute time because
			 * then we would have to worry about when nexttick
			 * wraps and currtime hasn't yet.
			 */
			deltatime = jiffies_to_msecs(jiffies) - currtime;
			currtime += deltatime;

			/*
			 * Process any waiting discovery as long as the chip is
			 * up.  If a chip reset happens during initial polling,
			 * we have to make sure the timer tick processes the
			 * doorbell indicating the firmware is ready.
			 */
			if (!(a->flags & AF_CHPRST_PENDING))
				esas2r_disc_check_for_work(a);

			/* Simulate a timer tick. */
			if (nexttick <= deltatime) {
				/* Time for a timer tick */
				nexttick += 100;
				esas2r_timer_tick(a);
			}

			if (nexttick > deltatime)
				nexttick -= deltatime;

			/* Do any deferred processing */
			if (esas2r_is_tasklet_pending(a))
				esas2r_do_tasklet_tasks(a);
		}

		if (a->flags & AF_FIRST_INIT)
			atomic_inc(&a->disable_cnt);

		esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED);
		esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
	}

	esas2r_targ_db_report_changes(a);

	/*
	 * For cases where (a) the initialization messages processing may
	 * handle an interrupt for a port event and a discovery is waiting, but
	 * we are not waiting for devices, or (b) the device wait time has been
	 * exhausted but there is still discovery pending, start any leftover
	 * discovery in interrupt driven mode.
	 */
	esas2r_disc_start_waiting(a);

	/* Enable chip interrupts */
	a->int_mask = ESAS2R_INT_STS_MASK;
	esas2r_enable_chip_interrupts(a);
	esas2r_enable_heartbeat(a);
	rslt = true;

exit:
	/*
	 * Regardless of whether initialization was successful, certain things
	 * need to get done before we exit.
	 */
	if ((a->flags & AF_CHPRST_DETECTED)
	    && (a->flags & AF_FIRST_INIT)) {
		/*
		 * Reinitialization was performed during the first
		 * initialization.  Only clear the chip reset flag so the
		 * original device polling is not cancelled.
		 */
		if (!rslt)
			esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
	} else {
		/* First initialization or a subsequent re-init is complete. */
		if (!rslt) {
			esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
			esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
		}

		/* Enable deferred processing after the first initialization. */
		if (a->flags & AF_FIRST_INIT) {
			esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT);

			if (atomic_dec_return(&a->disable_cnt) == 0)
				esas2r_do_deferred_processes(a);
		}
	}

	return rslt;
}
/*
 * Request a full adapter reset: flag the reset as OS-initiated, perform
 * the local reset, then schedule the tasklet to carry out the deferred
 * recovery work.
 */
void esas2r_reset_adapter(struct esas2r_adapter *a)
{
	esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
	esas2r_local_reset_adapter(a);
	esas2r_schedule_tasklet(a);
}
  1299. void esas2r_reset_chip(struct esas2r_adapter *a)
  1300. {
  1301. if (!esas2r_is_adapter_present(a))
  1302. return;
  1303. /*
  1304. * Before we reset the chip, save off the VDA core dump. The VDA core
  1305. * dump is located in the upper 512KB of the onchip SRAM. Make sure
  1306. * to not overwrite a previous crash that was saved.
  1307. */
  1308. if ((a->flags2 & AF2_COREDUMP_AVAIL)
  1309. && !(a->flags2 & AF2_COREDUMP_SAVED)
  1310. && a->fw_coredump_buff) {
  1311. esas2r_read_mem_block(a,
  1312. a->fw_coredump_buff,
  1313. MW_DATA_ADDR_SRAM + 0x80000,
  1314. ESAS2R_FWCOREDUMP_SZ);
  1315. esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED);
  1316. }
  1317. esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL);
  1318. /* Reset the chip */
  1319. if (a->pcid->revision == MVR_FREY_B2)
  1320. esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
  1321. MU_CTL_IN_FULL_RST2);
  1322. else
  1323. esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
  1324. MU_CTL_IN_FULL_RST);
  1325. /* Stall a little while to let the reset condition clear */
  1326. mdelay(10);
  1327. }
  1328. static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
  1329. {
  1330. u32 starttime;
  1331. u32 doorbell;
  1332. esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
  1333. starttime = jiffies_to_msecs(jiffies);
  1334. while (true) {
  1335. doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
  1336. if (doorbell & DRBL_POWER_DOWN) {
  1337. esas2r_write_register_dword(a, MU_DOORBELL_OUT,
  1338. doorbell);
  1339. break;
  1340. }
  1341. schedule_timeout_interruptible(msecs_to_jiffies(100));
  1342. if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
  1343. esas2r_hdebug("Timeout waiting for power down");
  1344. break;
  1345. }
  1346. }
  1347. }
/*
 * Perform power management processing including managing device states,
 * adapter states, interrupts, and I/O.
 *
 * Quiesces the adapter for power-down: disables interrupts and the
 * heartbeat, brings the message interface down, optionally notifies the
 * firmware, then suspends I/O and removes all devices from the target
 * database.
 */
void esas2r_power_down(struct esas2r_adapter *a)
{
	esas2r_lock_set_flags(&a->flags, AF_POWER_MGT);
	esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN);

	if (!(a->flags & AF_DEGRADED_MODE)) {
		u32 starttime;
		u32 doorbell;

		/*
		 * We are currently running OK and will be reinitializing later.
		 * increment the disable count to coordinate with
		 * esas2r_init_adapter.  We don't have to do this in degraded
		 * mode since we never enabled interrupts in the first place.
		 */
		esas2r_disable_chip_interrupts(a);
		esas2r_disable_heartbeat(a);

		/* wait for any VDA activity to clear before continuing */
		esas2r_write_register_dword(a, MU_DOORBELL_IN,
					    DRBL_MSG_IFC_DOWN);
		starttime = jiffies_to_msecs(jiffies);

		while (true) {
			doorbell =
				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
			if (doorbell & DRBL_MSG_IFC_DOWN) {
				esas2r_write_register_dword(a, MU_DOORBELL_OUT,
							    doorbell);
				break;
			}

			schedule_timeout_interruptible(msecs_to_jiffies(100));

			/* best-effort: continue anyway after 3 seconds */
			if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
				esas2r_hdebug(
					"timeout waiting for interface down");
				break;
			}
		}

		/*
		 * For versions of firmware that support it tell them the driver
		 * is powering down.
		 */
		if (a->flags2 & AF2_VDA_POWER_DOWN)
			esas2r_power_down_notify_firmware(a);
	}

	/* Suspend I/O processing. */
	esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
	esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
	esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);

	esas2r_process_adapter_reset(a);

	/* Remove devices now that I/O is cleaned up. */
	a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
	esas2r_targ_db_remove_all(a, false);
}
  1402. /*
  1403. * Perform power management processing including managing device states, adapter
  1404. * states, interrupts, and I/O.
  1405. */
  1406. bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
  1407. {
  1408. bool ret;
  1409. esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN);
  1410. esas2r_init_pci_cfg_space(a);
  1411. esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT);
  1412. atomic_inc(&a->disable_cnt);
  1413. /* reinitialize the adapter */
  1414. ret = esas2r_check_adapter(a);
  1415. if (!esas2r_init_adapter_hw(a, init_poll))
  1416. ret = false;
  1417. /* send the reset asynchronous event */
  1418. esas2r_send_reset_ae(a, true);
  1419. /* clear this flag after initialization. */
  1420. esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT);
  1421. return ret;
  1422. }
  1423. bool esas2r_is_adapter_present(struct esas2r_adapter *a)
  1424. {
  1425. if (a->flags & AF_NOT_PRESENT)
  1426. return false;
  1427. if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
  1428. esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT);
  1429. return false;
  1430. }
  1431. return true;
  1432. }
  1433. const char *esas2r_get_model_name(struct esas2r_adapter *a)
  1434. {
  1435. switch (a->pcid->subsystem_device) {
  1436. case ATTO_ESAS_R680:
  1437. return "ATTO ExpressSAS R680";
  1438. case ATTO_ESAS_R608:
  1439. return "ATTO ExpressSAS R608";
  1440. case ATTO_ESAS_R60F:
  1441. return "ATTO ExpressSAS R60F";
  1442. case ATTO_ESAS_R6F0:
  1443. return "ATTO ExpressSAS R6F0";
  1444. case ATTO_ESAS_R644:
  1445. return "ATTO ExpressSAS R644";
  1446. case ATTO_ESAS_R648:
  1447. return "ATTO ExpressSAS R648";
  1448. case ATTO_TSSC_3808:
  1449. return "ATTO ThunderStream SC 3808D";
  1450. case ATTO_TSSC_3808E:
  1451. return "ATTO ThunderStream SC 3808E";
  1452. case ATTO_TLSH_1068:
  1453. return "ATTO ThunderLink SH 1068";
  1454. }
  1455. return "ATTO SAS Controller";
  1456. }
  1457. const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
  1458. {
  1459. switch (a->pcid->subsystem_device) {
  1460. case ATTO_ESAS_R680:
  1461. return "R680";
  1462. case ATTO_ESAS_R608:
  1463. return "R608";
  1464. case ATTO_ESAS_R60F:
  1465. return "R60F";
  1466. case ATTO_ESAS_R6F0:
  1467. return "R6F0";
  1468. case ATTO_ESAS_R644:
  1469. return "R644";
  1470. case ATTO_ESAS_R648:
  1471. return "R648";
  1472. case ATTO_TSSC_3808:
  1473. return "SC 3808D";
  1474. case ATTO_TSSC_3808E:
  1475. return "SC 3808E";
  1476. case ATTO_TLSH_1068:
  1477. return "SH 1068";
  1478. }
  1479. return "unknown";
  1480. }