/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include "aacraid.h"
/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
		(dev->max_fib_size + sizeof(struct aac_fib_xporthdr))
		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
		&dev->hw_fib_pa);
	if (dev->hw_fib_va == NULL)
		return -ENOMEM;
	return 0;
}
/**
 *	aac_fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev,
	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  dev->hw_fib_va, dev->hw_fib_pa);
	dev->hw_fib_va = NULL;
	dev->hw_fib_pa = 0;
}
/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i < 0)
		return -ENOMEM;

	/* 32 byte alignment for PMC */
	hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
	dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
		(hw_fib_pa - dev->hw_fib_pa));
	dev->hw_fib_pa = hw_fib_pa;
	memset(dev->hw_fib_va, 0,
		(dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) *
		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));

	/* add Xport header */
	dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
		sizeof(struct aac_fib_xporthdr));
	dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr);

	hw_fib = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i];
		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
		i++, fibptr++)
	{
		fibptr->flags = 0;
		fibptr->dev = dev;
		fibptr->hw_fib_va = hw_fib;
		fibptr->data = (void *) fibptr->hw_fib_va->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		sema_init(&fibptr->event_wait, 0);
		spin_lock_init(&fibptr->event_lock);
		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
			dev->max_fib_size + sizeof(struct aac_fib_xporthdr));
		hw_fib_pa = hw_fib_pa +
			dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
	}
	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Enable this to debug out of queue space
	 */
	dev->free_fib = &dev->fibs[0];
	return 0;
}
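
/*
 * Layout sketch (my reading of aac_fib_setup() above, illustrative
 * only): each of the can_queue + AAC_NUM_MGT_FIB pool slots is a
 * struct aac_fib_xporthdr immediately followed by a hw_fib, so once
 * setup completes hw_fib number i sits at
 *
 *	dev->hw_fib_pa + i * (dev->max_fib_size +
 *				sizeof(struct aac_fib_xporthdr))
 *
 * where dev->hw_fib_pa has already been rounded up to a 32-byte
 * boundary and advanced past the first transport header.
 */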
/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;

	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->flags = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;
	return fibptr;
}
/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags, flagsv;

	spin_lock_irqsave(&fibptr->event_lock, flagsv);
	if (fibptr->done == 2) {
		spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
		return;
	}
	spin_unlock_irqrestore(&fibptr->event_lock, flagsv);

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
		aac_config.fib_timeouts++;
	if (fibptr->hw_fib_va->header.XferState != 0) {
		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
			(void *)fibptr,
			le32_to_cpu(fibptr->hw_fib_va->header.XferState));
	}
	fibptr->next = fibptr->dev->free_fib;
	fibptr->dev->free_fib = fibptr;
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}
/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
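
/*
 * Usage sketch (illustrative, not part of the driver): callers in this
 * file pair the pool primitives above around a single request, e.g.
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *
 *	if (!fibptr)
 *		return -ENOMEM;
 *	aac_fib_init(fibptr);
 *	... fill fib_data(fibptr), then issue it with aac_fib_send() ...
 *	aac_fib_complete(fibptr);
 *	aac_fib_free(fibptr);
 */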
/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	hw_fib->header.XferState = 0;
}

/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and these are the only routines which have
 *	knowledge of how these queues are implemented.
 */
/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	With a priority the routine returns a queue entry if the queue has
 *	free entries. If the queue is full (no free entries) then no entry is
 *	returned and the function returns 0, otherwise 1 is returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	/* Queue is full */
	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
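
/*
 * Ring note (my reading of the code above): the shared producer and
 * consumer indices distinguish full from empty by sacrificing one
 * slot. aac_consumer_get() below treats producer == consumer as an
 * empty queue, while aac_get_entry() treats (*index + 1) == consumer
 * as full, so the producer never catches the consumer up completely.
 */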
/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@priority: Priority of fib
 *	@fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This
 *	level sends and receives FIBs. This level has no knowledge of how
 *	these FIBs get passed back and forth.
 */
/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long qflags;
	unsigned long mflags = 0;
	unsigned long sflags = 0;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	fibptr->flags = 0;
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1;
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and notify
	 *	the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
		fibptr->flags = FIB_CONTEXT_FLAG;
	}

	fibptr->done = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib_va));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

	if (!dev->queues)
		return -EBUSY;

	if (wait) {

		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			printk(KERN_INFO "No management Fibs Available:%d\n",
						dev->management_fib_count);
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (dev->sync_mode) {
		if (wait)
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		spin_lock_irqsave(&dev->sync_lock, sflags);
		if (dev->sync_fib) {
			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
		} else {
			dev->sync_fib = fibptr;
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
				NULL, NULL, NULL, NULL, NULL);
		}
		if (wait) {
			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
			if (down_interruptible(&fibptr->event_wait)) {
				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
				return -EFAULT;
			}
			return 0;
		}
		return -EINPROGRESS;
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}

	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				int blink;
				if (time_is_before_eq_jiffies(timeout)) {
					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update motherboard BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				if ((blink = aac_adapter_check_health(dev)) > 0) {
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
						  "Usually a result of a serious unrecoverable hardware problem\n",
						  blink);
					}
					return -EFAULT;
				}
				/*
				 * We used to udelay() here but that absorbed
				 * a CPU when a timeout occurred. Not very
				 * useful.
				 */
				cpu_relax();
			}
		} else if (down_interruptible(&fibptr->event_wait)) {
			/* Do nothing ... satisfy
			 * down_interruptible must_check */
		}

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if (fibptr->done == 0) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		BUG_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;
		return 0;
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
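
/*
 * Caller sketch (illustrative; modeled on aac_reset_adapter() later in
 * this file, details assumed): a synchronous management command looks
 * like
 *
 *	aac_fib_init(fibptr);
 *	cmd = (struct aac_pause *)fib_data(fibptr);
 *	cmd->command = cpu_to_le32(VM_ContainerConfig);
 *	status = aac_fib_send(ContainerCommand, fibptr, sizeof(*cmd),
 *			FsaNormal, 1, 1, NULL, NULL);
 *
 * wait > 0 sleeps interruptibly on the fib's event semaphore, wait < 0
 * selects the timed polling path above (three-minute limit, used for
 * the first commands during init), and wait == 0 returns -EINPROGRESS
 * with the reply delivered through @callback.
 */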
/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Will return a pointer to the entry on the top of the queue requested that
 *	we are a consumer of, and return the address of the queue entry. It does
 *	not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;
	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return(status);
}
/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		le32_add_cpu(q->headers.consumer, 1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
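
/*
 * Protocol note (summary of the two routines above, my reading): a
 * consume is two steps. The caller peeks the head entry with
 * aac_consumer_get(), processes it, then advances the shared consumer
 * index with aac_consumer_free(), which also sends a "not full"
 * notification via aac_adapter_notify() to restart a producer that had
 * stalled on a full queue.
 */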
/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
		kfree(hw_fib);
		return 0;
	}

	if (hw_fib->header.XferState == 0) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC &&
	    hw_fib->header.StructType != FIB_MAGIC2 &&
	    hw_fib->header.StructType != FIB_MAGIC2_64) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: "
			"Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}
/**
 *	aac_fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	unsigned long flags;
	struct hw_fib * hw_fib = fibptr->hw_fib_va;

	/*
	 *	Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC &&
	    hw_fib->header.StructType != FIB_MAGIC2 &&
	    hw_fib->header.StructType != FIB_MAGIC2_64)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */
	spin_lock_irqsave(&fibptr->event_lock, flags);
	if (fibptr->done == 2) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fibptr->event_lock, flags);

	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}
/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}
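
/*
 * Encoding note (derived from the routine above): @val packs the
 * message length into its low 16 bits and the log level into its high
 * 16 bits, so for example val == (LOG_AAC_HIGH_ERROR << 16) | 32
 * would print a 32-byte message from printfbuf at KERN_WARNING
 * severity.
 */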
/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT	(30*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	u32 channel, id, lun, container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed = NOTHING;

	/* Sniff for container changes */

	if (!dev || !dev->fsa_dev)
		return;
	container = channel = id = lun = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 * re-configures that take place. As a result of this when
	 * certain AIF's come in we will set a flag waiting for another
	 * type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 * address. Make sure we have the right array, and if
			 * so set the flag to initiate a new re-config once we
			 * see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		case AifEnBatteryEvent:
			dev->cache_protected =
				(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
			break;
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Container change detected. If we currently are not
		 * waiting on something else, setup to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		case AifEnAddJBOD:
		case AifEnDeleteJBOD:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if ((container >> 28)) {
				container = (u32)-1;
				break;
			}
			channel = (container >> 24) & 0xF;
			if (channel >= dev->maximum_num_channels) {
				container = (u32)-1;
				break;
			}
			id = container & 0xFFFF;
			if (id >= dev->maximum_num_physicals) {
				container = (u32)-1;
				break;
			}
			lun = (container >> 16) & 0xFF;
			container = (u32)-1;
			channel = aac_phys_to_logical(channel);
			device_config_needed =
			  (((__le32 *)aifcmd->data)[0] ==
			    cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
			if (device_config_needed == ADD) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					channel,
					id,
					lun);
				if (device) {
					scsi_remove_device(device);
					scsi_device_put(device);
				}
			}
			break;

		case AifEnEnclosureManagement:
			/*
			 * If in JBOD mode, automatic exposure of new
			 * physical target to be suppressed until configured.
			 */
			if (dev->jbod)
				break;
			switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
			case EM_DRIVE_INSERTION:
			case EM_DRIVE_REMOVAL:
				container = le32_to_cpu(
					((__le32 *)aifcmd->data)[2]);
				if ((container >> 28)) {
					container = (u32)-1;
					break;
				}
				channel = (container >> 24) & 0xF;
				if (channel >= dev->maximum_num_channels) {
					container = (u32)-1;
					break;
				}
				id = container & 0xFFFF;
				lun = (container >> 16) & 0xFF;
				container = (u32)-1;
				if (id >= dev->maximum_num_physicals) {
					/* legacy dev_t ? */
					if ((0x2000 <= id) || lun || channel ||
					  ((channel = (id >> 7) & 0x3F) >=
					  dev->maximum_num_channels))
						break;
					lun = (id >> 4) & 7;
					id &= 0xF;
				}
				channel = aac_phys_to_logical(channel);
				device_config_needed =
				  (((__le32 *)aifcmd->data)[3]
				    == cpu_to_le32(EM_DRIVE_INSERTION)) ?
				  ADD : DELETE;
				break;
			}
			break;
		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 * done on a container it is initially created then hidden from
		 * the OS. When the clear completes we don't get a config
		 * change so we monitor the job status complete on a clear then
		 * wait for a container change.
		 */

		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
		     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    ((__le32 *)aifcmd->data)[6] == 0 &&
		    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}

	container = 0;
retry_next:
	if (device_config_needed == NOTHING)
	for (; container < dev->maximum_num_containers; ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			(dev->fsa_dev[container].config_needed != NOTHING) &&
			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			channel = CONTAINER_TO_CHANNEL(container);
			id = CONTAINER_TO_ID(container);
			lun = CONTAINER_TO_LUN(container);
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 *	If we decided that a re-configuration needs to be done,
	 * schedule it here on the way out the door, please close the door
	 * behind you.
	 */

	/*
	 *	Find the scsi_device associated with the SCSI address,
	 * and mark it as changed, invalidating the cache. This deals
	 * with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((channel == CONTAINER_CHANNEL) &&
	  (device_config_needed != NOTHING)) {
		if (dev->fsa_dev[container].valid == 1)
			dev->fsa_dev[container].valid = 2;
		aac_probe_container(dev, container);
	}
	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
	if (device) {
		switch (device_config_needed) {
		case DELETE:
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
			scsi_remove_device(device);
#else
			if (scsi_device_online(device)) {
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					"Device offlined - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array deleted" :
						"enclosure services event");
			}
#endif
			break;
		case ADD:
			if (!scsi_device_online(device)) {
				sdev_printk(KERN_INFO, device,
					"Device online - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array created" :
						"enclosure services event");
				scsi_device_set_state(device, SDEV_RUNNING);
			}
			/* FALLTHRU */
		case CHANGE:
			if ((channel == CONTAINER_CHANNEL)
			 && (!dev->fsa_dev[container].valid)) {
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
				scsi_remove_device(device);
#else
				if (!scsi_device_online(device))
					break;
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					"Device offlined - %s\n",
					"array failed");
#endif
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);

		default:
			break;
		}
		scsi_device_put(device);
		device_config_needed = NOTHING;
	}
	if (device_config_needed == ADD)
		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
	if (channel == CONTAINER_CHANNEL) {
		container++;
		device_config_needed = NOTHING;
		goto retry_next;
	}
}
static int _aac_reset_adapter(struct aac_dev *aac, int forced)
{
	int index, quirks;
	int retval;
	struct Scsi_Host *host;
	struct scsi_device *dev;
	struct scsi_cmnd *command;
	struct scsi_cmnd *command_list;
	int jafo = 0;

	/*
	 * Assumptions:
	 *	- host is locked, unless called by the aacraid thread.
	 *	  (a matter of convenience, due to legacy issues surrounding
	 *	  eh_host_adapter_reset).
	 *	- in_reset is asserted, so no new i/o is getting to the
	 *	  card.
	 *	- The card is dead, or will be very shortly ;-/ so no new
	 *	  commands are completing in the interrupt service.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	aac_adapter_disable_int(aac);
	if (aac->thread->pid != current->pid) {
		spin_unlock_irq(host->host_lock);
		kthread_stop(aac->thread);
		jafo = 1;
	}

	/*
	 *	If a positive health, means in a known DEAD PANIC
	 * state and the adapter could be reset to `try again'.
	 */
	retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));

	if (retval)
		goto out;

	/*
	 *	Loop through the fibs, close the synchronous FIBS
	 */
	for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
		struct fib *fib = &aac->fibs[index];
		if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
		  (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			schedule();
			retval = 0;
		}
	}
	/* Give some extra time for ioctls to complete. */
	if (retval == 0)
		ssleep(2);
	index = aac->cardtype;

	/*
	 * Re-initialize the adapter, first free resources, then carefully
	 * apply the initialization sequence to come back again. Only risk
	 * is a change in Firmware dropping cache, it is assumed the caller
	 * will ensure that i/o is quiesced and the card is flushed in that
	 * case.
	 */
	aac_fib_map_free(aac);
	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
	aac->comm_addr = NULL;
	aac->comm_phys = 0;
	kfree(aac->queues);
	aac->queues = NULL;
	free_irq(aac->pdev->irq, aac);
	if (aac->msi)
		pci_disable_msi(aac->pdev);
	kfree(aac->fsa_dev);
	aac->fsa_dev = NULL;
	quirks = aac_get_driver_ident(index)->quirks;
	if (quirks & AAC_QUIRK_31BIT) {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
			goto out;
	} else {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32)))))
			goto out;
	}
	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
		goto out;
	if (quirks & AAC_QUIRK_31BIT)
		if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32))))
			goto out;
	if (jafo) {
		aac->thread = kthread_run(aac_command_thread, aac, aac->name);
		if (IS_ERR(aac->thread)) {
			retval = PTR_ERR(aac->thread);
			goto out;
		}
	}
	(void)aac_get_adapter_info(aac);
	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
		host->sg_tablesize = 34;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
		host->sg_tablesize = 17;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
	/*
	 * This is where the assumption that the Adapter is quiesced
	 * is important.
	 */
	command_list = NULL;
	__shost_for_each_device(dev, host) {
		unsigned long flags;
		spin_lock_irqsave(&dev->list_lock, flags);
		list_for_each_entry(command, &dev->cmd_list, list)
			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
				command->SCp.buffer = (struct scatterlist *)command_list;
				command_list = command;
			}
		spin_unlock_irqrestore(&dev->list_lock, flags);
	}
	while ((command = command_list)) {
		command_list = (struct scsi_cmnd *)command->SCp.buffer;
		command->SCp.buffer = NULL;
		command->result = DID_OK << 16
		  | COMMAND_COMPLETE << 8
		  | SAM_STAT_TASK_SET_FULL;
		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		command->scsi_done(command);
	}
	retval = 0;

out:
	aac->in_reset = 0;
	scsi_unblock_requests(host);
	if (jafo) {
		spin_lock_irq(host->host_lock);
	}
	return retval;
}
int aac_reset_adapter(struct aac_dev * aac, int forced)
{
	unsigned long flagv = 0;
	int retval;
	struct Scsi_Host * host;

	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return -EBUSY;

	if (aac->in_reset) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return -EBUSY;
	}
	aac->in_reset = 1;
	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	/*
	 * Wait for all commands to complete to this specific
	 * target (block maximum 60 seconds). Although not necessary,
	 * it does make us a good storage citizen.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	if (forced < 2) for (retval = 60; retval; --retval) {
		struct scsi_device * dev;
		struct scsi_cmnd * command;
		int active = 0;

		__shost_for_each_device(dev, host) {
			spin_lock_irqsave(&dev->list_lock, flagv);
			list_for_each_entry(command, &dev->cmd_list, list) {
				if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
					active++;
					break;
				}
			}
			spin_unlock_irqrestore(&dev->list_lock, flagv);
			if (active)
				break;

		}
		/*
		 * We can exit if all the commands are complete
		 */
		if (active == 0)
			break;
		ssleep(1);
	}

	/* Quiesce build, flush cache, write through mode */
	if (forced < 2)
		aac_send_shutdown(aac);
	spin_lock_irqsave(host->host_lock, flagv);
	retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
	spin_unlock_irqrestore(host->host_lock, flagv);

	if ((forced < 2) && (retval == -ENODEV)) {
		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
		struct fib * fibctx = aac_fib_alloc(aac);
		if (fibctx) {
			struct aac_pause *cmd;
			int status;

			aac_fib_init(fibctx);

			cmd = (struct aac_pause *) fib_data(fibctx);

			cmd->command = cpu_to_le32(VM_ContainerConfig);
			cmd->type = cpu_to_le32(CT_PAUSE_IO);
			cmd->timeout = cpu_to_le32(1);
			cmd->min = cpu_to_le32(1);
			cmd->noRescan = cpu_to_le32(1);
			cmd->count = cpu_to_le32(0);

			status = aac_fib_send(ContainerCommand,
			  fibctx,
			  sizeof(struct aac_pause),
			  FsaNormal,
			  -2 /* Timeout silently */, 1,
			  NULL, NULL);

			if (status >= 0)
				aac_fib_complete(fibctx);
			/* FIB should be freed only after getting
			 * the response from the F/W */
			if (status != -ERESTARTSYS)
				aac_fib_free(fibctx);
		}
	}

	return retval;
}
int aac_check_health(struct aac_dev * aac)
{
	int BlinkLED;
	unsigned long time_now, flagv = 0;
	struct list_head * entry;
	struct Scsi_Host * host;

	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return 0;

	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return 0; /* OK */
	}

	aac->in_reset = 1;

	/* Fake up an AIF:
	 *	aac_aifcmd.command = AifCmdEventNotify = 1
	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
	 *	aac_aifcmd.data[2] = AifHighPriority = 3
	 *	aac_aifcmd.data[3] = BlinkLED
	 */

	time_now = jiffies/HZ;
	entry = aac->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	while (entry != &aac->fib_list) {
		/*
		 * Extract the fibctx
		 */
		struct aac_fib_context *fibctx =
			list_entry(entry, struct aac_fib_context, next);
		struct hw_fib * hw_fib;
		struct fib * fib;
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ, so do not
			 * panic ...
			 */
			u32 time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(aac, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
		if (fib && hw_fib) {
			struct aac_aifcmd * aif;

			fib->hw_fib_va = hw_fib;
			fib->dev = aac;
			aac_fib_init(fib);
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof (struct fib);
			fib->data = hw_fib->data;
			aif = (struct aac_aifcmd *)hw_fib->data;
			aif->command = cpu_to_le32(AifCmdEventNotify);
			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
			((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
			((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
			((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
			((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);

			/*
			 * Put the FIB onto the
			 * fibctx's fibs
			 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
			fibctx->count++;
			/*
			 * Set the event to wake up the
			 * thread that is waiting.
			 */
			up(&fibctx->wait_sem);
		} else {
			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
		}
		entry = entry->next;
	}

	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	if (BlinkLED < 0) {
		printk(KERN_ERR "%s: Host adapter dead %d\n",
			aac->name, BlinkLED);
		goto out;
	}

	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n",
		aac->name, BlinkLED);

	if (!aac_check_reset || ((aac_check_reset == 1) &&
	    (aac->supplement_adapter_info.SupportedOptions2 &
	     AAC_OPTION_IGNORE_RESET)))
		goto out;
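	/*
	 * Take the host lock around the reset unless we are running as
	 * the adapter's own command thread; in that case the jafo path
	 * in _aac_reset_adapter (seen re-acquiring the lock above)
	 * manages the lock itself.
	 */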
	host = aac->scsi_host_ptr;
	if (aac->thread->pid != current->pid)
		spin_lock_irqsave(host->host_lock, flagv);
	BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
	if (aac->thread->pid != current->pid)
		spin_unlock_irqrestore(host->host_lock, flagv);
	return BlinkLED;

out:
	aac->in_reset = 0;
	return BlinkLED;
}

/**
 *	aac_command_thread	-	command processing thread
 *	@dev: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets
 *	set it will pull FIBs off its queue. It will continue to pull FIBs
 *	off until the queue is empty. When the queue is empty it will wait
 *	for more FIBs.
 */
int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long next_jiffies = jiffies + HZ;
	unsigned long next_check_jiffies = next_jiffies;
	long difference = HZ;
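
	/*
	 * next_jiffies paces the periodic host-time update and
	 * next_check_jiffies the periodic health check; both are
	 * re-armed at the bottom of the main loop below.
	 */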

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while (1) {
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib_va;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->hw_fib_va = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				/* The u32 here is important and intended. We
				   are using 32bit wrapping time to fit the
				   adapter field */
				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
				struct fib ** fib_pool, ** fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 * Warning: no sleep allowed while
				 * holding spinlock. We take the estimate
				 * and pre-allocate a set of fibs outside the
				 * lock.
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else {
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20) {
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > aif_timeout) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib_va = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					kfree(*hw_fib_p);
					kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				kfree(hw_fib_pool);
				kfree(fib_pool);
			}
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 *	There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);

		/*
		 *	Background activity
		 */
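		/*
		 * Two periodic jobs run here: a health check every
		 * check_interval seconds and a host-time update every
		 * update_interval seconds, with 'difference' holding the
		 * jiffies remaining until whichever deadline comes first.
		 */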
		if ((time_before(next_check_jiffies,next_jiffies))
		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
			next_check_jiffies = next_jiffies;
			if (aac_check_health(dev) == 0) {
				difference = ((long)(unsigned)check_interval)
					   * HZ;
				next_check_jiffies = jiffies + difference;
			} else if (!dev->queues)
				break;
		}
		if (!time_before(next_check_jiffies,next_jiffies)
		 && ((difference = next_jiffies - jiffies) <= 0)) {
			struct timeval now;
			int ret;

			/* Don't even try to talk to adapter if it's sick */
			ret = aac_check_health(dev);
			if (!ret && !dev->queues)
				break;
			next_check_jiffies = jiffies
					   + ((long)(unsigned)check_interval)
					   * HZ;
			do_gettimeofday(&now);

			/* Synchronize our watches */
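			/*
			 * If the current time is more than one jiffy away
			 * from a second boundary, sleep until the next
			 * boundary so that the whole-second host time we
			 * send lands on it; the + 500000 rounds the result
			 * to the nearest jiffy.
			 */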
			if (((1000000 - (1000000 / HZ)) > now.tv_usec)
			 && (now.tv_usec > (1000000 / HZ)))
				difference = (((1000000 - now.tv_usec) * HZ)
				  + 500000) / 1000000;
			else if (ret == 0) {
				struct fib *fibptr;

				if ((fibptr = aac_fib_alloc(dev))) {
					int status;
					__le32 *info;

					aac_fib_init(fibptr);

					info = (__le32 *) fib_data(fibptr);
					if (now.tv_usec > 500000)
						++now.tv_sec;

					*info = cpu_to_le32(now.tv_sec);

					status = aac_fib_send(SendHostTime,
						fibptr,
						sizeof(*info),
						FsaNormal,
						1, 1,
						NULL,
						NULL);
					/* Do not set XferState to zero unless
					 * we receive a response from the F/W */
					if (status >= 0)
						aac_fib_complete(fibptr);
					/* FIB should be freed only after
					 * getting the response from the F/W */
					if (status != -ERESTARTSYS)
						aac_fib_free(fibptr);
				}
				difference = (long)(unsigned)update_interval*HZ;
			} else {
				/* retry shortly */
				difference = 10 * HZ;
			}
			next_jiffies = jiffies + difference;
			if (time_before(next_check_jiffies,next_jiffies))
				difference = next_check_jiffies - jiffies;
		}
		if (difference <= 0)
			difference = 1;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(difference);

		if (kthread_should_stop())
			break;
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}