
/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contain all routines that are required for FSA host/adapter
 *    communication.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <asm/semaphore.h>

#include "aacraid.h"
/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */
static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	if ((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa)) == NULL)
		return -ENOMEM;
	return 0;
}
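
/*
 * Note: all hardware fibs live in this one contiguous DMA-coherent
 * region; fib i sits at hw_fib_va + i * max_fib_size (and at
 * hw_fib_pa + i * max_fib_size on the bus side). aac_fib_setup()
 * below carves the region up and chains the per-fib bookkeeping
 * structures accordingly.
 */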
/**
 *	aac_fib_map_free	-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */
void aac_fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev,
	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  dev->hw_fib_va, dev->hw_fib_pa);
}
/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */
int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i < 0)
		return -ENOMEM;

	hw_fib = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
	{
		fibptr->dev = dev;
		fibptr->hw_fib_va = hw_fib;
		fibptr->data = (void *) fibptr->hw_fib_va->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Enable this to debug out of queue space
	 */
	dev->free_fib = &dev->fibs[0];
	return 0;
}
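
/*
 * After aac_fib_setup() the driver-side fib structures form a singly
 * linked free list, e.g. for a 3-entry pool:
 *
 *	dev->free_fib -> fibs[0] -> fibs[1] -> fibs[2] -> NULL
 *
 * aac_fib_alloc() pops from the head and aac_fib_free() pushes back
 * onto it, both under dev->fib_lock.
 */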
/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */
struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;

	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;
	return fibptr;
}
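
/*
 * A typical caller pairs these primitives roughly as follows (a sketch
 * of the usual pattern elsewhere in the driver, not a verbatim quote):
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *	if (!fibptr)
 *		return -ENOMEM;
 *	aac_fib_init(fibptr);
 *	... fill in the command payload at fib_data(fibptr) ...
 *	status = aac_fib_send(command, fibptr, size, FsaNormal,
 *			      1, 1, NULL, NULL);	// synchronous
 *	aac_fib_complete(fibptr);
 *	aac_fib_free(fibptr);
 */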
/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 */
void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
		aac_config.fib_timeouts++;
	if (fibptr->hw_fib_va->header.XferState != 0) {
		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
			(void *)fibptr,
			le32_to_cpu(fibptr->hw_fib_va->header.XferState));
	}
	fibptr->next = fibptr->dev->free_fib;
	fibptr->dev->free_fib = fibptr;
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}
/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */
void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */
static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
	hw_fib->header.XferState = 0;
}
/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines, which are the only routines with knowledge of
 *	how these queues are implemented.
 */
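
/*
 * Each queue is a ring of aac_entry slots in host memory shared with
 * the firmware. Two 32-bit indices, *headers.producer and
 * *headers.consumer, are likewise shared: the producer side bumps the
 * producer index after filling a slot, the consumer side bumps the
 * consumer index after draining one, and both wrap back to the start
 * once they pass q->entries. The queue counts as full when advancing
 * the producer would make it catch up with the consumer.
 */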
/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	With a priority the routine returns a queue entry if the queue has
 *	free entries. If the queue is full (no free entries) no entry is
 *	returned and the function returns 0; otherwise 1 is returned.
 */
static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */
	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
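
/*
 * Worked example of the full test above: with consumer == 5, a wrapped
 * producer index of 4 makes (*index + 1) == 5 equal to the consumer,
 * so slot 4 must stay unused and aac_get_entry() returns 0. One slot
 * is always sacrificed to distinguish a full ring from an empty one
 * (producer == consumer means empty, see aac_consumer_get() below).
 */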
/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */
int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore the adapter's pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This
 *	level sends and receives FIBs. This level has no knowledge of how
 *	these FIBs get passed back and forth.
 */
/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */
int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long qflags;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */
	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib_va->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and send a notify
	 *	to the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}

	fibptr->done = 0;
	fibptr->flags = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command =    %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState  = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib_va));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

	if (!dev->queues)
		return -EBUSY;

	if (wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	aac_adapter_deliver(fibptr);

	/*
	 *	If the caller wanted us to wait for response wait now.
	 */
	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long count = 36000000L; /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				int blink;
				if (--count == 0) {
					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update motherboard BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				if ((blink = aac_adapter_check_health(dev)) > 0) {
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
						  "Usually a result of a serious unrecoverable hardware problem\n",
						  blink);
					}
					return -EFAULT;
				}
				udelay(5);
			}
		} else
			(void)down_interruptible(&fibptr->event_wait);
		spin_lock_irqsave(&fibptr->event_lock, flags);
		if (fibptr->done == 0) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -EINTR;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		BUG_ON(fibptr->done == 0);

		if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)
			return -ETIMEDOUT;
		else
			return 0;
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
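
/*
 * To summarize the wait/reply combinations accepted above:
 *
 *	wait != 0, reply != 0: synchronous; the caller sleeps on
 *		fibptr->event_wait until the response arrives (wait < 0
 *		additionally polls adapter health with a 3 minute limit).
 *	wait == 0, reply != 0: asynchronous; the supplied callback is
 *		invoked from the response path and -EINPROGRESS is
 *		returned here.
 *	wait == 0, reply == 0: fire-and-forget; the fib is reclaimed
 *		when the adapter hands it back.
 *	wait != 0, reply == 0: rejected with -EINVAL.
 */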
/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Will return a pointer to the entry on the top of the queue requested that
 *	we are a consumer of, and return the address of the queue entry. It does
 *	not change the state of the queue.
 */
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;

	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return (status);
}
/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */
void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);

	if (wasfull) {
		switch (qid) {
		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
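
/*
 * The "wasfull" check mirrors the full test in aac_get_entry(): if
 * producer + 1 == consumer before this entry was retired, the producer
 * (the adapter, for these host-side queues) may have stalled waiting
 * for space, so a NotFull notification is raised to get it moving
 * again.
 */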
/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */
int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (hw_fib->header.XferState == 0) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}
/**
 *	aac_fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */
int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;

	/*
	 *	Check for a fib which has already been completed
	 */
	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */
	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}
/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */
void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled) {
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}
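
/*
 * The val argument packs two fields: the low 16 bits carry the message
 * length and the high 16 bits carry the severity level, i.e.
 * val == (level << 16) | length. Only LOG_AAC_HIGH_ERROR is treated
 * specially; everything else is logged at KERN_INFO.
 */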
/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT	(30*HZ)

static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	u32 container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed;

	/* Sniff for container changes */

	if (!dev || !dev->fsa_dev)
		return;
	container = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 *	re-configures that take place. As a result of this when
	 *	certain AIF's come in we will set a flag waiting for another
	 *	type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 *	address. Make sure we have the right array, and if
			 *	so set the flag to initiate a new re-config once we
			 *	see an AifEnConfigChange AIF come through.
			 */
			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Container change detected. If we currently are not
		 *	waiting on something else, setup to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		}

		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 *	done on a container it is initially created then hidden from
		 *	the OS. When the clear completes we don't get a config
		 *	change so we monitor the job status complete on a clear then
		 *	wait for a container change.
		 */
		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && (((u32 *)aifcmd->data)[6] == 0)
		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}

	device_config_needed = NOTHING;
	for (container = 0; container < dev->maximum_num_containers;
	    ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			(dev->fsa_dev[container].config_needed != NOTHING) &&
			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 *	If we decided that a re-configuration needs to be done,
	 *	schedule it here on the way out the door, please close the door
	 *	behind you.
	 */

	/*
	 *	Find the scsi_device associated with the SCSI address,
	 *	and mark it as changed, invalidating the cache. This deals
	 *	with changes to existing device IDs.
	 */
	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((device_config_needed == CHANGE)
	 && (dev->fsa_dev[container].valid == 1))
		dev->fsa_dev[container].valid = 2;
	if ((device_config_needed == CHANGE) ||
			(device_config_needed == ADD))
		aac_probe_container(dev, container);
	device = scsi_device_lookup(dev->scsi_host_ptr,
		CONTAINER_TO_CHANNEL(container),
		CONTAINER_TO_ID(container),
		CONTAINER_TO_LUN(container));
	if (device) {
		switch (device_config_needed) {
		case DELETE:
		case CHANGE:
			scsi_rescan_device(&device->sdev_gendev);
		default:
			break;
		}
		scsi_device_put(device);
	}
	if (device_config_needed == ADD) {
		scsi_add_device(dev->scsi_host_ptr,
			CONTAINER_TO_CHANNEL(container),
			CONTAINER_TO_ID(container),
			CONTAINER_TO_LUN(container));
	}
}
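
/*
 * The sniffing above is a small two-step state machine per container:
 * an initial AIF (for example AifEnAddContainer) records the kind of
 * re-configuration in config_needed and arms config_waiting_on with
 * the AIF that must follow (normally AifEnConfigChange); when that
 * second AIF arrives within AIF_SNIFF_TIMEOUT, config_waiting_on is
 * cleared and the scan at the bottom of the function carries out the
 * deferred ADD/DELETE/CHANGE.
 */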
static int _aac_reset_adapter(struct aac_dev *aac)
{
	int index, quirks;
	int retval;
	struct Scsi_Host *host;
	struct scsi_device *dev;
	struct scsi_cmnd *command;
	struct scsi_cmnd *command_list;

	/*
	 * Assumptions:
	 *	- host is locked.
	 *	- in_reset is asserted, so no new i/o is getting to the
	 *	  card.
	 *	- The card is dead.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	aac_adapter_disable_int(aac);
	spin_unlock_irq(host->host_lock);
	kthread_stop(aac->thread);

	/*
	 *	A positive health value means the adapter is in a known DEAD
	 *	PANIC state and could be reset to `try again'.
	 */
	retval = aac_adapter_restart(aac, aac_adapter_check_health(aac));

	if (retval)
		goto out;

	/*
	 *	Loop through the fibs, close the synchronous FIBS
	 */
	for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
		struct fib *fib = &aac->fibs[index];
		if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
		  (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			schedule();
			retval = 0;
		}
	}
	/* Give some extra time for ioctls to complete. */
	if (retval == 0)
		ssleep(2);
	index = aac->cardtype;

	/*
	 * Re-initialize the adapter, first free resources, then carefully
	 * apply the initialization sequence to come back again. Only risk
	 * is a change in Firmware dropping cache, it is assumed the caller
	 * will ensure that i/o is quiesced and the card is flushed in that
	 * case.
	 */
	aac_fib_map_free(aac);
	aac->hw_fib_va = NULL;
	aac->hw_fib_pa = 0;
	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
	aac->comm_addr = NULL;
	aac->comm_phys = 0;
	kfree(aac->queues);
	aac->queues = NULL;
	free_irq(aac->pdev->irq, aac);
	kfree(aac->fsa_dev);
	aac->fsa_dev = NULL;
	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
			goto out;
	} else {
		if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL))))
			goto out;
	}
	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
		goto out;
	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
		if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
			goto out;
	aac->thread = kthread_run(aac_command_thread, aac, aac->name);
	if (IS_ERR(aac->thread)) {
		retval = PTR_ERR(aac->thread);
		goto out;
	}
	(void)aac_get_adapter_info(aac);
	quirks = aac_get_driver_ident(index)->quirks;
	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
		host->sg_tablesize = 34;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
		host->sg_tablesize = 17;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
	/*
	 * This is where the assumption that the Adapter is quiesced
	 * is important.
	 */
	command_list = NULL;
	__shost_for_each_device(dev, host) {
		unsigned long flags;
		spin_lock_irqsave(&dev->list_lock, flags);
		list_for_each_entry(command, &dev->cmd_list, list)
			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
				command->SCp.buffer = (struct scatterlist *)command_list;
				command_list = command;
			}
		spin_unlock_irqrestore(&dev->list_lock, flags);
	}
	while ((command = command_list)) {
		command_list = (struct scsi_cmnd *)command->SCp.buffer;
		command->SCp.buffer = NULL;
		command->result = DID_OK << 16
		  | COMMAND_COMPLETE << 8
		  | SAM_STAT_TASK_SET_FULL;
		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		command->scsi_done(command);
	}
	retval = 0;

out:
	aac->in_reset = 0;
	scsi_unblock_requests(host);
	spin_lock_irq(host->host_lock);
	return retval;
}
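
/*
 * The reset sequence above is, in order: quiesce (block requests,
 * disable interrupts, stop the AIF thread), kick the firmware via
 * aac_adapter_restart(), wake any synchronous waiters, tear down all
 * shared resources (fib area, comm region, queues, IRQ, fsa_dev), run
 * the card-type specific init routine again, restart the AIF thread,
 * and finally return every command the dead firmware still owned with
 * TASK SET FULL status so the midlayer can retry them.
 */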
int aac_check_health(struct aac_dev * aac)
{
	int BlinkLED;
	unsigned long time_now, flagv = 0;
	struct list_head * entry;
	struct Scsi_Host * host;

	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return 0;

	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return 0; /* OK */
	}

	aac->in_reset = 1;

	/* Fake up an AIF:
	 *	aac_aifcmd.command = AifCmdEventNotify = 1
	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
	 *	aac_aifcmd.data[2] = AifHighPriority = 3
	 *	aac_aifcmd.data[3] = BlinkLED
	 */
	time_now = jiffies/HZ;
	entry = aac->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	while (entry != &aac->fib_list) {
		/*
		 * Extract the fibctx
		 */
		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
		struct hw_fib * hw_fib;
		struct fib * fib;
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ, so do not
			 * panic ...
			 */
			u32 time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(aac, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
		if (fib && hw_fib) {
			struct aac_aifcmd * aif;

			memset(hw_fib, 0, sizeof(struct hw_fib));
			memset(fib, 0, sizeof(struct fib));
			fib->hw_fib_va = hw_fib;
			fib->dev = aac;
			aac_fib_init(fib);
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->data = hw_fib->data;
			aif = (struct aac_aifcmd *)hw_fib->data;
			aif->command = cpu_to_le32(AifCmdEventNotify);
			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
			aif->data[0] = cpu_to_le32(AifEnExpEvent);
			aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
			aif->data[2] = cpu_to_le32(AifHighPriority);
			aif->data[3] = cpu_to_le32(BlinkLED);

			/*
			 * Put the FIB onto the
			 * fibctx's fibs
			 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
			fibctx->count++;
			/*
			 * Set the event to wake up the
			 * thread that is waiting.
			 */
			up(&fibctx->wait_sem);
		} else {
			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
		}
		entry = entry->next;
	}
	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	if (BlinkLED < 0) {
		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
		goto out;
	}

	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);

	host = aac->scsi_host_ptr;
	spin_lock_irqsave(host->host_lock, flagv);
	BlinkLED = _aac_reset_adapter(aac);
	spin_unlock_irqrestore(host->host_lock, flagv);
	return BlinkLED;

out:
	aac->in_reset = 0;
	return BlinkLED;
}
/**
 *	aac_command_thread	-	command processing thread
 *	@dev: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */
int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while (1) {
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We Really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib_va;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->hw_fib_va = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */
				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
				struct fib ** fib_pool, ** fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 * Warning: no sleep allowed while
				 * holding spinlock. We take the estimate
				 * and pre-allocate a set of fibs outside the
				 * lock.
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else {
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
					/* No pool was allocated; zero num so the
					   copy loop below cannot dereference a
					   NULL pool pointer. */
					num = 0;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20) {
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > aif_timeout) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib_va = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					kfree(*hw_fib_p);
					kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				kfree(hw_fib_pool);
				kfree(fib_pool);
			}
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 *	There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
		schedule();

		if (kthread_should_stop())
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}