commsup.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938
  1. /*
  2. * Adaptec AAC series RAID controller driver
  3. * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
  4. *
  5. * based on the old aacraid driver that is..
  6. * Adaptec aacraid device driver for Linux.
  7. *
  8. * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; either version 2, or (at your option)
  13. * any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18. * GNU General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; see the file COPYING. If not, write to
  22. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  23. *
  24. * Module Name:
  25. * commsup.c
  26. *
 * Abstract: Contains all routines that are required for FSA host/adapter
 * communication.
  29. *
  30. */
  31. #include <linux/kernel.h>
  32. #include <linux/init.h>
  33. #include <linux/types.h>
  34. #include <linux/sched.h>
  35. #include <linux/pci.h>
  36. #include <linux/spinlock.h>
  37. #include <linux/slab.h>
  38. #include <linux/completion.h>
  39. #include <linux/blkdev.h>
  40. #include <asm/semaphore.h>
  41. #include "aacraid.h"
  42. /**
  43. * fib_map_alloc - allocate the fib objects
  44. * @dev: Adapter to allocate for
  45. *
  46. * Allocate and map the shared PCI space for the FIB blocks used to
  47. * talk to the Adaptec firmware.
  48. */
  49. static int fib_map_alloc(struct aac_dev *dev)
  50. {
  51. if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, &dev->hw_fib_pa))==NULL)
  52. return -ENOMEM;
  53. return 0;
  54. }
  55. /**
  56. * fib_map_free - free the fib objects
  57. * @dev: Adapter to free
  58. *
  59. * Free the PCI mappings and the memory allocated for FIB blocks
  60. * on this adapter.
  61. */
  62. void fib_map_free(struct aac_dev *dev)
  63. {
  64. pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, dev->hw_fib_va, dev->hw_fib_pa);
  65. }
  66. /**
  67. * fib_setup - setup the fibs
  68. * @dev: Adapter to set up
  69. *
  70. * Allocate the PCI space for the fibs, map it and then intialise the
  71. * fib area, the unmapped fib data and also the free list
  72. */
  73. int fib_setup(struct aac_dev * dev)
  74. {
  75. struct fib *fibptr;
  76. struct hw_fib *hw_fib_va;
  77. dma_addr_t hw_fib_pa;
  78. int i;
  79. if(fib_map_alloc(dev)<0)
  80. return -ENOMEM;
  81. hw_fib_va = dev->hw_fib_va;
  82. hw_fib_pa = dev->hw_fib_pa;
  83. memset(hw_fib_va, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
  84. /*
  85. * Initialise the fibs
  86. */
  87. for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++)
  88. {
  89. fibptr->dev = dev;
  90. fibptr->hw_fib = hw_fib_va;
  91. fibptr->data = (void *) fibptr->hw_fib->data;
  92. fibptr->next = fibptr+1; /* Forward chain the fibs */
  93. init_MUTEX_LOCKED(&fibptr->event_wait);
  94. spin_lock_init(&fibptr->event_lock);
  95. hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
  96. hw_fib_va->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
  97. fibptr->hw_fib_pa = hw_fib_pa;
  98. hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + sizeof(struct hw_fib));
  99. hw_fib_pa = hw_fib_pa + sizeof(struct hw_fib);
  100. }
  101. /*
  102. * Add the fib chain to the free list
  103. */
  104. dev->fibs[AAC_NUM_FIB-1].next = NULL;
  105. /*
  106. * Enable this to debug out of queue space
  107. */
  108. dev->free_fib = &dev->fibs[0];
  109. return 0;
  110. }
  111. /**
  112. * fib_alloc - allocate a fib
  113. * @dev: Adapter to allocate the fib for
  114. *
  115. * Allocate a fib from the adapter fib pool. If the pool is empty we
  116. * wait for fibs to become free.
  117. */
  118. struct fib * fib_alloc(struct aac_dev *dev)
  119. {
  120. struct fib * fibptr;
  121. unsigned long flags;
  122. spin_lock_irqsave(&dev->fib_lock, flags);
  123. fibptr = dev->free_fib;
  124. /* Cannot sleep here or you get hangs. Instead we did the
  125. maths at compile time. */
  126. if(!fibptr)
  127. BUG();
  128. dev->free_fib = fibptr->next;
  129. spin_unlock_irqrestore(&dev->fib_lock, flags);
  130. /*
  131. * Set the proper node type code and node byte size
  132. */
  133. fibptr->type = FSAFS_NTC_FIB_CONTEXT;
  134. fibptr->size = sizeof(struct fib);
  135. /*
  136. * Null out fields that depend on being zero at the start of
  137. * each I/O
  138. */
  139. fibptr->hw_fib->header.XferState = 0;
  140. fibptr->callback = NULL;
  141. fibptr->callback_data = NULL;
  142. return fibptr;
  143. }
  144. /**
  145. * fib_free - free a fib
  146. * @fibptr: fib to free up
  147. *
  148. * Frees up a fib and places it on the appropriate queue
  149. * (either free or timed out)
  150. */
  151. void fib_free(struct fib * fibptr)
  152. {
  153. unsigned long flags;
  154. spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
  155. if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
  156. aac_config.fib_timeouts++;
  157. fibptr->next = fibptr->dev->timeout_fib;
  158. fibptr->dev->timeout_fib = fibptr;
  159. } else {
  160. if (fibptr->hw_fib->header.XferState != 0) {
  161. printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
  162. (void*)fibptr,
  163. le32_to_cpu(fibptr->hw_fib->header.XferState));
  164. }
  165. fibptr->next = fibptr->dev->free_fib;
  166. fibptr->dev->free_fib = fibptr;
  167. }
  168. spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
  169. }
  170. /**
  171. * fib_init - initialise a fib
  172. * @fibptr: The fib to initialize
  173. *
  174. * Set up the generic fib fields ready for use
  175. */
  176. void fib_init(struct fib *fibptr)
  177. {
  178. struct hw_fib *hw_fib = fibptr->hw_fib;
  179. hw_fib->header.StructType = FIB_MAGIC;
  180. hw_fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
  181. hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
  182. hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
  183. hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
  184. hw_fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
  185. }
  186. /**
  187. * fib_deallocate - deallocate a fib
  188. * @fibptr: fib to deallocate
  189. *
  190. * Will deallocate and return to the free pool the FIB pointed to by the
  191. * caller.
  192. */
  193. static void fib_dealloc(struct fib * fibptr)
  194. {
  195. struct hw_fib *hw_fib = fibptr->hw_fib;
  196. if(hw_fib->header.StructType != FIB_MAGIC)
  197. BUG();
  198. hw_fib->header.XferState = 0;
  199. }
/*
 * Communication primitives define and support the queuing method we use
 * to support host to adapter communication. All queue accesses happen
 * through these routines and they are the only routines which have
 * knowledge of how these queues are implemented.
 */
  206. /**
  207. * aac_get_entry - get a queue entry
  208. * @dev: Adapter
  209. * @qid: Queue Number
  210. * @entry: Entry return
  211. * @index: Index return
  212. * @nonotify: notification control
  213. *
  214. * With a priority the routine returns a queue entry if the queue has free entries. If the queue
  215. * is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is
  216. * returned.
  217. */
  218. static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
  219. {
  220. struct aac_queue * q;
  221. /*
  222. * All of the queues wrap when they reach the end, so we check
  223. * to see if they have reached the end and if they have we just
  224. * set the index back to zero. This is a wrap. You could or off
  225. * the high bits in all updates but this is a bit faster I think.
  226. */
  227. q = &dev->queues->queue[qid];
  228. *index = le32_to_cpu(*(q->headers.producer));
  229. if ((*index - 2) == le32_to_cpu(*(q->headers.consumer)))
  230. *nonotify = 1;
  231. if (qid == AdapHighCmdQueue) {
  232. if (*index >= ADAP_HIGH_CMD_ENTRIES)
  233. *index = 0;
  234. } else if (qid == AdapNormCmdQueue) {
  235. if (*index >= ADAP_NORM_CMD_ENTRIES)
  236. *index = 0; /* Wrap to front of the Producer Queue. */
  237. }
  238. else if (qid == AdapHighRespQueue)
  239. {
  240. if (*index >= ADAP_HIGH_RESP_ENTRIES)
  241. *index = 0;
  242. }
  243. else if (qid == AdapNormRespQueue)
  244. {
  245. if (*index >= ADAP_NORM_RESP_ENTRIES)
  246. *index = 0; /* Wrap to front of the Producer Queue. */
  247. }
  248. else {
  249. printk("aacraid: invalid qid\n");
  250. BUG();
  251. }
  252. if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
  253. printk(KERN_WARNING "Queue %d full, %d outstanding.\n",
  254. qid, q->numpending);
  255. return 0;
  256. } else {
  257. *entry = q->base + *index;
  258. return 1;
  259. }
  260. }
/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 *
 *	NOTE(review): q->lock is acquired here and intentionally left held
 *	on return (the flags are saved in q->SavedIrql); the matching
 *	unlock happens in aac_insert_entry().  The @wait parameter is not
 *	consulted: if the queue is full this routine busy-loops on
 *	aac_get_entry() until a slot frees up.
 */
static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;
	struct aac_queue * q = &dev->queues->queue[qid];
	/* Lock is released later by aac_insert_entry() — see note above */
	spin_lock_irqsave(q->lock, q->SavedIrql);
	if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
	{
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 * Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	}
	else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
	{
		while(!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			/* if no entries wait for some if caller wants to */
		}
		/*
		 * Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		/* For responses the adapter's own fib address goes in the QE */
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 * If map is true then we need to map the Fib and put its DMA
	 * address in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
  318. /**
  319. * aac_insert_entry - insert a queue entry
  320. * @dev: Adapter
  321. * @index: Index of entry to insert
  322. * @qid: Queue number
  323. * @nonotify: Suppress adapter notification
  324. *
  325. * Gets the next free QE off the requested priorty adapter command
  326. * queue and associates the Fib with the QE. The QE represented by
  327. * index is ready to insert on the queue when this routine returns
  328. * success.
  329. */
  330. static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
  331. {
  332. struct aac_queue * q = &dev->queues->queue[qid];
  333. if(q == NULL)
  334. BUG();
  335. *(q->headers.producer) = cpu_to_le32(index + 1);
  336. spin_unlock_irqrestore(q->lock, q->SavedIrql);
  337. if (qid == AdapHighCmdQueue ||
  338. qid == AdapNormCmdQueue ||
  339. qid == AdapHighRespQueue ||
  340. qid == AdapNormRespQueue)
  341. {
  342. if (!nonotify)
  343. aac_adapter_notify(dev, qid);
  344. }
  345. else
  346. printk("Suprise insert!\n");
  347. return 0;
  348. }
/*
 * Define the highest level of host to adapter communication routines.
 * These routines will support host to adapter FS communication. These
 * routines have no knowledge of the communication method used. This level
 * sends and receives FIBs. This level has no knowledge of how these FIBs
 * get passed back and forth.
 */
/**
 *	fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 *
 *	Returns 0 on success, -EBUSY if the fib is not host owned, -EINVAL
 *	for the wait-without-reply combination, -EMSGSIZE if the request
 *	exceeds the negotiated sender size, -ETIMEDOUT if a waited-for fib
 *	timed out, and -EINPROGRESS for a successfully queued async fib that
 *	expects a reply.
 */
int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
{
	u32 index;
	u32 qid;
	struct aac_dev * dev = fibptr->dev;
	unsigned long nointr = 0;
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_queue * q;
	unsigned long flags = 0;
	/* Only a host-owned fib may be (re)submitted */
	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 * There are 5 cases with the wait and response requested flags.
	 * The only invalid cases are if the caller requests to wait and
	 * does not request a response and if the caller does not want a
	 * response and the Fib is not allocated from pool. If a response
	 * is not requested the Fib will just be deallocated by the DPC
	 * routine when the response comes back from the adapter. No
	 * further processing will be done besides deleting the Fib. We
	 * will have a debug mode where the adapter can notify the host
	 * it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 * Map the fib into 32bits by using the fib number
	 */
	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 * Set FIB state to indicate where it came from and if we want a
	 * response from the adapter. Also load the command from the
	 * caller.
	 *
	 * Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 * Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 * Get a queue entry, connect the FIB to it and send a notify
	 * to the adapter that a command is ready.
	 */
	if (priority == FsaHigh) {
		hw_fib->header.XferState |= cpu_to_le32(HighPriority);
		qid = AdapHighCmdQueue;
	} else {
		hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
		qid = AdapNormCmdQueue;
	}
	q = &dev->queues->queue[qid];
	/*
	 * Hold event_lock so the completion path cannot signal us before
	 * the fib is fully queued; released just before we sleep below.
	 */
	if(wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	/*
	 * NOTE(review): aac_queue_get() currently always returns 0; if it
	 * ever returned < 0 while @wait is set, this path would return
	 * with event_lock still held — verify before changing either side.
	 */
	if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
		return -EWOULDBLOCK;
	dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command =               %d.\n", hw_fib->header.Command));
	dprintk((KERN_DEBUG "  XferState  =            %x.\n", hw_fib->header.XferState));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
	/*
	 * Fill in the Callback and CallbackContext if we are not
	 * going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}
	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
	list_add_tail(&fibptr->queue, &q->pendingq);
	q->numpending++;
	fibptr->done = 0;
	fibptr->flags = 0;
	/* NOTE(review): same event_lock caveat as above applies here */
	if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
		return -EWOULDBLOCK;
	/*
	 * If the caller wanted us to wait for response wait now.
	 */
	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		down(&fibptr->event_wait);
		if(fibptr->done == 0)
			BUG();
		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
			return -ETIMEDOUT;
		} else {
			return 0;
		}
	}
	/*
	 * If the user does not want a response then return success,
	 * otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
  489. /**
  490. * aac_consumer_get - get the top of the queue
  491. * @dev: Adapter
  492. * @q: Queue
  493. * @entry: Return entry
  494. *
  495. * Will return a pointer to the entry on the top of the queue requested that
  496. * we are a consumer of, and return the address of the queue entry. It does
  497. * not change the state of the queue.
  498. */
  499. int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
  500. {
  501. u32 index;
  502. int status;
  503. if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
  504. status = 0;
  505. } else {
  506. /*
  507. * The consumer index must be wrapped if we have reached
  508. * the end of the queue, else we just use the entry
  509. * pointed to by the header index
  510. */
  511. if (le32_to_cpu(*q->headers.consumer) >= q->entries)
  512. index = 0;
  513. else
  514. index = le32_to_cpu(*q->headers.consumer);
  515. *entry = q->base + index;
  516. status = 1;
  517. }
  518. return(status);
  519. }
  520. /**
  521. * aac_consumer_free - free consumer entry
  522. * @dev: Adapter
  523. * @q: Queue
  524. * @qid: Queue ident
  525. *
  526. * Frees up the current top of the queue we are a consumer of. If the
  527. * queue was full notify the producer that the queue is no longer full.
  528. */
  529. void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
  530. {
  531. int wasfull = 0;
  532. u32 notify;
  533. if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
  534. wasfull = 1;
  535. if (le32_to_cpu(*q->headers.consumer) >= q->entries)
  536. *q->headers.consumer = cpu_to_le32(1);
  537. else
  538. *q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
  539. if (wasfull) {
  540. switch (qid) {
  541. case HostNormCmdQueue:
  542. notify = HostNormCmdNotFull;
  543. break;
  544. case HostHighCmdQueue:
  545. notify = HostHighCmdNotFull;
  546. break;
  547. case HostNormRespQueue:
  548. notify = HostNormRespNotFull;
  549. break;
  550. case HostHighRespQueue:
  551. notify = HostHighRespNotFull;
  552. break;
  553. default:
  554. BUG();
  555. return;
  556. }
  557. aac_adapter_notify(dev, notify);
  558. }
  559. }
/**
 *	fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter: marks it host-processed and queues it on the response
 *	queue matching its priority.  Returns 0 on success, -EINVAL for a
 *	non-magic fib, -EMSGSIZE if @size exceeds the negotiated sender
 *	size, or -EWOULDBLOCK if a response queue slot was unavailable.
 */
int fib_adapter_complete(struct fib * fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_dev * dev = fibptr->dev;
	unsigned long nointr = 0;
	/* A zero XferState means the fib was already completed */
	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 * If we plan to do anything check the structure type first.
	 */
	if ( hw_fib->header.StructType != FIB_MAGIC ) {
		return -EINVAL;
	}
	/*
	 * This block handles the case where the adapter had sent us a
	 * command and we have finished processing the command. We
	 * call completeFib when we are done processing the command
	 * and want to send a response back to the adapter. This will
	 * send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
		if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
			u32 index;
			/* A non-zero size replaces the fib's payload size */
			if (size)
			{
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			/* aac_queue_get() leaves q->lock held; aac_insert_entry() drops it */
			if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
				return -EWOULDBLOCK;
			}
			/* Insert failure is deliberately ignored (returns 0 today) */
			if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
			}
		} else if (hw_fib->header.XferState &
			cpu_to_le32(NormalPriority)) {
			u32 index;
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
				return -EWOULDBLOCK;
			if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
			{
			}
		}
	}
	else
	{
		/* Completing a fib the adapter never sent is a driver bug */
		printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}
  627. /**
  628. * fib_complete - fib completion handler
  629. * @fib: FIB to complete
  630. *
  631. * Will do all necessary work to complete a FIB.
  632. */
  633. int fib_complete(struct fib * fibptr)
  634. {
  635. struct hw_fib * hw_fib = fibptr->hw_fib;
  636. /*
  637. * Check for a fib which has already been completed
  638. */
  639. if (hw_fib->header.XferState == 0)
  640. return 0;
  641. /*
  642. * If we plan to do anything check the structure type first.
  643. */
  644. if (hw_fib->header.StructType != FIB_MAGIC)
  645. return -EINVAL;
  646. /*
  647. * This block completes a cdb which orginated on the host and we
  648. * just need to deallocate the cdb or reinit it. At this point the
  649. * command is complete that we had sent to the adapter and this
  650. * cdb could be reused.
  651. */
  652. if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
  653. (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
  654. {
  655. fib_dealloc(fibptr);
  656. }
  657. else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
  658. {
  659. /*
  660. * This handles the case when the host has aborted the I/O
  661. * to the adapter because the adapter is not responding
  662. */
  663. fib_dealloc(fibptr);
  664. } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
  665. fib_dealloc(fibptr);
  666. } else {
  667. BUG();
  668. }
  669. return 0;
  670. }
  671. /**
  672. * aac_printf - handle printf from firmware
  673. * @dev: Adapter
  674. * @val: Message info
  675. *
  676. * Print a message passed to us by the controller firmware on the
  677. * Adaptec board
  678. */
  679. void aac_printf(struct aac_dev *dev, u32 val)
  680. {
  681. int length = val & 0xffff;
  682. int level = (val >> 16) & 0xffff;
  683. char *cp = dev->printfbuf;
  684. /*
  685. * The size of the printfbuf is set in port.c
  686. * There is no variable or define for it
  687. */
  688. if (length > 255)
  689. length = 255;
  690. if (cp[length] != 0)
  691. cp[length] = 0;
  692. if (level == LOG_AAC_HIGH_ERROR)
  693. printk(KERN_WARNING "aacraid:%s", cp);
  694. else
  695. printk(KERN_INFO "aacraid:%s", cp);
  696. memset(cp, 0, 256);
  697. }
/**
 *	aac_command_thread	-	command processing thread
 *	@dev: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets
 *	set it will pull FIBs off its queue. It will continue to pull FIBs
 *	off till the queue is empty. When the queue is empty it will wait
 *	for more FIBs.  Only one such thread may exist per adapter; the
 *	thread exits (via complete_and_exit) when it receives a signal.
 */
int aac_command_thread(struct aac_dev * dev)
{
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_queue_block *queues = dev->queues;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	/*
	 * We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;
	/*
	 * Set up the name that will appear in 'ps'
	 * stored in task_struct.comm[16].
	 */
	daemonize("aacraid");
	allow_signal(SIGKILL);
	/*
	 * Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while(1)
	{
		spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
		while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;
			set_current_state(TASK_RUNNING);
			entry = queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);
			/* Drop the queue lock while this AIF is processed */
			spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 * We will process the FIB here or pass it to a
			 * worker thread that is TBD. We Really can't
			 * do anything at this point since we don't have
			 * anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib;
			/* Rebuild the driver fib around the hardware fib */
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof( struct fib );
			fib->hw_fib = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 * We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */
				u32 time_now, time_last;
				unsigned long flagv;
				time_now = jiffies/HZ;
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > 120) {
							/* Advance before closing: close frees fibctx */
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
					newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
					if (newfib && hw_newfib) {
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						/* Allocation failed: drop this AIF for this context */
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
						if(newfib)
							kfree(newfib);
						if(hw_newfib)
							kfree(hw_newfib);
					}
					entry = entry->next;
				}
				/*
				 * Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
			}
			/* Retake the queue lock before testing the list again */
			spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
			kfree(fib);
		}
		/*
		 * There are no more AIF's
		 */
		spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
		schedule();
		if(signal_pending(current))
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	complete_and_exit(&dev->aif_completion, 0);
}
  862. }