  1. /*
  2. * Adaptec AAC series RAID controller driver
  3. * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
  4. *
  5. * based on the old aacraid driver that is..
  6. * Adaptec aacraid device driver for Linux.
  7. *
  8. * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; either version 2, or (at your option)
  13. * any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18. * GNU General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; see the file COPYING. If not, write to
  22. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  23. *
  24. * Module Name:
  25. * commsup.c
  26. *
  27. * Abstract: Contain all routines that are required for FSA host/adapter
  28. * communication.
  29. *
  30. */
  31. #include <linux/kernel.h>
  32. #include <linux/init.h>
  33. #include <linux/types.h>
  34. #include <linux/sched.h>
  35. #include <linux/pci.h>
  36. #include <linux/spinlock.h>
  37. #include <linux/slab.h>
  38. #include <linux/completion.h>
  39. #include <linux/blkdev.h>
  40. #include <scsi/scsi_host.h>
  41. #include <asm/semaphore.h>
  42. #include "aacraid.h"
  43. /**
  44. * fib_map_alloc - allocate the fib objects
  45. * @dev: Adapter to allocate for
  46. *
  47. * Allocate and map the shared PCI space for the FIB blocks used to
  48. * talk to the Adaptec firmware.
  49. */
  50. static int fib_map_alloc(struct aac_dev *dev)
  51. {
  52. dprintk((KERN_INFO
  53. "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
  54. dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
  55. AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
  56. if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
  57. * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
  58. &dev->hw_fib_pa))==NULL)
  59. return -ENOMEM;
  60. return 0;
  61. }
  62. /**
  63. * fib_map_free - free the fib objects
  64. * @dev: Adapter to free
  65. *
  66. * Free the PCI mappings and the memory allocated for FIB blocks
  67. * on this adapter.
  68. */
  69. void fib_map_free(struct aac_dev *dev)
  70. {
  71. pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
  72. }
  73. /**
  74. * fib_setup - setup the fibs
  75. * @dev: Adapter to set up
  76. *
  77. * Allocate the PCI space for the fibs, map it and then intialise the
  78. * fib area, the unmapped fib data and also the free list
  79. */
  80. int fib_setup(struct aac_dev * dev)
  81. {
  82. struct fib *fibptr;
  83. struct hw_fib *hw_fib_va;
  84. dma_addr_t hw_fib_pa;
  85. int i;
  86. while (((i = fib_map_alloc(dev)) == -ENOMEM)
  87. && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
  88. dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
  89. dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
  90. }
  91. if (i<0)
  92. return -ENOMEM;
  93. hw_fib_va = dev->hw_fib_va;
  94. hw_fib_pa = dev->hw_fib_pa;
  95. memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
  96. /*
  97. * Initialise the fibs
  98. */
  99. for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
  100. {
  101. fibptr->dev = dev;
  102. fibptr->hw_fib = hw_fib_va;
  103. fibptr->data = (void *) fibptr->hw_fib->data;
  104. fibptr->next = fibptr+1; /* Forward chain the fibs */
  105. init_MUTEX_LOCKED(&fibptr->event_wait);
  106. spin_lock_init(&fibptr->event_lock);
  107. hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
  108. hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
  109. fibptr->hw_fib_pa = hw_fib_pa;
  110. hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
  111. hw_fib_pa = hw_fib_pa + dev->max_fib_size;
  112. }
  113. /*
  114. * Add the fib chain to the free list
  115. */
  116. dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
  117. /*
  118. * Enable this to debug out of queue space
  119. */
  120. dev->free_fib = &dev->fibs[0];
  121. return 0;
  122. }
  123. /**
  124. * fib_alloc - allocate a fib
  125. * @dev: Adapter to allocate the fib for
  126. *
  127. * Allocate a fib from the adapter fib pool. If the pool is empty we
  128. * return NULL.
  129. */
  130. struct fib * fib_alloc(struct aac_dev *dev)
  131. {
  132. struct fib * fibptr;
  133. unsigned long flags;
  134. spin_lock_irqsave(&dev->fib_lock, flags);
  135. fibptr = dev->free_fib;
  136. if(!fibptr){
  137. spin_unlock_irqrestore(&dev->fib_lock, flags);
  138. return fibptr;
  139. }
  140. dev->free_fib = fibptr->next;
  141. spin_unlock_irqrestore(&dev->fib_lock, flags);
  142. /*
  143. * Set the proper node type code and node byte size
  144. */
  145. fibptr->type = FSAFS_NTC_FIB_CONTEXT;
  146. fibptr->size = sizeof(struct fib);
  147. /*
  148. * Null out fields that depend on being zero at the start of
  149. * each I/O
  150. */
  151. fibptr->hw_fib->header.XferState = 0;
  152. fibptr->callback = NULL;
  153. fibptr->callback_data = NULL;
  154. return fibptr;
  155. }
  156. /**
  157. * fib_free - free a fib
  158. * @fibptr: fib to free up
  159. *
  160. * Frees up a fib and places it on the appropriate queue
  161. * (either free or timed out)
  162. */
  163. void fib_free(struct fib * fibptr)
  164. {
  165. unsigned long flags;
  166. spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
  167. if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
  168. aac_config.fib_timeouts++;
  169. fibptr->next = fibptr->dev->timeout_fib;
  170. fibptr->dev->timeout_fib = fibptr;
  171. } else {
  172. if (fibptr->hw_fib->header.XferState != 0) {
  173. printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
  174. (void*)fibptr,
  175. le32_to_cpu(fibptr->hw_fib->header.XferState));
  176. }
  177. fibptr->next = fibptr->dev->free_fib;
  178. fibptr->dev->free_fib = fibptr;
  179. }
  180. spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
  181. }
  182. /**
  183. * fib_init - initialise a fib
  184. * @fibptr: The fib to initialize
  185. *
  186. * Set up the generic fib fields ready for use
  187. */
  188. void fib_init(struct fib *fibptr)
  189. {
  190. struct hw_fib *hw_fib = fibptr->hw_fib;
  191. hw_fib->header.StructType = FIB_MAGIC;
  192. hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
  193. hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
  194. hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
  195. hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
  196. hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
  197. }
  198. /**
  199. * fib_deallocate - deallocate a fib
  200. * @fibptr: fib to deallocate
  201. *
  202. * Will deallocate and return to the free pool the FIB pointed to by the
  203. * caller.
  204. */
  205. static void fib_dealloc(struct fib * fibptr)
  206. {
  207. struct hw_fib *hw_fib = fibptr->hw_fib;
  208. if(hw_fib->header.StructType != FIB_MAGIC)
  209. BUG();
  210. hw_fib->header.XferState = 0;
  211. }
  212. /*
  213. * Commuication primitives define and support the queuing method we use to
  214. * support host to adapter commuication. All queue accesses happen through
  215. * these routines and are the only routines which have a knowledge of the
  216. * how these queues are implemented.
  217. */
  218. /**
  219. * aac_get_entry - get a queue entry
  220. * @dev: Adapter
  221. * @qid: Queue Number
  222. * @entry: Entry return
  223. * @index: Index return
  224. * @nonotify: notification control
  225. *
  226. * With a priority the routine returns a queue entry if the queue has free entries. If the queue
  227. * is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is
  228. * returned.
  229. */
  230. static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
  231. {
  232. struct aac_queue * q;
  233. unsigned long idx;
  234. /*
  235. * All of the queues wrap when they reach the end, so we check
  236. * to see if they have reached the end and if they have we just
  237. * set the index back to zero. This is a wrap. You could or off
  238. * the high bits in all updates but this is a bit faster I think.
  239. */
  240. q = &dev->queues->queue[qid];
  241. idx = *index = le32_to_cpu(*(q->headers.producer));
  242. /* Interrupt Moderation, only interrupt for first two entries */
  243. if (idx != le32_to_cpu(*(q->headers.consumer))) {
  244. if (--idx == 0) {
  245. if (qid == AdapHighCmdQueue)
  246. idx = ADAP_HIGH_CMD_ENTRIES;
  247. else if (qid == AdapNormCmdQueue)
  248. idx = ADAP_NORM_CMD_ENTRIES;
  249. else if (qid == AdapHighRespQueue)
  250. idx = ADAP_HIGH_RESP_ENTRIES;
  251. else if (qid == AdapNormRespQueue)
  252. idx = ADAP_NORM_RESP_ENTRIES;
  253. }
  254. if (idx != le32_to_cpu(*(q->headers.consumer)))
  255. *nonotify = 1;
  256. }
  257. if (qid == AdapHighCmdQueue) {
  258. if (*index >= ADAP_HIGH_CMD_ENTRIES)
  259. *index = 0;
  260. } else if (qid == AdapNormCmdQueue) {
  261. if (*index >= ADAP_NORM_CMD_ENTRIES)
  262. *index = 0; /* Wrap to front of the Producer Queue. */
  263. }
  264. else if (qid == AdapHighRespQueue)
  265. {
  266. if (*index >= ADAP_HIGH_RESP_ENTRIES)
  267. *index = 0;
  268. }
  269. else if (qid == AdapNormRespQueue)
  270. {
  271. if (*index >= ADAP_NORM_RESP_ENTRIES)
  272. *index = 0; /* Wrap to front of the Producer Queue. */
  273. }
  274. else {
  275. printk("aacraid: invalid qid\n");
  276. BUG();
  277. }
  278. if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
  279. printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
  280. qid, q->numpending);
  281. return 0;
  282. } else {
  283. *entry = q->base + *index;
  284. return 1;
  285. }
  286. }
  287. /**
  288. * aac_queue_get - get the next free QE
  289. * @dev: Adapter
  290. * @index: Returned index
  291. * @priority: Priority of fib
  292. * @fib: Fib to associate with the queue entry
  293. * @wait: Wait if queue full
  294. * @fibptr: Driver fib object to go with fib
  295. * @nonotify: Don't notify the adapter
  296. *
  297. * Gets the next free QE off the requested priorty adapter command
  298. * queue and associates the Fib with the QE. The QE represented by
  299. * index is ready to insert on the queue when this routine returns
  300. * success.
  301. */
  302. static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
  303. {
  304. struct aac_entry * entry = NULL;
  305. int map = 0;
  306. struct aac_queue * q = &dev->queues->queue[qid];
  307. spin_lock_irqsave(q->lock, q->SavedIrql);
  308. if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
  309. {
  310. /* if no entries wait for some if caller wants to */
  311. while (!aac_get_entry(dev, qid, &entry, index, nonotify))
  312. {
  313. printk(KERN_ERR "GetEntries failed\n");
  314. }
  315. /*
  316. * Setup queue entry with a command, status and fib mapped
  317. */
  318. entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
  319. map = 1;
  320. }
  321. else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
  322. {
  323. while(!aac_get_entry(dev, qid, &entry, index, nonotify))
  324. {
  325. /* if no entries wait for some if caller wants to */
  326. }
  327. /*
  328. * Setup queue entry with command, status and fib mapped
  329. */
  330. entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
  331. entry->addr = hw_fib->header.SenderFibAddress;
  332. /* Restore adapters pointer to the FIB */
  333. hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter now where to find its data */
  334. map = 0;
  335. }
  336. /*
  337. * If MapFib is true than we need to map the Fib and put pointers
  338. * in the queue entry.
  339. */
  340. if (map)
  341. entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
  342. return 0;
  343. }
  344. /**
  345. * aac_insert_entry - insert a queue entry
  346. * @dev: Adapter
  347. * @index: Index of entry to insert
  348. * @qid: Queue number
  349. * @nonotify: Suppress adapter notification
  350. *
  351. * Gets the next free QE off the requested priorty adapter command
  352. * queue and associates the Fib with the QE. The QE represented by
  353. * index is ready to insert on the queue when this routine returns
  354. * success.
  355. */
  356. static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
  357. {
  358. struct aac_queue * q = &dev->queues->queue[qid];
  359. if(q == NULL)
  360. BUG();
  361. *(q->headers.producer) = cpu_to_le32(index + 1);
  362. spin_unlock_irqrestore(q->lock, q->SavedIrql);
  363. if (qid == AdapHighCmdQueue ||
  364. qid == AdapNormCmdQueue ||
  365. qid == AdapHighRespQueue ||
  366. qid == AdapNormRespQueue)
  367. {
  368. if (!nonotify)
  369. aac_adapter_notify(dev, qid);
  370. }
  371. else
  372. printk("Suprise insert!\n");
  373. return 0;
  374. }
  375. /*
  376. * Define the highest level of host to adapter communication routines.
  377. * These routines will support host to adapter FS commuication. These
  378. * routines have no knowledge of the commuication method used. This level
  379. * sends and receives FIBs. This level has no knowledge of how these FIBs
  380. * get passed back and forth.
  381. */
  382. /**
  383. * fib_send - send a fib to the adapter
  384. * @command: Command to send
  385. * @fibptr: The fib
  386. * @size: Size of fib data area
  387. * @priority: Priority of Fib
  388. * @wait: Async/sync select
  389. * @reply: True if a reply is wanted
  390. * @callback: Called with reply
  391. * @callback_data: Passed to callback
  392. *
  393. * Sends the requested FIB to the adapter and optionally will wait for a
  394. * response FIB. If the caller does not wish to wait for a response than
  395. * an event to wait on must be supplied. This event will be set when a
  396. * response FIB is received from the adapter.
  397. */
  398. int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
  399. {
  400. u32 index;
  401. u32 qid;
  402. struct aac_dev * dev = fibptr->dev;
  403. unsigned long nointr = 0;
  404. struct hw_fib * hw_fib = fibptr->hw_fib;
  405. struct aac_queue * q;
  406. unsigned long flags = 0;
  407. if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
  408. return -EBUSY;
  409. /*
  410. * There are 5 cases with the wait and reponse requested flags.
  411. * The only invalid cases are if the caller requests to wait and
  412. * does not request a response and if the caller does not want a
  413. * response and the Fib is not allocated from pool. If a response
  414. * is not requesed the Fib will just be deallocaed by the DPC
  415. * routine when the response comes back from the adapter. No
  416. * further processing will be done besides deleting the Fib. We
  417. * will have a debug mode where the adapter can notify the host
  418. * it had a problem and the host can log that fact.
  419. */
  420. if (wait && !reply) {
  421. return -EINVAL;
  422. } else if (!wait && reply) {
  423. hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
  424. FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
  425. } else if (!wait && !reply) {
  426. hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
  427. FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
  428. } else if (wait && reply) {
  429. hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
  430. FIB_COUNTER_INCREMENT(aac_config.NormalSent);
  431. }
  432. /*
  433. * Map the fib into 32bits by using the fib number
  434. */
  435. hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
  436. hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
  437. /*
  438. * Set FIB state to indicate where it came from and if we want a
  439. * response from the adapter. Also load the command from the
  440. * caller.
  441. *
  442. * Map the hw fib pointer as a 32bit value
  443. */
  444. hw_fib->header.Command = cpu_to_le16(command);
  445. hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
  446. fibptr->hw_fib->header.Flags = 0; /* 0 the flags field - internal only*/
  447. /*
  448. * Set the size of the Fib we want to send to the adapter
  449. */
  450. hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
  451. if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
  452. return -EMSGSIZE;
  453. }
  454. /*
  455. * Get a queue entry connect the FIB to it and send an notify
  456. * the adapter a command is ready.
  457. */
  458. if (priority == FsaHigh) {
  459. hw_fib->header.XferState |= cpu_to_le32(HighPriority);
  460. qid = AdapHighCmdQueue;
  461. } else {
  462. hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
  463. qid = AdapNormCmdQueue;
  464. }
  465. q = &dev->queues->queue[qid];
  466. if(wait)
  467. spin_lock_irqsave(&fibptr->event_lock, flags);
  468. if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
  469. return -EWOULDBLOCK;
  470. dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
  471. dprintk((KERN_DEBUG "Fib contents:.\n"));
  472. dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
  473. dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
  474. dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
  475. dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
  476. dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
  477. /*
  478. * Fill in the Callback and CallbackContext if we are not
  479. * going to wait.
  480. */
  481. if (!wait) {
  482. fibptr->callback = callback;
  483. fibptr->callback_data = callback_data;
  484. }
  485. FIB_COUNTER_INCREMENT(aac_config.FibsSent);
  486. list_add_tail(&fibptr->queue, &q->pendingq);
  487. q->numpending++;
  488. fibptr->done = 0;
  489. fibptr->flags = 0;
  490. if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
  491. return -EWOULDBLOCK;
  492. /*
  493. * If the caller wanted us to wait for response wait now.
  494. */
  495. if (wait) {
  496. spin_unlock_irqrestore(&fibptr->event_lock, flags);
  497. down(&fibptr->event_wait);
  498. if(fibptr->done == 0)
  499. BUG();
  500. if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
  501. return -ETIMEDOUT;
  502. } else {
  503. return 0;
  504. }
  505. }
  506. /*
  507. * If the user does not want a response than return success otherwise
  508. * return pending
  509. */
  510. if (reply)
  511. return -EINPROGRESS;
  512. else
  513. return 0;
  514. }
  515. /**
  516. * aac_consumer_get - get the top of the queue
  517. * @dev: Adapter
  518. * @q: Queue
  519. * @entry: Return entry
  520. *
  521. * Will return a pointer to the entry on the top of the queue requested that
  522. * we are a consumer of, and return the address of the queue entry. It does
  523. * not change the state of the queue.
  524. */
  525. int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
  526. {
  527. u32 index;
  528. int status;
  529. if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
  530. status = 0;
  531. } else {
  532. /*
  533. * The consumer index must be wrapped if we have reached
  534. * the end of the queue, else we just use the entry
  535. * pointed to by the header index
  536. */
  537. if (le32_to_cpu(*q->headers.consumer) >= q->entries)
  538. index = 0;
  539. else
  540. index = le32_to_cpu(*q->headers.consumer);
  541. *entry = q->base + index;
  542. status = 1;
  543. }
  544. return(status);
  545. }
  546. /**
  547. * aac_consumer_free - free consumer entry
  548. * @dev: Adapter
  549. * @q: Queue
  550. * @qid: Queue ident
  551. *
  552. * Frees up the current top of the queue we are a consumer of. If the
  553. * queue was full notify the producer that the queue is no longer full.
  554. */
  555. void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
  556. {
  557. int wasfull = 0;
  558. u32 notify;
  559. if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
  560. wasfull = 1;
  561. if (le32_to_cpu(*q->headers.consumer) >= q->entries)
  562. *q->headers.consumer = cpu_to_le32(1);
  563. else
  564. *q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
  565. if (wasfull) {
  566. switch (qid) {
  567. case HostNormCmdQueue:
  568. notify = HostNormCmdNotFull;
  569. break;
  570. case HostHighCmdQueue:
  571. notify = HostHighCmdNotFull;
  572. break;
  573. case HostNormRespQueue:
  574. notify = HostNormRespNotFull;
  575. break;
  576. case HostHighRespQueue:
  577. notify = HostHighRespNotFull;
  578. break;
  579. default:
  580. BUG();
  581. return;
  582. }
  583. aac_adapter_notify(dev, notify);
  584. }
  585. }
  586. /**
  587. * fib_adapter_complete - complete adapter issued fib
  588. * @fibptr: fib to complete
  589. * @size: size of fib
  590. *
  591. * Will do all necessary work to complete a FIB that was sent from
  592. * the adapter.
  593. */
  594. int fib_adapter_complete(struct fib * fibptr, unsigned short size)
  595. {
  596. struct hw_fib * hw_fib = fibptr->hw_fib;
  597. struct aac_dev * dev = fibptr->dev;
  598. unsigned long nointr = 0;
  599. if (hw_fib->header.XferState == 0)
  600. return 0;
  601. /*
  602. * If we plan to do anything check the structure type first.
  603. */
  604. if ( hw_fib->header.StructType != FIB_MAGIC ) {
  605. return -EINVAL;
  606. }
  607. /*
  608. * This block handles the case where the adapter had sent us a
  609. * command and we have finished processing the command. We
  610. * call completeFib when we are done processing the command
  611. * and want to send a response back to the adapter. This will
  612. * send the completed cdb to the adapter.
  613. */
  614. if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
  615. hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
  616. if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
  617. u32 index;
  618. if (size)
  619. {
  620. size += sizeof(struct aac_fibhdr);
  621. if (size > le16_to_cpu(hw_fib->header.SenderSize))
  622. return -EMSGSIZE;
  623. hw_fib->header.Size = cpu_to_le16(size);
  624. }
  625. if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
  626. return -EWOULDBLOCK;
  627. }
  628. if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
  629. }
  630. } else if (hw_fib->header.XferState &
  631. cpu_to_le32(NormalPriority)) {
  632. u32 index;
  633. if (size) {
  634. size += sizeof(struct aac_fibhdr);
  635. if (size > le16_to_cpu(hw_fib->header.SenderSize))
  636. return -EMSGSIZE;
  637. hw_fib->header.Size = cpu_to_le16(size);
  638. }
  639. if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
  640. return -EWOULDBLOCK;
  641. if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
  642. {
  643. }
  644. }
  645. }
  646. else
  647. {
  648. printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
  649. BUG();
  650. }
  651. return 0;
  652. }
  653. /**
  654. * fib_complete - fib completion handler
  655. * @fib: FIB to complete
  656. *
  657. * Will do all necessary work to complete a FIB.
  658. */
  659. int fib_complete(struct fib * fibptr)
  660. {
  661. struct hw_fib * hw_fib = fibptr->hw_fib;
  662. /*
  663. * Check for a fib which has already been completed
  664. */
  665. if (hw_fib->header.XferState == 0)
  666. return 0;
  667. /*
  668. * If we plan to do anything check the structure type first.
  669. */
  670. if (hw_fib->header.StructType != FIB_MAGIC)
  671. return -EINVAL;
  672. /*
  673. * This block completes a cdb which orginated on the host and we
  674. * just need to deallocate the cdb or reinit it. At this point the
  675. * command is complete that we had sent to the adapter and this
  676. * cdb could be reused.
  677. */
  678. if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
  679. (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
  680. {
  681. fib_dealloc(fibptr);
  682. }
  683. else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
  684. {
  685. /*
  686. * This handles the case when the host has aborted the I/O
  687. * to the adapter because the adapter is not responding
  688. */
  689. fib_dealloc(fibptr);
  690. } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
  691. fib_dealloc(fibptr);
  692. } else {
  693. BUG();
  694. }
  695. return 0;
  696. }
  697. /**
  698. * aac_printf - handle printf from firmware
  699. * @dev: Adapter
  700. * @val: Message info
  701. *
  702. * Print a message passed to us by the controller firmware on the
  703. * Adaptec board
  704. */
  705. void aac_printf(struct aac_dev *dev, u32 val)
  706. {
  707. char *cp = dev->printfbuf;
  708. if (dev->printf_enabled)
  709. {
  710. int length = val & 0xffff;
  711. int level = (val >> 16) & 0xffff;
  712. /*
  713. * The size of the printfbuf is set in port.c
  714. * There is no variable or define for it
  715. */
  716. if (length > 255)
  717. length = 255;
  718. if (cp[length] != 0)
  719. cp[length] = 0;
  720. if (level == LOG_AAC_HIGH_ERROR)
  721. printk(KERN_WARNING "aacraid:%s", cp);
  722. else
  723. printk(KERN_INFO "aacraid:%s", cp);
  724. }
  725. memset(cp, 0, 256);
  726. }
  727. /**
  728. * aac_command_thread - command processing thread
  729. * @dev: Adapter to monitor
  730. *
  731. * Waits on the commandready event in it's queue. When the event gets set
  732. * it will pull FIBs off it's queue. It will continue to pull FIBs off
  733. * until the queue is empty. When the queue is empty it will wait for
  734. * more FIBs.
  735. */
  736. int aac_command_thread(struct aac_dev * dev)
  737. {
  738. struct hw_fib *hw_fib, *hw_newfib;
  739. struct fib *fib, *newfib;
  740. struct aac_queue_block *queues = dev->queues;
  741. struct aac_fib_context *fibctx;
  742. unsigned long flags;
  743. DECLARE_WAITQUEUE(wait, current);
  744. /*
  745. * We can only have one thread per adapter for AIF's.
  746. */
  747. if (dev->aif_thread)
  748. return -EINVAL;
  749. /*
  750. * Set up the name that will appear in 'ps'
  751. * stored in task_struct.comm[16].
  752. */
  753. daemonize("aacraid");
  754. allow_signal(SIGKILL);
  755. /*
  756. * Let the DPC know it has a place to send the AIF's to.
  757. */
  758. dev->aif_thread = 1;
  759. add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
  760. set_current_state(TASK_INTERRUPTIBLE);
  761. while(1)
  762. {
  763. spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
  764. while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
  765. struct list_head *entry;
  766. struct aac_aifcmd * aifcmd;
  767. set_current_state(TASK_RUNNING);
  768. entry = queues->queue[HostNormCmdQueue].cmdq.next;
  769. list_del(entry);
  770. spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
  771. fib = list_entry(entry, struct fib, fiblink);
  772. /*
  773. * We will process the FIB here or pass it to a
  774. * worker thread that is TBD. We Really can't
  775. * do anything at this point since we don't have
  776. * anything defined for this thread to do.
  777. */
  778. hw_fib = fib->hw_fib;
  779. memset(fib, 0, sizeof(struct fib));
  780. fib->type = FSAFS_NTC_FIB_CONTEXT;
  781. fib->size = sizeof( struct fib );
  782. fib->hw_fib = hw_fib;
  783. fib->data = hw_fib->data;
  784. fib->dev = dev;
  785. /*
  786. * We only handle AifRequest fibs from the adapter.
  787. */
  788. aifcmd = (struct aac_aifcmd *) hw_fib->data;
  789. if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
  790. /* Handle Driver Notify Events */
  791. *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
  792. fib_adapter_complete(fib, (u16)sizeof(u32));
  793. } else {
  794. struct list_head *entry;
  795. /* The u32 here is important and intended. We are using
  796. 32bit wrapping time to fit the adapter field */
  797. u32 time_now, time_last;
  798. unsigned long flagv;
  799. time_now = jiffies/HZ;
  800. spin_lock_irqsave(&dev->fib_lock, flagv);
  801. entry = dev->fib_list.next;
  802. /*
  803. * For each Context that is on the
  804. * fibctxList, make a copy of the
  805. * fib, and then set the event to wake up the
  806. * thread that is waiting for it.
  807. */
  808. while (entry != &dev->fib_list) {
  809. /*
  810. * Extract the fibctx
  811. */
  812. fibctx = list_entry(entry, struct aac_fib_context, next);
  813. /*
  814. * Check if the queue is getting
  815. * backlogged
  816. */
  817. if (fibctx->count > 20)
  818. {
  819. /*
  820. * It's *not* jiffies folks,
  821. * but jiffies / HZ so do not
  822. * panic ...
  823. */
  824. time_last = fibctx->jiffies;
  825. /*
  826. * Has it been > 2 minutes
  827. * since the last read off
  828. * the queue?
  829. */
  830. if ((time_now - time_last) > 120) {
  831. entry = entry->next;
  832. aac_close_fib_context(dev, fibctx);
  833. continue;
  834. }
  835. }
  836. /*
  837. * Warning: no sleep allowed while
  838. * holding spinlock
  839. */
  840. hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
  841. newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
  842. if (newfib && hw_newfib) {
  843. /*
  844. * Make the copy of the FIB
  845. */
  846. memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
  847. memcpy(newfib, fib, sizeof(struct fib));
  848. newfib->hw_fib = hw_newfib;
  849. /*
  850. * Put the FIB onto the
  851. * fibctx's fibs
  852. */
  853. list_add_tail(&newfib->fiblink, &fibctx->fib_list);
  854. fibctx->count++;
  855. /*
  856. * Set the event to wake up the
  857. * thread that will waiting.
  858. */
  859. up(&fibctx->wait_sem);
  860. } else {
  861. printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
  862. if(newfib)
  863. kfree(newfib);
  864. if(hw_newfib)
  865. kfree(hw_newfib);
  866. }
  867. entry = entry->next;
  868. }
  869. /*
  870. * Set the status of this FIB
  871. */
  872. *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
  873. fib_adapter_complete(fib, sizeof(u32));
  874. spin_unlock_irqrestore(&dev->fib_lock, flagv);
  875. }
  876. spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
  877. kfree(fib);
  878. }
  879. /*
  880. * There are no more AIF's
  881. */
  882. spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
  883. schedule();
  884. if(signal_pending(current))
  885. break;
  886. set_current_state(TASK_INTERRUPTIBLE);
  887. }
  888. remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
  889. dev->aif_thread = 0;
  890. complete_and_exit(&dev->aif_completion, 0);
  891. }