commctrl.c

/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commctrl.c
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "aacraid.h"
/**
 * ioctl_send_fib - send a FIB from userspace
 * @dev: adapter being processed
 * @arg: arguments to the ioctl call
 *
 * This routine sends a fib to the adapter on behalf of a user level
 * program.
 */
static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
    struct hw_fib * kfib;
    struct fib *fibptr;

    fibptr = fib_alloc(dev);
    if (fibptr == NULL)
        return -ENOMEM;

    kfib = fibptr->hw_fib;
    /*
     * First copy in the header so that we can check the size field.
     */
    if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
        fib_free(fibptr);
        return -EFAULT;
    }
    /*
     * Since we copy based on the fib header size, make sure that we
     * will not overrun the buffer when we copy the memory. Return
     * an error if we would.
     */
    if (le16_to_cpu(kfib->header.Size) >
        sizeof(struct hw_fib) - sizeof(struct aac_fibhdr)) {
        fib_free(fibptr);
        return -EINVAL;
    }

    if (copy_from_user(kfib, arg, le16_to_cpu(kfib->header.Size) +
            sizeof(struct aac_fibhdr))) {
        fib_free(fibptr);
        return -EFAULT;
    }

    if (kfib->header.Command == cpu_to_le32(TakeABreakPt)) {
        aac_adapter_interrupt(dev);
        /*
         * Since we didn't really send a fib, zero out the state to allow
         * cleanup code not to assert.
         */
        kfib->header.XferState = 0;
    } else {
        int retval = fib_send(kfib->header.Command, fibptr,
                le16_to_cpu(kfib->header.Size), FsaNormal,
                1, 1, NULL, NULL);
        if (retval) {
            fib_free(fibptr);
            return retval;
        }
        if (fib_complete(fibptr) != 0) {
            fib_free(fibptr);
            return -EINVAL;
        }
    }
    /*
     * Make sure that the size returned by the adapter (which includes
     * the header) is less than or equal to the size of a fib, so we
     * don't corrupt application data. Then copy that size to the user
     * buffer. (Don't try to add the header information again, since it
     * was already included by the adapter.)
     */
    if (copy_to_user(arg, (void *)kfib, le16_to_cpu(kfib->header.Size))) {
        fib_free(fibptr);
        return -EFAULT;
    }
    fib_free(fibptr);
    return 0;
}
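
/*
 * Illustrative sketch of how user space might drive the FSACTL_SENDFIB
 * path above.  This is not part of the driver; it assumes "fd" is an
 * already-open file descriptor on the adapter's management device and
 * that the aacraid ioctl and hw_fib definitions are visible to the
 * application.
 *
 *    struct hw_fib fib;
 *
 *    memset(&fib, 0, sizeof(fib));
 *    fib.header.Command = ...;          // adapter command of interest
 *    fib.header.Size = payload_bytes;   // payload size, header excluded
 *    // fill in fib.data[] with the command payload
 *    if (ioctl(fd, FSACTL_SENDFIB, &fib) == 0) {
 *        // on success the driver copies header.Size bytes back here
 *    }
 *
 * The driver rejects any header.Size larger than
 * sizeof(struct hw_fib) - sizeof(struct aac_fibhdr).
 */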
/**
 * open_getadapter_fib - open an adapter fib context
 * @dev: adapter to use
 * @arg: ioctl argument
 *
 * This routine creates a new AdapterFibContext, registers it on the
 * adapter's list and returns its unique handle to the user, so that
 * queued AIF fibs can later be collected with next_getadapter_fib().
 */
static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
    struct aac_fib_context * fibctx;
    int status;

    fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
    if (fibctx == NULL) {
        status = -ENOMEM;
    } else {
        unsigned long flags;
        struct list_head * entry;
        struct aac_fib_context * context;

        fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
        fibctx->size = sizeof(struct aac_fib_context);
        /*
         * Yes yes, I know this could be an index, but we have a
         * better guarantee of uniqueness for the locked loop below.
         * Without the aid of a persistent history, this also helps
         * reduce the chance that the opaque context would be reused.
         */
        fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
        /*
         * Initialize the mutex used to wait for the next AIF.
         */
        init_MUTEX_LOCKED(&fibctx->wait_sem);
        fibctx->wait = 0;
        /*
         * Initialize the fibs and set the count of fibs on
         * the list to 0.
         */
        fibctx->count = 0;
        INIT_LIST_HEAD(&fibctx->fib_list);
        fibctx->jiffies = jiffies/HZ;
        /*
         * Now add this context onto the adapter's
         * AdapterFibContext list.
         */
        spin_lock_irqsave(&dev->fib_lock, flags);
        /* Ensure that we have a unique identifier */
        entry = dev->fib_list.next;
        while (entry != &dev->fib_list) {
            context = list_entry(entry, struct aac_fib_context, next);
            if (context->unique == fibctx->unique) {
                /* Not unique (32 bits) */
                fibctx->unique++;
                entry = dev->fib_list.next;
            } else {
                entry = entry->next;
            }
        }
        list_add_tail(&fibctx->next, &dev->fib_list);
        spin_unlock_irqrestore(&dev->fib_lock, flags);

        if (copy_to_user(arg, &fibctx->unique,
                sizeof(fibctx->unique))) {
            status = -EFAULT;
        } else {
            status = 0;
        }
    }
    return status;
}
/**
 * next_getadapter_fib - get the next fib
 * @dev: adapter to use
 * @arg: ioctl argument
 *
 * This routine will get the next Fib, if available, from the AdapterFibContext
 * passed in from the user.
 */
static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
    struct fib_ioctl f;
    struct fib *fib;
    struct aac_fib_context *fibctx;
    int status;
    struct list_head * entry;
    unsigned long flags;

    if (copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
        return -EFAULT;
    /*
     * Verify that the HANDLE passed in was a valid AdapterFibContext
     *
     * Search the list of AdapterFibContext addresses on the adapter
     * to be sure this is a valid address
     */
    entry = dev->fib_list.next;
    fibctx = NULL;

    while (entry != &dev->fib_list) {
        fibctx = list_entry(entry, struct aac_fib_context, next);
        /*
         * Extract the AdapterFibContext from the Input parameters.
         */
        if (fibctx->unique == f.fibctx) { /* We found a winner */
            break;
        }
        entry = entry->next;
        fibctx = NULL;
    }
    if (!fibctx) {
        dprintk((KERN_INFO "Fib Context not found\n"));
        return -EINVAL;
    }

    if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
        (fibctx->size != sizeof(struct aac_fib_context))) {
        dprintk((KERN_INFO "Fib Context corrupt?\n"));
        return -EINVAL;
    }
    status = 0;
    spin_lock_irqsave(&dev->fib_lock, flags);
    /*
     * If there are no fibs to send back, then either wait or return
     * -EAGAIN
     */
return_fib:
    if (!list_empty(&fibctx->fib_list)) {
        struct list_head * entry;
        /*
         * Pull the next fib from the fibs
         */
        entry = fibctx->fib_list.next;
        list_del(entry);

        fib = list_entry(entry, struct fib, fiblink);
        fibctx->count--;
        spin_unlock_irqrestore(&dev->fib_lock, flags);
        if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) {
            kfree(fib->hw_fib);
            kfree(fib);
            return -EFAULT;
        }
        /*
         * Free the space occupied by this copy of the fib.
         */
        kfree(fib->hw_fib);
        kfree(fib);
        status = 0;
        fibctx->jiffies = jiffies/HZ;
    } else {
        spin_unlock_irqrestore(&dev->fib_lock, flags);
        if (f.wait) {
            if (down_interruptible(&fibctx->wait_sem) < 0) {
                status = -EINTR;
            } else {
                /* Lock again and retry */
                spin_lock_irqsave(&dev->fib_lock, flags);
                goto return_fib;
            }
        } else {
            status = -EAGAIN;
        }
    }
    return status;
}
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
{
    struct fib *fib;

    /*
     * First free any FIBs that have not been consumed.
     */
    while (!list_empty(&fibctx->fib_list)) {
        struct list_head * entry;
        /*
         * Pull the next fib from the fibs
         */
        entry = fibctx->fib_list.next;
        list_del(entry);
        fib = list_entry(entry, struct fib, fiblink);
        fibctx->count--;
        /*
         * Free the space occupied by this copy of the fib.
         */
        kfree(fib->hw_fib);
        kfree(fib);
    }
    /*
     * Remove the Context from the AdapterFibContext List
     */
    list_del(&fibctx->next);
    /*
     * Invalidate context
     */
    fibctx->type = 0;
    /*
     * Free the space occupied by the Context
     */
    kfree(fibctx);
    return 0;
}
/**
 * close_getadapter_fib - close down user fib context
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine will close down the fibctx passed in from the user.
 */
static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
    struct aac_fib_context *fibctx;
    int status;
    unsigned long flags;
    struct list_head * entry;

    /*
     * Verify that the HANDLE passed in was a valid AdapterFibContext
     *
     * Search the list of AdapterFibContext addresses on the adapter
     * to be sure this is a valid address
     */
    entry = dev->fib_list.next;
    fibctx = NULL;

    while (entry != &dev->fib_list) {
        fibctx = list_entry(entry, struct aac_fib_context, next);
        /*
         * Extract the fibctx from the input parameters
         */
        if (fibctx->unique == (u32)(unsigned long)arg) {
            /* We found a winner */
            break;
        }
        entry = entry->next;
        fibctx = NULL;
    }

    if (!fibctx)
        return 0; /* Already gone */

    if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
        (fibctx->size != sizeof(struct aac_fib_context)))
        return -EINVAL;

    spin_lock_irqsave(&dev->fib_lock, flags);
    status = aac_close_fib_context(dev, fibctx);
    spin_unlock_irqrestore(&dev->fib_lock, flags);
    return status;
}
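
/*
 * Illustrative sketch of the AIF polling sequence served by the three
 * routines above.  This is not part of the driver; it assumes "fd" is an
 * already-open file descriptor on the adapter's management device and
 * uses the fib_ioctl/hw_fib layouts from aacraid.h.
 *
 *    u32 ctx;
 *    struct fib_ioctl f;
 *    struct hw_fib fib;
 *
 *    ioctl(fd, FSACTL_OPEN_GET_ADAPTER_FIB, &ctx);   // handle written to ctx
 *    f.fibctx = ctx;
 *    f.wait = 1;                                     // block until an AIF arrives
 *    f.fib = (char *)&fib;
 *    while (ioctl(fd, FSACTL_GET_NEXT_ADAPTER_FIB, &f) == 0) {
 *        // one queued AIF fib has been copied into "fib"
 *    }
 *    ioctl(fd, FSACTL_CLOSE_GET_ADAPTER_FIB, ctx);   // handle passed by value
 */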
/**
 * check_revision - report driver revision to userspace
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine returns the driver version.
 * Under Linux, there have been no version incompatibilities, so this is
 * simple!
 */
static int check_revision(struct aac_dev *dev, void __user *arg)
{
    struct revision response;

    response.compat = 1;
    response.version = dev->adapter_info.kernelrev;
    response.build = dev->adapter_info.kernelbuild;
    if (copy_to_user(arg, &response, sizeof(response)))
        return -EFAULT;
    return 0;
}
/**
 * aac_send_raw_srb - send a raw SRB from userspace
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine sends a user-supplied SCSI Request Block straight to the
 * adapter (FSACTL_SEND_RAW_SRB), bounce-buffering any scatter-gather data
 * through the kernel in both directions.
 */
static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
{
    struct fib* srbfib;
    int status;
    struct aac_srb *srbcmd;
    struct aac_srb __user *user_srb = arg;
    struct aac_srb_reply __user *user_reply;
    struct aac_srb_reply* reply;
    u32 fibsize = 0;
    u32 flags = 0;
    s32 rcode = 0;
    u32 data_dir;
    void __user *sg_user[32];
    void *sg_list[32] = { NULL };   /* so cleanup can kfree() unused slots safely */
    u32 sg_indx = 0;
    u32 byte_count = 0;
    u32 actual_fibsize = 0;
    int i;

    if (!capable(CAP_SYS_ADMIN)) {
        printk(KERN_DEBUG "aacraid: No permission to send raw srb\n");
        return -EPERM;
    }
    /*
     * Allocate and initialize a Fib then setup a BlockWrite command
     */
    if (!(srbfib = fib_alloc(dev))) {
        return -1;
    }
    fib_init(srbfib);

    srbcmd = (struct aac_srb*) fib_data(srbfib);
    if (copy_from_user(&fibsize, &user_srb->count, sizeof(u32))) {
        printk(KERN_DEBUG "aacraid: Could not copy data size from user\n");
        rcode = -EFAULT;
        goto cleanup;
    }

    if (fibsize > FIB_DATA_SIZE_IN_BYTES) {
        rcode = -EINVAL;
        goto cleanup;
    }

    if (copy_from_user(srbcmd, user_srb, fibsize)) {
        printk(KERN_DEBUG "aacraid: Could not copy srb from user\n");
        rcode = -EFAULT;
        goto cleanup;
    }

    user_reply = arg + fibsize;

    flags = srbcmd->flags;
    // Fix up srb for endian and force some values
    srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);   // Force this
    srbcmd->channel = cpu_to_le32(srbcmd->channel);
    srbcmd->id = cpu_to_le32(srbcmd->id);
    srbcmd->lun = cpu_to_le32(srbcmd->lun);
    srbcmd->flags = cpu_to_le32(srbcmd->flags);
    srbcmd->timeout = cpu_to_le32(srbcmd->timeout);
    srbcmd->retry_limit = cpu_to_le32(0);               // Obsolete parameter
    srbcmd->cdb_size = cpu_to_le32(srbcmd->cdb_size);

    switch (srbcmd->flags & (SRB_DataIn | SRB_DataOut)) {
    case SRB_DataOut:
        data_dir = DMA_TO_DEVICE;
        break;
    case (SRB_DataIn | SRB_DataOut):
        data_dir = DMA_BIDIRECTIONAL;
        break;
    case SRB_DataIn:
        data_dir = DMA_FROM_DEVICE;
        break;
    default:
        data_dir = DMA_NONE;
    }

    if (dev->dac_support == 1) {
        /* 64 bit DMA capable adapter: caller must have built a sgmap64 */
        struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
        byte_count = 0;

        /*
         * This should also catch if user used the 32 bit sgmap
         */
        actual_fibsize = sizeof(struct aac_srb) -
            sizeof(struct sgentry) + ((srbcmd->sg.count & 0xff) *
            sizeof(struct sgentry64));
        if (actual_fibsize != fibsize) { // User made a mistake - should not continue
            printk(KERN_DEBUG "aacraid: Bad Size specified in Raw SRB command\n");
            rcode = -EINVAL;
            goto cleanup;
        }
        if ((data_dir == DMA_NONE) && psg->count) {
            printk(KERN_DEBUG "aacraid: SG with no direction specified in Raw SRB command\n");
            rcode = -EINVAL;
            goto cleanup;
        }

        for (i = 0; i < psg->count; i++) {
            dma_addr_t addr;
            u64 le_addr;
            void* p;

            p = kmalloc(psg->sg[i].count, GFP_KERNEL|__GFP_DMA);
            if (!p) {
                printk(KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
                    psg->sg[i].count, i, psg->count);
                rcode = -ENOMEM;
                goto cleanup;
            }
            sg_user[i] = (void __user *)psg->sg[i].addr;
            sg_list[i] = p; // save so we can clean up later
            sg_indx = i;

            if (flags & SRB_DataOut) {
                if (copy_from_user(p, sg_user[i], psg->sg[i].count)) {
                    printk(KERN_DEBUG "aacraid: Could not copy sg data from user\n");
                    rcode = -EFAULT;
                    goto cleanup;
                }
            }
            addr = pci_map_single(dev->pdev, p, psg->sg[i].count, data_dir);

            le_addr = cpu_to_le64(addr);
            psg->sg[i].addr[1] = (u32)(le_addr>>32);
            psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
            psg->sg[i].count = cpu_to_le32(psg->sg[i].count);
            byte_count += psg->sg[i].count;
        }

        srbcmd->count = cpu_to_le32(byte_count);
        status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
    } else {
        struct sgmap* psg = &srbcmd->sg;
        byte_count = 0;

        actual_fibsize = sizeof(struct aac_srb) +
            (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
            sizeof(struct sgentry));
        if (actual_fibsize != fibsize) { // User made a mistake - should not continue
            printk(KERN_DEBUG "aacraid: Bad Size specified in Raw SRB command\n");
            rcode = -EINVAL;
            goto cleanup;
        }
        if ((data_dir == DMA_NONE) && psg->count) {
            printk(KERN_DEBUG "aacraid: SG with no direction specified in Raw SRB command\n");
            rcode = -EINVAL;
            goto cleanup;
        }

        for (i = 0; i < psg->count; i++) {
            dma_addr_t addr;
            void* p;

            p = kmalloc(psg->sg[i].count, GFP_KERNEL);
            if (!p) {
                printk(KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
                    psg->sg[i].count, i, psg->count);
                rcode = -ENOMEM;
                goto cleanup;
            }
            sg_user[i] = (void __user *)(psg->sg[i].addr);
            sg_list[i] = p; // save so we can clean up later
            sg_indx = i;

            if (flags & SRB_DataOut) {
                if (copy_from_user(p, sg_user[i], psg->sg[i].count)) {
                    printk(KERN_DEBUG "aacraid: Could not copy sg data from user\n");
                    rcode = -EFAULT;
                    goto cleanup;
                }
            }
            addr = pci_map_single(dev->pdev, p, psg->sg[i].count, data_dir);

            psg->sg[i].addr = cpu_to_le32(addr);
            psg->sg[i].count = cpu_to_le32(psg->sg[i].count);
            byte_count += psg->sg[i].count;
        }
        srbcmd->count = cpu_to_le32(byte_count);
        status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
    }

    if (status != 0) {
        printk(KERN_DEBUG "aacraid: Could not send raw srb fib to hba\n");
        rcode = -1;
        goto cleanup;
    }

    if (flags & SRB_DataIn) {
        for (i = 0; i <= sg_indx; i++) {
            if (copy_to_user(sg_user[i], sg_list[i], le32_to_cpu(srbcmd->sg.sg[i].count))) {
                printk(KERN_DEBUG "aacraid: Could not copy sg data to user\n");
                rcode = -EFAULT;
                goto cleanup;
            }
        }
    }

    reply = (struct aac_srb_reply *) fib_data(srbfib);
    if (copy_to_user(user_reply, reply, sizeof(struct aac_srb_reply))) {
        printk(KERN_DEBUG "aacraid: Could not copy reply to user\n");
        rcode = -EFAULT;
        goto cleanup;
    }

cleanup:
    for (i = 0; i <= sg_indx; i++) {
        kfree(sg_list[i]);
    }
    fib_complete(srbfib);
    fib_free(srbfib);
    return rcode;
}
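
/*
 * Illustrative layout of the user buffer consumed by aac_send_raw_srb()
 * above (a sketch, not part of the driver).  The "count" field of the
 * leading aac_srb doubles as the size of the SRB portion on input, and
 * the reply is written back immediately after it:
 *
 *    arg --> +---------------------------------------------+
 *            | struct aac_srb (count = fibsize,            |
 *            |   flags, cdb, embedded sg entries ...)      |
 *            +---------------------------------------------+  <-- arg + fibsize
 *            | struct aac_srb_reply (written by driver)    |
 *            +---------------------------------------------+
 *
 * The scatter-gather payloads themselves live at the user addresses named
 * in the sg entries; the driver bounce-buffers them via kmalloc() and
 * pci_map_single() in both directions.
 */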
struct aac_pci_info {
    u32 bus;
    u32 slot;
};

static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
{
    struct aac_pci_info pci_info;

    pci_info.bus = dev->pdev->bus->number;
    pci_info.slot = PCI_SLOT(dev->pdev->devfn);

    if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
        printk(KERN_DEBUG "aacraid: Could not copy pci info\n");
        return -EFAULT;
    }
    return 0;
}
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
    int status;

    /*
     * HBA gets first crack
     */
    status = aac_dev_ioctl(dev, cmd, arg);
    if (status != -ENOTTY)
        return status;

    switch (cmd) {
    case FSACTL_MINIPORT_REV_CHECK:
        status = check_revision(dev, arg);
        break;
    case FSACTL_SENDFIB:
        status = ioctl_send_fib(dev, arg);
        break;
    case FSACTL_OPEN_GET_ADAPTER_FIB:
        status = open_getadapter_fib(dev, arg);
        break;
    case FSACTL_GET_NEXT_ADAPTER_FIB:
        status = next_getadapter_fib(dev, arg);
        break;
    case FSACTL_CLOSE_GET_ADAPTER_FIB:
        status = close_getadapter_fib(dev, arg);
        break;
    case FSACTL_SEND_RAW_SRB:
        status = aac_send_raw_srb(dev, arg);
        break;
    case FSACTL_GET_PCI_INFO:
        status = aac_get_pci_info(dev, arg);
        break;
    default:
        status = -ENOTTY;
        break;
    }
    return status;
}
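
/*
 * Illustrative sketch of the simpler query ioctls dispatched above.  This
 * is not part of the driver; it assumes "fd" is an already-open file
 * descriptor on the adapter's management device and that struct revision
 * and struct aac_pci_info are visible to the application.
 *
 *    struct revision rev;
 *    struct aac_pci_info pci;
 *
 *    if (ioctl(fd, FSACTL_MINIPORT_REV_CHECK, &rev) == 0)
 *        printf("compat %u version %u build %u\n",
 *               rev.compat, rev.version, rev.build);
 *    if (ioctl(fd, FSACTL_GET_PCI_INFO, &pci) == 0)
 *        printf("bus %u slot %u\n", pci.bus, pci.slot);
 */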