cciss.c

  1. /*
  2. * Disk Array driver for HP SA 5xxx and 6xxx Controllers
  3. * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  13. * NON INFRINGEMENT. See the GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  18. *
  19. * Questions/Comments/Bugfixes to iss_storagedev@hp.com
  20. *
  21. */
  22. #include <linux/module.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/types.h>
  25. #include <linux/pci.h>
  26. #include <linux/kernel.h>
  27. #include <linux/slab.h>
  28. #include <linux/delay.h>
  29. #include <linux/major.h>
  30. #include <linux/fs.h>
  31. #include <linux/bio.h>
  32. #include <linux/blkpg.h>
  33. #include <linux/timer.h>
  34. #include <linux/proc_fs.h>
  35. #include <linux/init.h>
  36. #include <linux/hdreg.h>
  37. #include <linux/spinlock.h>
  38. #include <linux/compat.h>
  39. #include <linux/blktrace_api.h>
  40. #include <asm/uaccess.h>
  41. #include <asm/io.h>
  42. #include <linux/dma-mapping.h>
  43. #include <linux/blkdev.h>
  44. #include <linux/genhd.h>
  45. #include <linux/completion.h>
  46. #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
  47. #define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
  48. #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
  49. /* Embedded module documentation macros - see modules.h */
  50. MODULE_AUTHOR("Hewlett-Packard Company");
  51. MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
  52. MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
  53. " SA6i P600 P800 P400 P400i E200 E200i E500");
  54. MODULE_VERSION("3.6.14");
  55. MODULE_LICENSE("GPL");
  56. #include "cciss_cmd.h"
  57. #include "cciss.h"
  58. #include <linux/cciss_ioctl.h>
  59. /* define the PCI info for the cards we can control */
  60. static const struct pci_device_id cciss_pci_device_id[] = {
  61. {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
  62. {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
  63. {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
  64. {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
  65. {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
  66. {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
  67. {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
  68. {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
  69. {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
  70. {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
  71. {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
  72. {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
  73. {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
  74. {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
  75. {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
  76. {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
  77. {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
  78. {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
  79. {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
  80. {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
  81. PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
  82. {0,}
  83. };
  84. MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
  85. /* board_id = Subsystem Device ID & Vendor ID
  86. * product = Marketing Name for the board
  87. * access = Address of the struct of function pointers
  88. * nr_cmds = Number of commands supported by controller
  89. */
  90. static struct board_type products[] = {
  91. {0x40700E11, "Smart Array 5300", &SA5_access, 512},
  92. {0x40800E11, "Smart Array 5i", &SA5B_access, 512},
  93. {0x40820E11, "Smart Array 532", &SA5B_access, 512},
  94. {0x40830E11, "Smart Array 5312", &SA5B_access, 512},
  95. {0x409A0E11, "Smart Array 641", &SA5_access, 512},
  96. {0x409B0E11, "Smart Array 642", &SA5_access, 512},
  97. {0x409C0E11, "Smart Array 6400", &SA5_access, 512},
  98. {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
  99. {0x40910E11, "Smart Array 6i", &SA5_access, 512},
  100. {0x3225103C, "Smart Array P600", &SA5_access, 512},
  101. {0x3223103C, "Smart Array P800", &SA5_access, 512},
  102. {0x3234103C, "Smart Array P400", &SA5_access, 512},
  103. {0x3235103C, "Smart Array P400i", &SA5_access, 512},
  104. {0x3211103C, "Smart Array E200i", &SA5_access, 120},
  105. {0x3212103C, "Smart Array E200", &SA5_access, 120},
  106. {0x3213103C, "Smart Array E200i", &SA5_access, 120},
  107. {0x3214103C, "Smart Array E200i", &SA5_access, 120},
  108. {0x3215103C, "Smart Array E200i", &SA5_access, 120},
  109. {0x3237103C, "Smart Array E500", &SA5_access, 512},
  110. {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
  111. };
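/* Worked example of the board_id encoding used above: the subsystem device
 * ID forms the high 16 bits and the subsystem vendor ID the low 16 bits,
 * so the pci_device_id entry {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
 * 0x103C, 0x3223} matches product entry 0x3223103C, "Smart Array P800".
 */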
  112. /* How long to wait (in milliseconds) for board to go into simple mode */
  113. #define MAX_CONFIG_WAIT 30000
  114. #define MAX_IOCTL_CONFIG_WAIT 1000
  115. /* define how many times we will try a command because of bus resets */
  116. #define MAX_CMD_RETRIES 3
  117. #define READ_AHEAD 1024
  118. #define MAX_CTLR 32
  119. /* Originally the cciss driver supported only 8 major numbers */
  120. #define MAX_CTLR_ORIG 8
  121. static ctlr_info_t *hba[MAX_CTLR];
  122. static void do_cciss_request(request_queue_t *q);
  123. static irqreturn_t do_cciss_intr(int irq, void *dev_id);
  124. static int cciss_open(struct inode *inode, struct file *filep);
  125. static int cciss_release(struct inode *inode, struct file *filep);
  126. static int cciss_ioctl(struct inode *inode, struct file *filep,
  127. unsigned int cmd, unsigned long arg);
  128. static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
  129. static int cciss_revalidate(struct gendisk *disk);
  130. static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
  131. static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
  132. int clear_all);
  133. static void cciss_read_capacity(int ctlr, int logvol, int withirq,
  134. sector_t *total_size, unsigned int *block_size);
  135. static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
  136. sector_t *total_size, unsigned int *block_size);
  137. static void cciss_geometry_inquiry(int ctlr, int logvol,
  138. int withirq, sector_t total_size,
  139. unsigned int block_size, InquiryData_struct *inq_buff,
  140. drive_info_struct *drv);
  141. static void cciss_getgeometry(int cntl_num);
  142. static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
  143. __u32);
  144. static void start_io(ctlr_info_t *h);
  145. static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
  146. unsigned int use_unit_num, unsigned int log_unit,
  147. __u8 page_code, unsigned char *scsi3addr, int cmd_type);
  148. static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
  149. unsigned int use_unit_num, unsigned int log_unit,
  150. __u8 page_code, int cmd_type);
  151. static void fail_all_cmds(unsigned long ctlr);
  152. #ifdef CONFIG_PROC_FS
  153. static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
  154. int length, int *eof, void *data);
  155. static void cciss_procinit(int i);
  156. #else
  157. static void cciss_procinit(int i)
  158. {
  159. }
  160. #endif /* CONFIG_PROC_FS */
  161. #ifdef CONFIG_COMPAT
  162. static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
  163. #endif
  164. static struct block_device_operations cciss_fops = {
  165. .owner = THIS_MODULE,
  166. .open = cciss_open,
  167. .release = cciss_release,
  168. .ioctl = cciss_ioctl,
  169. .getgeo = cciss_getgeo,
  170. #ifdef CONFIG_COMPAT
  171. .compat_ioctl = cciss_compat_ioctl,
  172. #endif
  173. .revalidate_disk = cciss_revalidate,
  174. };
  175. /*
  176. * Enqueuing and dequeuing functions for cmdlists.
  177. */
  178. static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
  179. {
  180. if (*Qptr == NULL) {
  181. *Qptr = c;
  182. c->next = c->prev = c;
  183. } else {
  184. c->prev = (*Qptr)->prev;
  185. c->next = (*Qptr);
  186. (*Qptr)->prev->next = c;
  187. (*Qptr)->prev = c;
  188. }
  189. }
  190. static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
  191. CommandList_struct *c)
  192. {
  193. if (c && c->next != c) {
  194. if (*Qptr == c)
  195. *Qptr = c->next;
  196. c->prev->next = c->next;
  197. c->next->prev = c->prev;
  198. } else {
  199. *Qptr = NULL;
  200. }
  201. return c;
  202. }
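/* Usage sketch (illustrative only, not compiled): addQ()/removeQ() keep the
 * controller's command queues (e.g. h->reqQ) as circular doubly-linked
 * lists, and they are always manipulated under the per-controller lock,
 * exactly as the ioctl and request paths below do:
 */
#if 0
spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
addQ(&h->reqQ, c); /* append command c at the tail of the request queue */
h->Qdepth++;
start_io(h); /* start_io() later pulls commands off the head with removeQ() */
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
#endif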
  203. #include "cciss_scsi.c" /* For SCSI tape support */
  204. #ifdef CONFIG_PROC_FS
  205. /*
  206. * Report information about this controller.
  207. */
  208. #define ENG_GIG 1000000000
  209. #define ENG_GIG_FACTOR (ENG_GIG/512)
  210. #define RAID_UNKNOWN 6
  211. static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
  212. "UNKNOWN"
  213. };
  214. static struct proc_dir_entry *proc_cciss;
  215. static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
  216. int length, int *eof, void *data)
  217. {
  218. off_t pos = 0;
  219. off_t len = 0;
  220. int size, i, ctlr;
  221. ctlr_info_t *h = (ctlr_info_t *) data;
  222. drive_info_struct *drv;
  223. unsigned long flags;
  224. sector_t vol_sz, vol_sz_frac;
  225. ctlr = h->ctlr;
  226. /* prevent displaying bogus info during configuration
  227. * or deconfiguration of a logical volume
  228. */
  229. spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
  230. if (h->busy_configuring) {
  231. spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
  232. return -EBUSY;
  233. }
  234. h->busy_configuring = 1;
  235. spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
  236. size = sprintf(buffer, "%s: HP %s Controller\n"
  237. "Board ID: 0x%08lx\n"
  238. "Firmware Version: %c%c%c%c\n"
  239. "IRQ: %d\n"
  240. "Logical drives: %d\n"
  241. "Max sectors: %d\n"
  242. "Current Q depth: %d\n"
  243. "Current # commands on controller: %d\n"
  244. "Max Q depth since init: %d\n"
  245. "Max # commands on controller since init: %d\n"
  246. "Max SG entries since init: %d\n\n",
  247. h->devname,
  248. h->product_name,
  249. (unsigned long)h->board_id,
  250. h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
  251. h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
  252. h->num_luns,
  253. h->cciss_max_sectors,
  254. h->Qdepth, h->commands_outstanding,
  255. h->maxQsinceinit, h->max_outstanding, h->maxSG);
  256. pos += size;
  257. len += size;
  258. cciss_proc_tape_report(ctlr, buffer, &pos, &len);
  259. for (i = 0; i <= h->highest_lun; i++) {
  260. drv = &h->drv[i];
  261. if (drv->heads == 0)
  262. continue;
  263. vol_sz = drv->nr_blocks;
  264. vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
  265. vol_sz_frac *= 100;
  266. sector_div(vol_sz_frac, ENG_GIG_FACTOR);
  267. if (drv->raid_level > 5)
  268. drv->raid_level = RAID_UNKNOWN;
  269. size = sprintf(buffer + len, "cciss/c%dd%d:"
  270. "\t%4u.%02uGB\tRAID %s\n",
  271. ctlr, i, (int)vol_sz, (int)vol_sz_frac,
  272. raid_label[drv->raid_level]);
  273. pos += size;
  274. len += size;
  275. }
  276. *eof = 1;
  277. *start = buffer + offset;
  278. len -= offset;
  279. if (len > length)
  280. len = length;
  281. h->busy_configuring = 0;
  282. return len;
  283. }
  284. static int
  285. cciss_proc_write(struct file *file, const char __user *buffer,
  286. unsigned long count, void *data)
  287. {
  288. unsigned char cmd[80];
  289. int len;
  290. #ifdef CONFIG_CISS_SCSI_TAPE
  291. ctlr_info_t *h = (ctlr_info_t *) data;
  292. int rc;
  293. #endif
  294. if (count > sizeof(cmd) - 1)
  295. return -EINVAL;
  296. if (copy_from_user(cmd, buffer, count))
  297. return -EFAULT;
  298. cmd[count] = '\0';
  299. len = strlen(cmd); // above 3 lines ensure safety
  300. if (len && cmd[len - 1] == '\n')
  301. cmd[--len] = '\0';
  302. # ifdef CONFIG_CISS_SCSI_TAPE
  303. if (strcmp("engage scsi", cmd) == 0) {
  304. rc = cciss_engage_scsi(h->ctlr);
  305. if (rc != 0)
  306. return -rc;
  307. return count;
  308. }
  309. /* might be nice to have "disengage" too, but it's not
  310. safely possible. (only 1 module use count, lock issues.) */
  311. # endif
  312. return -EINVAL;
  313. }
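/* Usage note: with CONFIG_CISS_SCSI_TAPE enabled, writing the string
 * "engage scsi" to this controller's proc file triggers cciss_engage_scsi().
 * A minimal userspace sketch, assuming the usual "cciss0" devname under
 * /proc/driver/cciss/:
 */
#if 0
int fd = open("/proc/driver/cciss/cciss0", O_WRONLY);
if (fd >= 0) {
write(fd, "engage scsi", strlen("engage scsi"));
close(fd);
}
#endif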
  314. /*
  315. * Get us a file in /proc/cciss that says something about each controller.
  316. * Create /proc/cciss if it doesn't exist yet.
  317. */
  318. static void __devinit cciss_procinit(int i)
  319. {
  320. struct proc_dir_entry *pde;
  321. if (proc_cciss == NULL) {
  322. proc_cciss = proc_mkdir("cciss", proc_root_driver);
  323. if (!proc_cciss)
  324. return;
  325. }
  326. pde = create_proc_read_entry(hba[i]->devname,
  327. S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
  328. proc_cciss, cciss_proc_get_info, hba[i]);
  329. pde->write_proc = cciss_proc_write;
  330. }
  331. #endif /* CONFIG_PROC_FS */
  332. /*
  333. * For operations that cannot sleep, a command block is allocated at init,
  334. * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
  335. * which ones are free or in use. For operations that can wait for kmalloc
  336. * to possibly sleep, this routine can be called with get_from_pool set to 0.
  337. * cmd_free() MUST then be called with got_from_pool set to 0 as well.
  338. */
  339. static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
  340. {
  341. CommandList_struct *c;
  342. int i;
  343. u64bit temp64;
  344. dma_addr_t cmd_dma_handle, err_dma_handle;
  345. if (!get_from_pool) {
  346. c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
  347. sizeof(CommandList_struct), &cmd_dma_handle);
  348. if (c == NULL)
  349. return NULL;
  350. memset(c, 0, sizeof(CommandList_struct));
  351. c->cmdindex = -1;
  352. c->err_info = (ErrorInfo_struct *)
  353. pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
  354. &err_dma_handle);
  355. if (c->err_info == NULL) {
  356. pci_free_consistent(h->pdev,
  357. sizeof(CommandList_struct), c, cmd_dma_handle);
  358. return NULL;
  359. }
  360. memset(c->err_info, 0, sizeof(ErrorInfo_struct));
  361. } else { /* get it out of the controller's pool */
  362. do {
  363. i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
  364. if (i == h->nr_cmds)
  365. return NULL;
  366. } while (test_and_set_bit
  367. (i & (BITS_PER_LONG - 1),
  368. h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
  369. #ifdef CCISS_DEBUG
  370. printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
  371. #endif
  372. c = h->cmd_pool + i;
  373. memset(c, 0, sizeof(CommandList_struct));
  374. cmd_dma_handle = h->cmd_pool_dhandle
  375. + i * sizeof(CommandList_struct);
  376. c->err_info = h->errinfo_pool + i;
  377. memset(c->err_info, 0, sizeof(ErrorInfo_struct));
  378. err_dma_handle = h->errinfo_pool_dhandle
  379. + i * sizeof(ErrorInfo_struct);
  380. h->nr_allocs++;
  381. c->cmdindex = i;
  382. }
  383. c->busaddr = (__u32) cmd_dma_handle;
  384. temp64.val = (__u64) err_dma_handle;
  385. c->ErrDesc.Addr.lower = temp64.val32.lower;
  386. c->ErrDesc.Addr.upper = temp64.val32.upper;
  387. c->ErrDesc.Len = sizeof(ErrorInfo_struct);
  388. c->ctlr = h->ctlr;
  389. return c;
  390. }
  391. /*
  392. * Frees a command block that was previously allocated with cmd_alloc().
  393. */
  394. static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
  395. {
  396. int i;
  397. u64bit temp64;
  398. if (!got_from_pool) {
  399. temp64.val32.lower = c->ErrDesc.Addr.lower;
  400. temp64.val32.upper = c->ErrDesc.Addr.upper;
  401. pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
  402. c->err_info, (dma_addr_t) temp64.val);
  403. pci_free_consistent(h->pdev, sizeof(CommandList_struct),
  404. c, (dma_addr_t) c->busaddr);
  405. } else {
  406. i = c - h->cmd_pool;
  407. clear_bit(i & (BITS_PER_LONG - 1),
  408. h->cmd_pool_bits + (i / BITS_PER_LONG));
  409. h->nr_frees++;
  410. }
  411. }
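/* Usage sketch (illustrative only): the pool flags passed to cmd_alloc()
 * and cmd_free() must match. Flag 1 takes a command from the preallocated
 * cmd_pool without any sleeping allocation; flag 0 builds a fresh
 * DMA-consistent block as described in the comment above cmd_alloc().
 */
#if 0
c = cmd_alloc(h, 1); /* grab a command from the controller's pool */
if (c == NULL)
return -EBUSY; /* pool exhausted */
/* ... fill in c->Request, queue it with addQ(), wait for completion ... */
cmd_free(h, c, 1); /* return it with the same pool flag */
#endif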
  412. static inline ctlr_info_t *get_host(struct gendisk *disk)
  413. {
  414. return disk->queue->queuedata;
  415. }
  416. static inline drive_info_struct *get_drv(struct gendisk *disk)
  417. {
  418. return disk->private_data;
  419. }
  420. /*
  421. * Open. Make sure the device is really there.
  422. */
  423. static int cciss_open(struct inode *inode, struct file *filep)
  424. {
  425. ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
  426. drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
  427. #ifdef CCISS_DEBUG
  428. printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
  429. #endif /* CCISS_DEBUG */
  430. if (host->busy_initializing || drv->busy_configuring)
  431. return -EBUSY;
  432. /*
  433. * Root is allowed to open raw volume zero even if it's not configured
  434. * so array config can still work. Root is also allowed to open any
  435. * volume that has a LUN ID, so it can issue IOCTL to reread the
  436. * disk information. I don't think I really like this
  437. * but I'm already using way too many device nodes to claim another one
  438. * for "raw controller".
  439. */
  440. if (drv->heads == 0) {
  441. if (iminor(inode) != 0) { /* not node 0? */
  442. /* if not node 0 make sure it is a partition = 0 */
  443. if (iminor(inode) & 0x0f) {
  444. return -ENXIO;
  445. /* if it is, make sure we have a LUN ID */
  446. } else if (drv->LunID == 0) {
  447. return -ENXIO;
  448. }
  449. }
  450. if (!capable(CAP_SYS_ADMIN))
  451. return -EPERM;
  452. }
  453. drv->usage_count++;
  454. host->usage_count++;
  455. return 0;
  456. }
  457. /*
  458. * Close. Sync first.
  459. */
  460. static int cciss_release(struct inode *inode, struct file *filep)
  461. {
  462. ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
  463. drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
  464. #ifdef CCISS_DEBUG
  465. printk(KERN_DEBUG "cciss_release %s\n",
  466. inode->i_bdev->bd_disk->disk_name);
  467. #endif /* CCISS_DEBUG */
  468. drv->usage_count--;
  469. host->usage_count--;
  470. return 0;
  471. }
  472. #ifdef CONFIG_COMPAT
  473. static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
  474. {
  475. int ret;
  476. lock_kernel();
  477. ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
  478. unlock_kernel();
  479. return ret;
  480. }
  481. static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
  482. unsigned long arg);
  483. static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
  484. unsigned long arg);
  485. static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
  486. {
  487. switch (cmd) {
  488. case CCISS_GETPCIINFO:
  489. case CCISS_GETINTINFO:
  490. case CCISS_SETINTINFO:
  491. case CCISS_GETNODENAME:
  492. case CCISS_SETNODENAME:
  493. case CCISS_GETHEARTBEAT:
  494. case CCISS_GETBUSTYPES:
  495. case CCISS_GETFIRMVER:
  496. case CCISS_GETDRIVVER:
  497. case CCISS_REVALIDVOLS:
  498. case CCISS_DEREGDISK:
  499. case CCISS_REGNEWDISK:
  500. case CCISS_REGNEWD:
  501. case CCISS_RESCANDISK:
  502. case CCISS_GETLUNINFO:
  503. return do_ioctl(f, cmd, arg);
  504. case CCISS_PASSTHRU32:
  505. return cciss_ioctl32_passthru(f, cmd, arg);
  506. case CCISS_BIG_PASSTHRU32:
  507. return cciss_ioctl32_big_passthru(f, cmd, arg);
  508. default:
  509. return -ENOIOCTLCMD;
  510. }
  511. }
  512. static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
  513. unsigned long arg)
  514. {
  515. IOCTL32_Command_struct __user *arg32 =
  516. (IOCTL32_Command_struct __user *) arg;
  517. IOCTL_Command_struct arg64;
  518. IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
  519. int err;
  520. u32 cp;
  521. err = 0;
  522. err |=
  523. copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
  524. sizeof(arg64.LUN_info));
  525. err |=
  526. copy_from_user(&arg64.Request, &arg32->Request,
  527. sizeof(arg64.Request));
  528. err |=
  529. copy_from_user(&arg64.error_info, &arg32->error_info,
  530. sizeof(arg64.error_info));
  531. err |= get_user(arg64.buf_size, &arg32->buf_size);
  532. err |= get_user(cp, &arg32->buf);
  533. arg64.buf = compat_ptr(cp);
  534. err |= copy_to_user(p, &arg64, sizeof(arg64));
  535. if (err)
  536. return -EFAULT;
  537. err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
  538. if (err)
  539. return err;
  540. err |=
  541. copy_in_user(&arg32->error_info, &p->error_info,
  542. sizeof(arg32->error_info));
  543. if (err)
  544. return -EFAULT;
  545. return err;
  546. }
  547. static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
  548. unsigned long arg)
  549. {
  550. BIG_IOCTL32_Command_struct __user *arg32 =
  551. (BIG_IOCTL32_Command_struct __user *) arg;
  552. BIG_IOCTL_Command_struct arg64;
  553. BIG_IOCTL_Command_struct __user *p =
  554. compat_alloc_user_space(sizeof(arg64));
  555. int err;
  556. u32 cp;
  557. err = 0;
  558. err |=
  559. copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
  560. sizeof(arg64.LUN_info));
  561. err |=
  562. copy_from_user(&arg64.Request, &arg32->Request,
  563. sizeof(arg64.Request));
  564. err |=
  565. copy_from_user(&arg64.error_info, &arg32->error_info,
  566. sizeof(arg64.error_info));
  567. err |= get_user(arg64.buf_size, &arg32->buf_size);
  568. err |= get_user(arg64.malloc_size, &arg32->malloc_size);
  569. err |= get_user(cp, &arg32->buf);
  570. arg64.buf = compat_ptr(cp);
  571. err |= copy_to_user(p, &arg64, sizeof(arg64));
  572. if (err)
  573. return -EFAULT;
  574. err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
  575. if (err)
  576. return err;
  577. err |=
  578. copy_in_user(&arg32->error_info, &p->error_info,
  579. sizeof(arg32->error_info));
  580. if (err)
  581. return -EFAULT;
  582. return err;
  583. }
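/* Design note on the two thunks above: the 32-bit ioctl structs differ from
 * the native ones essentially only in the width of the buf pointer, so each
 * thunk copies the user's 32-bit struct a field at a time, widens buf with
 * compat_ptr(), rebuilds a native struct in compat_alloc_user_space(), and
 * then reuses the normal 64-bit path via do_ioctl().
 */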
  584. #endif
  585. static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  586. {
  587. drive_info_struct *drv = get_drv(bdev->bd_disk);
  588. if (!drv->cylinders)
  589. return -ENXIO;
  590. geo->heads = drv->heads;
  591. geo->sectors = drv->sectors;
  592. geo->cylinders = drv->cylinders;
  593. return 0;
  594. }
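/* The .getgeo method above services the HDIO_GETGEO ioctl issued against a
 * cciss block node. Minimal userspace sketch (the /dev/cciss/c0d0 node name
 * is an assumption):
 */
#if 0
struct hd_geometry geo;
int fd = open("/dev/cciss/c0d0", O_RDONLY);
if (fd >= 0 && ioctl(fd, HDIO_GETGEO, &geo) == 0)
printf("%d heads, %d sectors/track, %d cylinders\n",
geo.heads, geo.sectors, geo.cylinders);
#endif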
  595. /*
  596. * ioctl
  597. */
  598. static int cciss_ioctl(struct inode *inode, struct file *filep,
  599. unsigned int cmd, unsigned long arg)
  600. {
  601. struct block_device *bdev = inode->i_bdev;
  602. struct gendisk *disk = bdev->bd_disk;
  603. ctlr_info_t *host = get_host(disk);
  604. drive_info_struct *drv = get_drv(disk);
  605. int ctlr = host->ctlr;
  606. void __user *argp = (void __user *)arg;
  607. #ifdef CCISS_DEBUG
  608. printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
  609. #endif /* CCISS_DEBUG */
  610. switch (cmd) {
  611. case CCISS_GETPCIINFO:
  612. {
  613. cciss_pci_info_struct pciinfo;
  614. if (!arg)
  615. return -EINVAL;
  616. pciinfo.domain = pci_domain_nr(host->pdev->bus);
  617. pciinfo.bus = host->pdev->bus->number;
  618. pciinfo.dev_fn = host->pdev->devfn;
  619. pciinfo.board_id = host->board_id;
  620. if (copy_to_user
  621. (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
  622. return -EFAULT;
  623. return 0;
  624. }
  625. case CCISS_GETINTINFO:
  626. {
  627. cciss_coalint_struct intinfo;
  628. if (!arg)
  629. return -EINVAL;
  630. intinfo.delay =
  631. readl(&host->cfgtable->HostWrite.CoalIntDelay);
  632. intinfo.count =
  633. readl(&host->cfgtable->HostWrite.CoalIntCount);
  634. if (copy_to_user
  635. (argp, &intinfo, sizeof(cciss_coalint_struct)))
  636. return -EFAULT;
  637. return 0;
  638. }
  639. case CCISS_SETINTINFO:
  640. {
  641. cciss_coalint_struct intinfo;
  642. unsigned long flags;
  643. int i;
  644. if (!arg)
  645. return -EINVAL;
  646. if (!capable(CAP_SYS_ADMIN))
  647. return -EPERM;
  648. if (copy_from_user
  649. (&intinfo, argp, sizeof(cciss_coalint_struct)))
  650. return -EFAULT;
  651. if ((intinfo.delay == 0) && (intinfo.count == 0))
  652. {
  653. // printk("cciss_ioctl: delay and count cannot be 0\n");
  654. return -EINVAL;
  655. }
  656. spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
  657. /* Update the field, and then ring the doorbell */
  658. writel(intinfo.delay,
  659. &(host->cfgtable->HostWrite.CoalIntDelay));
  660. writel(intinfo.count,
  661. &(host->cfgtable->HostWrite.CoalIntCount));
  662. writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
  663. for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
  664. if (!(readl(host->vaddr + SA5_DOORBELL)
  665. & CFGTBL_ChangeReq))
  666. break;
  667. /* delay and try again */
  668. udelay(1000);
  669. }
  670. spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
  671. if (i >= MAX_IOCTL_CONFIG_WAIT)
  672. return -EAGAIN;
  673. return 0;
  674. }
  675. case CCISS_GETNODENAME:
  676. {
  677. NodeName_type NodeName;
  678. int i;
  679. if (!arg)
  680. return -EINVAL;
  681. for (i = 0; i < 16; i++)
  682. NodeName[i] =
  683. readb(&host->cfgtable->ServerName[i]);
  684. if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
  685. return -EFAULT;
  686. return 0;
  687. }
  688. case CCISS_SETNODENAME:
  689. {
  690. NodeName_type NodeName;
  691. unsigned long flags;
  692. int i;
  693. if (!arg)
  694. return -EINVAL;
  695. if (!capable(CAP_SYS_ADMIN))
  696. return -EPERM;
  697. if (copy_from_user
  698. (NodeName, argp, sizeof(NodeName_type)))
  699. return -EFAULT;
  700. spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
  701. /* Update the field, and then ring the doorbell */
  702. for (i = 0; i < 16; i++)
  703. writeb(NodeName[i],
  704. &host->cfgtable->ServerName[i]);
  705. writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
  706. for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
  707. if (!(readl(host->vaddr + SA5_DOORBELL)
  708. & CFGTBL_ChangeReq))
  709. break;
  710. /* delay and try again */
  711. udelay(1000);
  712. }
  713. spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
  714. if (i >= MAX_IOCTL_CONFIG_WAIT)
  715. return -EAGAIN;
  716. return 0;
  717. }
  718. case CCISS_GETHEARTBEAT:
  719. {
  720. Heartbeat_type heartbeat;
  721. if (!arg)
  722. return -EINVAL;
  723. heartbeat = readl(&host->cfgtable->HeartBeat);
  724. if (copy_to_user
  725. (argp, &heartbeat, sizeof(Heartbeat_type)))
  726. return -EFAULT;
  727. return 0;
  728. }
  729. case CCISS_GETBUSTYPES:
  730. {
  731. BusTypes_type BusTypes;
  732. if (!arg)
  733. return -EINVAL;
  734. BusTypes = readl(&host->cfgtable->BusTypes);
  735. if (copy_to_user
  736. (argp, &BusTypes, sizeof(BusTypes_type)))
  737. return -EFAULT;
  738. return 0;
  739. }
  740. case CCISS_GETFIRMVER:
  741. {
  742. FirmwareVer_type firmware;
  743. if (!arg)
  744. return -EINVAL;
  745. memcpy(firmware, host->firm_ver, 4);
  746. if (copy_to_user
  747. (argp, firmware, sizeof(FirmwareVer_type)))
  748. return -EFAULT;
  749. return 0;
  750. }
  751. case CCISS_GETDRIVVER:
  752. {
  753. DriverVer_type DriverVer = DRIVER_VERSION;
  754. if (!arg)
  755. return -EINVAL;
  756. if (copy_to_user
  757. (argp, &DriverVer, sizeof(DriverVer_type)))
  758. return -EFAULT;
  759. return 0;
  760. }
  761. case CCISS_REVALIDVOLS:
  762. return rebuild_lun_table(host, NULL);
  763. case CCISS_GETLUNINFO:{
  764. LogvolInfo_struct luninfo;
  765. luninfo.LunID = drv->LunID;
  766. luninfo.num_opens = drv->usage_count;
  767. luninfo.num_parts = 0;
  768. if (copy_to_user(argp, &luninfo,
  769. sizeof(LogvolInfo_struct)))
  770. return -EFAULT;
  771. return 0;
  772. }
  773. case CCISS_DEREGDISK:
  774. return rebuild_lun_table(host, disk);
  775. case CCISS_REGNEWD:
  776. return rebuild_lun_table(host, NULL);
  777. case CCISS_PASSTHRU:
  778. {
  779. IOCTL_Command_struct iocommand;
  780. CommandList_struct *c;
  781. char *buff = NULL;
  782. u64bit temp64;
  783. unsigned long flags;
  784. DECLARE_COMPLETION_ONSTACK(wait);
  785. if (!arg)
  786. return -EINVAL;
  787. if (!capable(CAP_SYS_RAWIO))
  788. return -EPERM;
  789. if (copy_from_user
  790. (&iocommand, argp, sizeof(IOCTL_Command_struct)))
  791. return -EFAULT;
  792. if ((iocommand.buf_size < 1) &&
  793. (iocommand.Request.Type.Direction != XFER_NONE)) {
  794. return -EINVAL;
  795. }
  796. #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
  797. /* Check kmalloc limits */
  798. if (iocommand.buf_size > 128000)
  799. return -EINVAL;
  800. #endif
  801. if (iocommand.buf_size > 0) {
  802. buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
  803. if (buff == NULL)
  804. return -EFAULT;
  805. }
  806. if (iocommand.Request.Type.Direction == XFER_WRITE) {
  807. /* Copy the data into the buffer we created */
  808. if (copy_from_user
  809. (buff, iocommand.buf, iocommand.buf_size)) {
  810. kfree(buff);
  811. return -EFAULT;
  812. }
  813. } else {
  814. memset(buff, 0, iocommand.buf_size);
  815. }
  816. if ((c = cmd_alloc(host, 0)) == NULL) {
  817. kfree(buff);
  818. return -ENOMEM;
  819. }
  820. // Fill in the command type
  821. c->cmd_type = CMD_IOCTL_PEND;
  822. // Fill in Command Header
  823. c->Header.ReplyQueue = 0; // unused in simple mode
  824. if (iocommand.buf_size > 0) // buffer to fill
  825. {
  826. c->Header.SGList = 1;
  827. c->Header.SGTotal = 1;
  828. } else // no buffers to fill
  829. {
  830. c->Header.SGList = 0;
  831. c->Header.SGTotal = 0;
  832. }
  833. c->Header.LUN = iocommand.LUN_info;
  834. c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for the tag
  835. // Fill in Request block
  836. c->Request = iocommand.Request;
  837. // Fill in the scatter gather information
  838. if (iocommand.buf_size > 0) {
  839. temp64.val = pci_map_single(host->pdev, buff,
  840. iocommand.buf_size,
  841. PCI_DMA_BIDIRECTIONAL);
  842. c->SG[0].Addr.lower = temp64.val32.lower;
  843. c->SG[0].Addr.upper = temp64.val32.upper;
  844. c->SG[0].Len = iocommand.buf_size;
  845. c->SG[0].Ext = 0; // we are not chaining
  846. }
  847. c->waiting = &wait;
  848. /* Put the request on the tail of the request queue */
  849. spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
  850. addQ(&host->reqQ, c);
  851. host->Qdepth++;
  852. start_io(host);
  853. spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
  854. wait_for_completion(&wait);
  855. /* unlock the buffers from DMA */
  856. temp64.val32.lower = c->SG[0].Addr.lower;
  857. temp64.val32.upper = c->SG[0].Addr.upper;
  858. pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
  859. iocommand.buf_size,
  860. PCI_DMA_BIDIRECTIONAL);
  861. /* Copy the error information out */
  862. iocommand.error_info = *(c->err_info);
  863. if (copy_to_user
  864. (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
  865. kfree(buff);
  866. cmd_free(host, c, 0);
  867. return -EFAULT;
  868. }
  869. if (iocommand.Request.Type.Direction == XFER_READ) {
  870. /* Copy the data out of the buffer we created */
  871. if (copy_to_user
  872. (iocommand.buf, buff, iocommand.buf_size)) {
  873. kfree(buff);
  874. cmd_free(host, c, 0);
  875. return -EFAULT;
  876. }
  877. }
  878. kfree(buff);
  879. cmd_free(host, c, 0);
  880. return 0;
  881. }
  882. case CCISS_BIG_PASSTHRU:{
  883. BIG_IOCTL_Command_struct *ioc;
  884. CommandList_struct *c;
  885. unsigned char **buff = NULL;
  886. int *buff_size = NULL;
  887. u64bit temp64;
  888. unsigned long flags;
  889. BYTE sg_used = 0;
  890. int status = 0;
  891. int i;
  892. DECLARE_COMPLETION_ONSTACK(wait);
  893. __u32 left;
  894. __u32 sz;
  895. BYTE __user *data_ptr;
  896. if (!arg)
  897. return -EINVAL;
  898. if (!capable(CAP_SYS_RAWIO))
  899. return -EPERM;
  900. ioc = (BIG_IOCTL_Command_struct *)
  901. kmalloc(sizeof(*ioc), GFP_KERNEL);
  902. if (!ioc) {
  903. status = -ENOMEM;
  904. goto cleanup1;
  905. }
  906. if (copy_from_user(ioc, argp, sizeof(*ioc))) {
  907. status = -EFAULT;
  908. goto cleanup1;
  909. }
  910. if ((ioc->buf_size < 1) &&
  911. (ioc->Request.Type.Direction != XFER_NONE)) {
  912. status = -EINVAL;
  913. goto cleanup1;
  914. }
  915. /* Check kmalloc limits using all SGs */
  916. if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
  917. status = -EINVAL;
  918. goto cleanup1;
  919. }
  920. if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
  921. status = -EINVAL;
  922. goto cleanup1;
  923. }
  924. buff =
  925. kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
  926. if (!buff) {
  927. status = -ENOMEM;
  928. goto cleanup1;
  929. }
  930. buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
  931. GFP_KERNEL);
  932. if (!buff_size) {
  933. status = -ENOMEM;
  934. goto cleanup1;
  935. }
  936. left = ioc->buf_size;
  937. data_ptr = ioc->buf;
  938. while (left) {
  939. sz = (left >
  940. ioc->malloc_size) ? ioc->
  941. malloc_size : left;
  942. buff_size[sg_used] = sz;
  943. buff[sg_used] = kmalloc(sz, GFP_KERNEL);
  944. if (buff[sg_used] == NULL) {
  945. status = -ENOMEM;
  946. goto cleanup1;
  947. }
  948. if (ioc->Request.Type.Direction == XFER_WRITE) {
  949. if (copy_from_user
  950. (buff[sg_used], data_ptr, sz)) {
  951. status = -ENOMEM;
  952. goto cleanup1;
  953. }
  954. } else {
  955. memset(buff[sg_used], 0, sz);
  956. }
  957. left -= sz;
  958. data_ptr += sz;
  959. sg_used++;
  960. }
  961. if ((c = cmd_alloc(host, 0)) == NULL) {
  962. status = -ENOMEM;
  963. goto cleanup1;
  964. }
  965. c->cmd_type = CMD_IOCTL_PEND;
  966. c->Header.ReplyQueue = 0;
  967. if (ioc->buf_size > 0) {
  968. c->Header.SGList = sg_used;
  969. c->Header.SGTotal = sg_used;
  970. } else {
  971. c->Header.SGList = 0;
  972. c->Header.SGTotal = 0;
  973. }
  974. c->Header.LUN = ioc->LUN_info;
  975. c->Header.Tag.lower = c->busaddr;
  976. c->Request = ioc->Request;
  977. if (ioc->buf_size > 0) {
  978. int i;
  979. for (i = 0; i < sg_used; i++) {
  980. temp64.val =
  981. pci_map_single(host->pdev, buff[i],
  982. buff_size[i],
  983. PCI_DMA_BIDIRECTIONAL);
  984. c->SG[i].Addr.lower =
  985. temp64.val32.lower;
  986. c->SG[i].Addr.upper =
  987. temp64.val32.upper;
  988. c->SG[i].Len = buff_size[i];
  989. c->SG[i].Ext = 0; /* we are not chaining */
  990. }
  991. }
  992. c->waiting = &wait;
  993. /* Put the request on the tail of the request queue */
  994. spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
  995. addQ(&host->reqQ, c);
  996. host->Qdepth++;
  997. start_io(host);
  998. spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
  999. wait_for_completion(&wait);
  1000. /* unlock the buffers from DMA */
  1001. for (i = 0; i < sg_used; i++) {
  1002. temp64.val32.lower = c->SG[i].Addr.lower;
  1003. temp64.val32.upper = c->SG[i].Addr.upper;
  1004. pci_unmap_single(host->pdev,
  1005. (dma_addr_t) temp64.val, buff_size[i],
  1006. PCI_DMA_BIDIRECTIONAL);
  1007. }
  1008. /* Copy the error information out */
  1009. ioc->error_info = *(c->err_info);
  1010. if (copy_to_user(argp, ioc, sizeof(*ioc))) {
  1011. cmd_free(host, c, 0);
  1012. status = -EFAULT;
  1013. goto cleanup1;
  1014. }
  1015. if (ioc->Request.Type.Direction == XFER_READ) {
  1016. /* Copy the data out of the buffer we created */
  1017. BYTE __user *ptr = ioc->buf;
  1018. for (i = 0; i < sg_used; i++) {
  1019. if (copy_to_user
  1020. (ptr, buff[i], buff_size[i])) {
  1021. cmd_free(host, c, 0);
  1022. status = -EFAULT;
  1023. goto cleanup1;
  1024. }
  1025. ptr += buff_size[i];
  1026. }
  1027. }
  1028. cmd_free(host, c, 0);
  1029. status = 0;
  1030. cleanup1:
  1031. if (buff) {
  1032. for (i = 0; i < sg_used; i++)
  1033. kfree(buff[i]);
  1034. kfree(buff);
  1035. }
  1036. kfree(buff_size);
  1037. kfree(ioc);
  1038. return status;
  1039. }
  1040. default:
  1041. return -ENOTTY;
  1042. }
  1043. }
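/* Userspace view of CCISS_PASSTHRU as handled above (illustrative sketch;
 * only fields referenced in this file are shown, and the CDB/Request setup
 * is omitted). The open file descriptor fd and data_buf are assumptions.
 */
#if 0
IOCTL_Command_struct ic;
unsigned char data_buf[512];
memset(&ic, 0, sizeof(ic));
/* ... fill in ic.LUN_info and the rest of ic.Request here ... */
ic.Request.Type.Direction = XFER_READ; /* driver copies data back into ic.buf */
ic.buf = data_buf;
ic.buf_size = sizeof(data_buf);
if (ioctl(fd, CCISS_PASSTHRU, &ic) == 0) {
/* ic.error_info now holds the controller's completion information */
}
#endif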
  1044. static inline void complete_buffers(struct bio *bio, int status)
  1045. {
  1046. while (bio) {
  1047. struct bio *xbh = bio->bi_next;
  1048. int nr_sectors = bio_sectors(bio);
  1049. bio->bi_next = NULL;
  1050. bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
  1051. bio = xbh;
  1052. }
  1053. }
  1054. static void cciss_check_queues(ctlr_info_t *h)
  1055. {
  1056. int start_queue = h->next_to_run;
  1057. int i;
  1058. /* check to see if we have maxed out the number of commands that can
  1059. * be placed on the queue. If so then exit. We do this check here
  1060. * in case the interrupt we serviced was from an ioctl and did not
  1061. * free any new commands.
  1062. */
  1063. if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
  1064. return;
  1065. /* We have room on the queue for more commands. Now we need to queue
  1066. * them up. We will also keep track of the next queue to run so
  1067. * that every queue gets a chance to be started first.
  1068. */
  1069. for (i = 0; i < h->highest_lun + 1; i++) {
  1070. int curr_queue = (start_queue + i) % (h->highest_lun + 1);
  1071. /* make sure the disk has been added and the drive is real
  1072. * because this can be called from the middle of init_one.
  1073. */
  1074. if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
  1075. continue;
  1076. blk_start_queue(h->gendisk[curr_queue]->queue);
  1077. /* check to see if we have maxed out the number of commands
  1078. * that can be placed on the queue.
  1079. */
  1080. if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
  1081. if (curr_queue == start_queue) {
  1082. h->next_to_run =
  1083. (start_queue + 1) % (h->highest_lun + 1);
  1084. break;
  1085. } else {
  1086. h->next_to_run = curr_queue;
  1087. break;
  1088. }
  1089. } else {
  1090. curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
  1091. }
  1092. }
  1093. }
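/* Worked example of the round-robin above: with h->highest_lun == 3 and
 * h->next_to_run == 2, the loop restarts the per-disk queues in the order
 * 2, 3, 0, 1. If the command pool fills up along the way, next_to_run
 * records where to resume so that every queue eventually gets a turn.
 */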
  1094. static void cciss_softirq_done(struct request *rq)
  1095. {
  1096. CommandList_struct *cmd = rq->completion_data;
  1097. ctlr_info_t *h = hba[cmd->ctlr];
  1098. unsigned long flags;
  1099. u64bit temp64;
  1100. int i, ddir;
  1101. if (cmd->Request.Type.Direction == XFER_READ)
  1102. ddir = PCI_DMA_FROMDEVICE;
  1103. else
  1104. ddir = PCI_DMA_TODEVICE;
  1105. /* command did not need to be retried */
  1106. /* unmap the DMA mapping for all the scatter gather elements */
  1107. for (i = 0; i < cmd->Header.SGList; i++) {
  1108. temp64.val32.lower = cmd->SG[i].Addr.lower;
  1109. temp64.val32.upper = cmd->SG[i].Addr.upper;
  1110. pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
  1111. }
  1112. complete_buffers(rq->bio, rq->errors);
  1113. if (blk_fs_request(rq)) {
  1114. const int rw = rq_data_dir(rq);
  1115. disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
  1116. }
  1117. #ifdef CCISS_DEBUG
  1118. printk("Done with %p\n", rq);
  1119. #endif /* CCISS_DEBUG */
  1120. add_disk_randomness(rq->rq_disk);
  1121. spin_lock_irqsave(&h->lock, flags);
  1122. end_that_request_last(rq, rq->errors);
  1123. cmd_free(h, cmd, 1);
  1124. cciss_check_queues(h);
  1125. spin_unlock_irqrestore(&h->lock, flags);
  1126. }
  1127. /* This function will check the usage_count of the drive to be updated/added.
  1128. * If the usage_count is zero then the drive information will be updated and
  1129. * the disk will be re-registered with the kernel. If not then it will be
  1130. * left alone for the next reboot. The exception to this is disk 0 which
  1131. * will always be left registered with the kernel since it is also the
  1132. * controller node. Any changes to disk 0 will show up on the next
  1133. * reboot.
  1134. */
  1135. static void cciss_update_drive_info(int ctlr, int drv_index)
  1136. {
  1137. ctlr_info_t *h = hba[ctlr];
  1138. struct gendisk *disk;
  1139. InquiryData_struct *inq_buff = NULL;
  1140. unsigned int block_size;
  1141. sector_t total_size;
  1142. unsigned long flags = 0;
  1143. int ret = 0;
  1144. /* if the disk already exists then deregister it before proceeding */
  1145. if (h->drv[drv_index].raid_level != -1) {
  1146. spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
  1147. h->drv[drv_index].busy_configuring = 1;
  1148. spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
  1149. ret = deregister_disk(h->gendisk[drv_index],
  1150. &h->drv[drv_index], 0);
  1151. h->drv[drv_index].busy_configuring = 0;
  1152. }
  1153. /* If the disk is in use return */
  1154. if (ret)
  1155. return;
  1156. /* Get information about the disk and modify the driver structure */
  1157. inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
  1158. if (inq_buff == NULL)
  1159. goto mem_msg;
  1160. cciss_read_capacity(ctlr, drv_index, 1,
  1161. &total_size, &block_size);
  1162. /* total size = last LBA + 1 */
  1163. /* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */
  1164. /* so we assume this volume must be >2TB in size */
  1165. if (total_size == (__u32) 0) {
  1166. cciss_read_capacity_16(ctlr, drv_index, 1,
  1167. &total_size, &block_size);
  1168. h->cciss_read = CCISS_READ_16;
  1169. h->cciss_write = CCISS_WRITE_16;
  1170. } else {
  1171. h->cciss_read = CCISS_READ_10;
  1172. h->cciss_write = CCISS_WRITE_10;
  1173. }
  1174. cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
  1175. inq_buff, &h->drv[drv_index]);
  1176. ++h->num_luns;
  1177. disk = h->gendisk[drv_index];
  1178. set_capacity(disk, h->drv[drv_index].nr_blocks);
  1179. /* if it's the controller it's already added */
  1180. if (drv_index) {
  1181. disk->queue = blk_init_queue(do_cciss_request, &h->lock);
  1182. sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
  1183. disk->major = h->major;
  1184. disk->first_minor = drv_index << NWD_SHIFT;
  1185. disk->fops = &cciss_fops;
  1186. disk->private_data = &h->drv[drv_index];
  1187. /* Set up queue information */
  1188. disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
  1189. blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
  1190. /* This is a hardware imposed limit. */
  1191. blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
  1192. /* This is a limit in the driver and could be eliminated. */
  1193. blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
  1194. blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
  1195. blk_queue_softirq_done(disk->queue, cciss_softirq_done);
  1196. disk->queue->queuedata = hba[ctlr];
  1197. blk_queue_hardsect_size(disk->queue,
  1198. hba[ctlr]->drv[drv_index].block_size);
  1199. h->drv[drv_index].queue = disk->queue;
  1200. add_disk(disk);
  1201. }
  1202. freeret:
  1203. kfree(inq_buff);
  1204. return;
  1205. mem_msg:
  1206. printk(KERN_ERR "cciss: out of memory\n");
  1207. goto freeret;
  1208. }
  1209. /* This function will find the first index of the controller's drive array
  1210. * that has a -1 for the raid_level and will return that index. This is
  1211. * where new drives will be added. If the index to be returned is greater
  1212. * than the highest_lun index for the controller then highest_lun is set
  1213. * to this new index. If there are no available indexes then -1 is returned.
  1214. */
  1215. static int cciss_find_free_drive_index(int ctlr)
  1216. {
  1217. int i;
  1218. for (i = 0; i < CISS_MAX_LUN; i++) {
  1219. if (hba[ctlr]->drv[i].raid_level == -1) {
  1220. if (i > hba[ctlr]->highest_lun)
  1221. hba[ctlr]->highest_lun = i;
  1222. return i;
  1223. }
  1224. }
  1225. return -1;
  1226. }
  1227. /* This function will add and remove logical drives from the Logical
  1228. * drive array of the controller and maintain persistency of ordering
  1229. * so that mount points are preserved until the next reboot. This allows
  1230. * for the removal of logical drives in the middle of the drive array
  1231. * without a re-ordering of those drives.
  1232. * INPUT
  1233. * h = The controller to perform the operations on
  1234. * del_disk = The disk to remove if specified. If the value given
  1235. * is NULL then no disk is removed.
  1236. */
  1237. static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
  1238. {
  1239. int ctlr = h->ctlr;
  1240. int num_luns;
  1241. ReportLunData_struct *ld_buff = NULL;
  1242. drive_info_struct *drv = NULL;
  1243. int return_code;
  1244. int listlength = 0;
  1245. int i;
  1246. int drv_found;
  1247. int drv_index = 0;
  1248. __u32 lunid = 0;
  1249. unsigned long flags;
  1250. /* Set busy_configuring flag for this operation */
  1251. spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
  1252. if (h->busy_configuring) {
  1253. spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
  1254. return -EBUSY;
  1255. }
  1256. h->busy_configuring = 1;
  1257. /* if del_disk is NULL then we are being called to add a new disk
  1258. * and update the logical drive table. If it is not NULL then
  1259. * we will check if the disk is in use or not.
  1260. */
  1261. if (del_disk != NULL) {
  1262. drv = get_drv(del_disk);
  1263. drv->busy_configuring = 1;
  1264. spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
  1265. return_code = deregister_disk(del_disk, drv, 1);
  1266. drv->busy_configuring = 0;
  1267. h->busy_configuring = 0;
  1268. return return_code;
  1269. } else {
  1270. spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
  1271. if (!capable(CAP_SYS_RAWIO))
  1272. return -EPERM;
  1273. ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
  1274. if (ld_buff == NULL)
  1275. goto mem_msg;
  1276. return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
  1277. sizeof(ReportLunData_struct), 0,
  1278. 0, 0, TYPE_CMD);
  1279. if (return_code == IO_OK) {
  1280. listlength =
  1281. be32_to_cpu(*(__u32 *) ld_buff->LUNListLength);
  1282. } else { /* reading number of logical volumes failed */
  1283. printk(KERN_WARNING "cciss: report logical volume"
  1284. " command failed\n");
  1285. listlength = 0;
  1286. goto freeret;
  1287. }
  1288. num_luns = listlength / 8; /* 8 bytes per entry */
  1289. if (num_luns > CISS_MAX_LUN) {
  1290. num_luns = CISS_MAX_LUN;
  1291. printk(KERN_WARNING "cciss: more luns configured"
  1292. " on controller than can be handled by"
  1293. " this driver.\n");
  1294. }
/* Compare the controller's drive array to the driver's drive array.
  1296. * Check for updates in the drive information and any new drives
  1297. * on the controller.
  1298. */
  1299. for (i = 0; i < num_luns; i++) {
  1300. int j;
  1301. drv_found = 0;
  1302. lunid = (0xff &
  1303. (unsigned int)(ld_buff->LUN[i][3])) << 24;
  1304. lunid |= (0xff &
  1305. (unsigned int)(ld_buff->LUN[i][2])) << 16;
  1306. lunid |= (0xff &
  1307. (unsigned int)(ld_buff->LUN[i][1])) << 8;
  1308. lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
/* Find out if the LUN is already in the drive array
 * of the controller. If so, update its info if it is
 * not in use. If it does not exist then find
 * the first free index and add it.
 */
  1314. for (j = 0; j <= h->highest_lun; j++) {
  1315. if (h->drv[j].LunID == lunid) {
  1316. drv_index = j;
  1317. drv_found = 1;
  1318. }
  1319. }
  1320. /* check if the drive was found already in the array */
  1321. if (!drv_found) {
  1322. drv_index = cciss_find_free_drive_index(ctlr);
  1323. if (drv_index == -1)
  1324. goto freeret;
  1325. /*Check if the gendisk needs to be allocated */
  1326. if (!h->gendisk[drv_index]){
  1327. h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
  1328. if (!h->gendisk[drv_index]){
  1329. printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
  1330. goto mem_msg;
  1331. }
  1332. }
  1333. }
  1334. h->drv[drv_index].LunID = lunid;
  1335. cciss_update_drive_info(ctlr, drv_index);
  1336. } /* end for */
  1337. } /* end else */
  1338. freeret:
  1339. kfree(ld_buff);
  1340. h->busy_configuring = 0;
  1341. /* We return -1 here to tell the ACU that we have registered/updated
  1342. * all of the drives that we can and to keep it from calling us
  1343. * additional times.
  1344. */
  1345. return -1;
  1346. mem_msg:
  1347. printk(KERN_ERR "cciss: out of memory\n");
  1348. goto freeret;
  1349. }
/* This function will deregister the disk and its queue from the
 * kernel. It must be called with the controller lock held and the
 * drv structure's busy_configuring flag set. Its parameters are:
  1353. *
  1354. * disk = This is the disk to be deregistered
  1355. * drv = This is the drive_info_struct associated with the disk to be
  1356. * deregistered. It contains information about the disk used
  1357. * by the driver.
  1358. * clear_all = This flag determines whether or not the disk information
  1359. * is going to be completely cleared out and the highest_lun
  1360. * reset. Sometimes we want to clear out information about
  1361. * the disk in preparation for re-adding it. In this case
  1362. * the highest_lun should be left unchanged and the LunID
  1363. * should not be cleared.
  1364. */
  1365. static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
  1366. int clear_all)
  1367. {
  1368. int i;
  1369. ctlr_info_t *h = get_host(disk);
  1370. if (!capable(CAP_SYS_RAWIO))
  1371. return -EPERM;
/* make sure logical volume is NOT in use */
  1373. if (clear_all || (h->gendisk[0] == disk)) {
  1374. if (drv->usage_count > 1)
  1375. return -EBUSY;
  1376. } else if (drv->usage_count > 0)
  1377. return -EBUSY;
/* invalidate the devices and deregister the disk. If it is disk
 * zero, do not deregister it but just zero out its values. This
 * allows us to delete disk zero but keep the controller registered.
 */
  1382. if (h->gendisk[0] != disk) {
  1383. if (disk) {
  1384. request_queue_t *q = disk->queue;
  1385. if (disk->flags & GENHD_FL_UP)
  1386. del_gendisk(disk);
  1387. if (q) {
  1388. blk_cleanup_queue(q);
  1389. /* Set drv->queue to NULL so that we do not try
  1390. * to call blk_start_queue on this queue in the
  1391. * interrupt handler
  1392. */
  1393. drv->queue = NULL;
  1394. }
  1395. /* If clear_all is set then we are deleting the logical
  1396. * drive, not just refreshing its info. For drives
  1397. * other than disk 0 we will call put_disk. We do not
  1398. * do this for disk 0 as we need it to be able to
  1399. * configure the controller.
  1400. */
  1401. if (clear_all){
/* This isn't pretty, but we need to find the
 * disk in our array and NULL out the pointer.
 * This is so that we will call alloc_disk if
 * this index is used again later.
 */
  1407. for (i=0; i < CISS_MAX_LUN; i++){
  1408. if(h->gendisk[i] == disk){
  1409. h->gendisk[i] = NULL;
  1410. break;
  1411. }
  1412. }
  1413. put_disk(disk);
  1414. }
  1415. }
  1416. } else {
  1417. set_capacity(disk, 0);
  1418. }
  1419. --h->num_luns;
  1420. /* zero out the disk size info */
  1421. drv->nr_blocks = 0;
  1422. drv->block_size = 0;
  1423. drv->heads = 0;
  1424. drv->sectors = 0;
  1425. drv->cylinders = 0;
  1426. drv->raid_level = -1; /* This can be used as a flag variable to
  1427. * indicate that this element of the drive
  1428. * array is free.
  1429. */
  1430. if (clear_all) {
  1431. /* check to see if it was the last disk */
  1432. if (drv == h->drv + h->highest_lun) {
/* if so, find the new highest lun */
  1434. int i, newhighest = -1;
  1435. for (i = 0; i < h->highest_lun; i++) {
  1436. /* if the disk has size > 0, it is available */
  1437. if (h->drv[i].heads)
  1438. newhighest = i;
  1439. }
  1440. h->highest_lun = newhighest;
  1441. }
  1442. drv->LunID = 0;
  1443. }
  1444. return 0;
  1445. }
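/* fill_cmd() populates a CommandList_struct for either a controller
 * command (cmd_type == TYPE_CMD) or a driver-to-controller message
 * (cmd_type == TYPE_MSG). It builds the CDB for the requested opcode,
 * sets up the addressing (controller, logical volume log_unit, or
 * peripheral scsi3addr) and, when a data buffer is supplied, maps it
 * for DMA into the single scatter-gather entry. Returns IO_OK on
 * success or IO_ERROR for an unknown command or message type.
 */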
  1446. static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
  1447. 1: address logical volume log_unit,
  1448. 2: periph device address is scsi3addr */
  1449. unsigned int log_unit, __u8 page_code,
  1450. unsigned char *scsi3addr, int cmd_type)
  1451. {
  1452. ctlr_info_t *h = hba[ctlr];
  1453. u64bit buff_dma_handle;
  1454. int status = IO_OK;
  1455. c->cmd_type = CMD_IOCTL_PEND;
  1456. c->Header.ReplyQueue = 0;
  1457. if (buff != NULL) {
  1458. c->Header.SGList = 1;
  1459. c->Header.SGTotal = 1;
  1460. } else {
  1461. c->Header.SGList = 0;
  1462. c->Header.SGTotal = 0;
  1463. }
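/* Use the command's bus address as its tag so the completion returned
 * by the controller can be matched back to this command. */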
  1464. c->Header.Tag.lower = c->busaddr;
  1465. c->Request.Type.Type = cmd_type;
  1466. if (cmd_type == TYPE_CMD) {
  1467. switch (cmd) {
  1468. case CISS_INQUIRY:
/* If use_unit_num == 0 the command goes to the controller,
   so it's a physical command:
   mode = 0, target = 0, and we have nothing to write.
   If use_unit_num == 1:
   mode = 1 (volume set addressing), target = LunID.
   If use_unit_num == 2:
   mode = 0 (periph dev addr), target = scsi3addr */
  1476. if (use_unit_num == 1) {
  1477. c->Header.LUN.LogDev.VolId =
  1478. h->drv[log_unit].LunID;
  1479. c->Header.LUN.LogDev.Mode = 1;
  1480. } else if (use_unit_num == 2) {
  1481. memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
  1482. 8);
  1483. c->Header.LUN.LogDev.Mode = 0;
  1484. }
  1485. /* are we trying to read a vital product page */
  1486. if (page_code != 0) {
  1487. c->Request.CDB[1] = 0x01;
  1488. c->Request.CDB[2] = page_code;
  1489. }
  1490. c->Request.CDBLen = 6;
  1491. c->Request.Type.Attribute = ATTR_SIMPLE;
  1492. c->Request.Type.Direction = XFER_READ;
  1493. c->Request.Timeout = 0;
  1494. c->Request.CDB[0] = CISS_INQUIRY;
  1495. c->Request.CDB[4] = size & 0xFF;
  1496. break;
  1497. case CISS_REPORT_LOG:
  1498. case CISS_REPORT_PHYS:
/* Talking to the controller, so it's a physical command:
   mode = 00, target = 0. Nothing to write.
*/
  1502. c->Request.CDBLen = 12;
  1503. c->Request.Type.Attribute = ATTR_SIMPLE;
  1504. c->Request.Type.Direction = XFER_READ;
  1505. c->Request.Timeout = 0;
  1506. c->Request.CDB[0] = cmd;
  1507. c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
  1508. c->Request.CDB[7] = (size >> 16) & 0xFF;
  1509. c->Request.CDB[8] = (size >> 8) & 0xFF;
  1510. c->Request.CDB[9] = size & 0xFF;
  1511. break;
  1512. case CCISS_READ_CAPACITY:
  1513. c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
  1514. c->Header.LUN.LogDev.Mode = 1;
  1515. c->Request.CDBLen = 10;
  1516. c->Request.Type.Attribute = ATTR_SIMPLE;
  1517. c->Request.Type.Direction = XFER_READ;
  1518. c->Request.Timeout = 0;
  1519. c->Request.CDB[0] = cmd;
  1520. break;
  1521. case CCISS_READ_CAPACITY_16:
  1522. c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
  1523. c->Header.LUN.LogDev.Mode = 1;
  1524. c->Request.CDBLen = 16;
  1525. c->Request.Type.Attribute = ATTR_SIMPLE;
  1526. c->Request.Type.Direction = XFER_READ;
  1527. c->Request.Timeout = 0;
  1528. c->Request.CDB[0] = cmd;
  1529. c->Request.CDB[1] = 0x10;
  1530. c->Request.CDB[10] = (size >> 24) & 0xFF;
  1531. c->Request.CDB[11] = (size >> 16) & 0xFF;
  1532. c->Request.CDB[12] = (size >> 8) & 0xFF;
  1533. c->Request.CDB[13] = size & 0xFF;
  1536. break;
  1537. case CCISS_CACHE_FLUSH:
  1538. c->Request.CDBLen = 12;
  1539. c->Request.Type.Attribute = ATTR_SIMPLE;
  1540. c->Request.Type.Direction = XFER_WRITE;
  1541. c->Request.Timeout = 0;
  1542. c->Request.CDB[0] = BMIC_WRITE;
  1543. c->Request.CDB[6] = BMIC_CACHE_FLUSH;
  1544. break;
  1545. default:
printk(KERN_WARNING
"cciss%d: Unknown Command 0x%02x\n", ctlr, cmd);
  1548. return IO_ERROR;
  1549. }
  1550. } else if (cmd_type == TYPE_MSG) {
  1551. switch (cmd) {
  1552. case 0: /* ABORT message */
  1553. c->Request.CDBLen = 12;
  1554. c->Request.Type.Attribute = ATTR_SIMPLE;
  1555. c->Request.Type.Direction = XFER_WRITE;
  1556. c->Request.Timeout = 0;
  1557. c->Request.CDB[0] = cmd; /* abort */
  1558. c->Request.CDB[1] = 0; /* abort a command */
  1559. /* buff contains the tag of the command to abort */
  1560. memcpy(&c->Request.CDB[4], buff, 8);
  1561. break;
  1562. case 1: /* RESET message */
  1563. c->Request.CDBLen = 12;
  1564. c->Request.Type.Attribute = ATTR_SIMPLE;
  1565. c->Request.Type.Direction = XFER_WRITE;
  1566. c->Request.Timeout = 0;
  1567. memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
  1568. c->Request.CDB[0] = cmd; /* reset */
  1569. c->Request.CDB[1] = 0x04; /* reset a LUN */
  1570. break;
  1571. case 3: /* No-Op message */
  1572. c->Request.CDBLen = 1;
  1573. c->Request.Type.Attribute = ATTR_SIMPLE;
  1574. c->Request.Type.Direction = XFER_WRITE;
  1575. c->Request.Timeout = 0;
  1576. c->Request.CDB[0] = cmd;
  1577. break;
  1578. default:
  1579. printk(KERN_WARNING
  1580. "cciss%d: unknown message type %d\n", ctlr, cmd);
  1581. return IO_ERROR;
  1582. }
  1583. } else {
  1584. printk(KERN_WARNING
  1585. "cciss%d: unknown command type %d\n", ctlr, cmd_type);
  1586. return IO_ERROR;
  1587. }
  1588. /* Fill in the scatter gather information */
  1589. if (size > 0) {
  1590. buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
  1591. buff, size,
  1592. PCI_DMA_BIDIRECTIONAL);
  1593. c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
  1594. c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
  1595. c->SG[0].Len = size;
  1596. c->SG[0].Ext = 0; /* we are not chaining */
  1597. }
  1598. return status;
  1599. }
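/* sendcmd_withirq() issues a command built by fill_cmd(), places it on
 * the controller request queue and sleeps on a completion until the
 * interrupt handler signals it. Unsolicited aborts are retried up to
 * MAX_CMD_RETRIES times; most other error statuses map to IO_ERROR.
 */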
  1600. static int sendcmd_withirq(__u8 cmd,
  1601. int ctlr,
  1602. void *buff,
  1603. size_t size,
  1604. unsigned int use_unit_num,
  1605. unsigned int log_unit, __u8 page_code, int cmd_type)
  1606. {
  1607. ctlr_info_t *h = hba[ctlr];
  1608. CommandList_struct *c;
  1609. u64bit buff_dma_handle;
  1610. unsigned long flags;
  1611. int return_status;
  1612. DECLARE_COMPLETION_ONSTACK(wait);
  1613. if ((c = cmd_alloc(h, 0)) == NULL)
  1614. return -ENOMEM;
  1615. return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
  1616. log_unit, page_code, NULL, cmd_type);
  1617. if (return_status != IO_OK) {
  1618. cmd_free(h, c, 0);
  1619. return return_status;
  1620. }
  1621. resend_cmd2:
  1622. c->waiting = &wait;
  1623. /* Put the request on the tail of the queue and send it */
  1624. spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
  1625. addQ(&h->reqQ, c);
  1626. h->Qdepth++;
  1627. start_io(h);
  1628. spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
  1629. wait_for_completion(&wait);
  1630. if (c->err_info->CommandStatus != 0) { /* an error has occurred */
  1631. switch (c->err_info->CommandStatus) {
  1632. case CMD_TARGET_STATUS:
  1633. printk(KERN_WARNING "cciss: cmd %p has "
  1634. " completed with errors\n", c);
  1635. if (c->err_info->ScsiStatus) {
  1636. printk(KERN_WARNING "cciss: cmd %p "
  1637. "has SCSI Status = %x\n",
  1638. c, c->err_info->ScsiStatus);
  1639. }
  1640. break;
  1641. case CMD_DATA_UNDERRUN:
  1642. case CMD_DATA_OVERRUN:
  1643. /* expected for inquire and report lun commands */
  1644. break;
  1645. case CMD_INVALID:
  1646. printk(KERN_WARNING "cciss: Cmd %p is "
  1647. "reported invalid\n", c);
  1648. return_status = IO_ERROR;
  1649. break;
  1650. case CMD_PROTOCOL_ERR:
  1651. printk(KERN_WARNING "cciss: cmd %p has "
  1652. "protocol error \n", c);
  1653. return_status = IO_ERROR;
  1654. break;
  1655. case CMD_HARDWARE_ERR:
  1656. printk(KERN_WARNING "cciss: cmd %p had "
  1657. " hardware error\n", c);
  1658. return_status = IO_ERROR;
  1659. break;
  1660. case CMD_CONNECTION_LOST:
  1661. printk(KERN_WARNING "cciss: cmd %p had "
  1662. "connection lost\n", c);
  1663. return_status = IO_ERROR;
  1664. break;
  1665. case CMD_ABORTED:
  1666. printk(KERN_WARNING "cciss: cmd %p was "
  1667. "aborted\n", c);
  1668. return_status = IO_ERROR;
  1669. break;
  1670. case CMD_ABORT_FAILED:
  1671. printk(KERN_WARNING "cciss: cmd %p reports "
  1672. "abort failed\n", c);
  1673. return_status = IO_ERROR;
  1674. break;
  1675. case CMD_UNSOLICITED_ABORT:
  1676. printk(KERN_WARNING
  1677. "cciss%d: unsolicited abort %p\n", ctlr, c);
  1678. if (c->retry_count < MAX_CMD_RETRIES) {
  1679. printk(KERN_WARNING
  1680. "cciss%d: retrying %p\n", ctlr, c);
  1681. c->retry_count++;
  1682. /* erase the old error information */
  1683. memset(c->err_info, 0,
  1684. sizeof(ErrorInfo_struct));
  1685. return_status = IO_OK;
  1686. INIT_COMPLETION(wait);
  1687. goto resend_cmd2;
  1688. }
  1689. return_status = IO_ERROR;
  1690. break;
  1691. default:
  1692. printk(KERN_WARNING "cciss: cmd %p returned "
  1693. "unknown status %x\n", c,
  1694. c->err_info->CommandStatus);
  1695. return_status = IO_ERROR;
  1696. }
  1697. }
  1698. /* unlock the buffers from DMA */
  1699. buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
  1700. buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
  1701. pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
  1702. c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
  1703. cmd_free(h, c, 0);
  1704. return return_status;
  1705. }
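/* cciss_geometry_inquiry() reads inquiry page 0xC1 for a logical
 * volume to obtain heads, sectors per track and RAID level, then
 * derives the cylinder count from the total size. Volumes that do
 * not report geometry fall back to 255 heads / 32 sectors.
 */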
  1706. static void cciss_geometry_inquiry(int ctlr, int logvol,
  1707. int withirq, sector_t total_size,
  1708. unsigned int block_size,
  1709. InquiryData_struct *inq_buff,
  1710. drive_info_struct *drv)
  1711. {
  1712. int return_code;
  1713. unsigned long t;
  1714. memset(inq_buff, 0, sizeof(InquiryData_struct));
  1715. if (withirq)
  1716. return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
  1717. inq_buff, sizeof(*inq_buff), 1,
  1718. logvol, 0xC1, TYPE_CMD);
  1719. else
  1720. return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
  1721. sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
  1722. TYPE_CMD);
  1723. if (return_code == IO_OK) {
  1724. if (inq_buff->data_byte[8] == 0xFF) {
  1725. printk(KERN_WARNING
  1726. "cciss: reading geometry failed, volume "
  1727. "does not support reading geometry\n");
  1728. drv->heads = 255;
  1729. drv->sectors = 32; // Sectors per track
  1730. drv->raid_level = RAID_UNKNOWN;
  1731. } else {
  1732. drv->heads = inq_buff->data_byte[6];
  1733. drv->sectors = inq_buff->data_byte[7];
  1734. drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
  1735. drv->cylinders += inq_buff->data_byte[5];
  1736. drv->raid_level = inq_buff->data_byte[8];
  1737. }
  1738. drv->block_size = block_size;
  1739. drv->nr_blocks = total_size;
  1740. t = drv->heads * drv->sectors;
  1741. if (t > 1) {
  1742. unsigned rem = sector_div(total_size, t);
  1743. if (rem)
  1744. total_size++;
  1745. drv->cylinders = total_size;
  1746. }
  1747. } else { /* Get geometry failed */
  1748. printk(KERN_WARNING "cciss: reading geometry failed\n");
  1749. }
  1750. printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
  1751. drv->heads, drv->sectors, drv->cylinders);
  1752. }
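/* cciss_read_capacity() issues the 10-byte read capacity command.
 * The controller returns the last LBA, so total_size is that value
 * plus one; on failure the size is reported as 0 with a default
 * block size.
 */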
  1753. static void
  1754. cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
  1755. unsigned int *block_size)
  1756. {
  1757. ReadCapdata_struct *buf;
  1758. int return_code;
  1759. buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
  1760. if (buf == NULL) {
  1761. printk(KERN_WARNING "cciss: out of memory\n");
  1762. return;
  1763. }
  1764. memset(buf, 0, sizeof(ReadCapdata_struct));
  1765. if (withirq)
  1766. return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
  1767. ctlr, buf, sizeof(ReadCapdata_struct),
  1768. 1, logvol, 0, TYPE_CMD);
  1769. else
  1770. return_code = sendcmd(CCISS_READ_CAPACITY,
  1771. ctlr, buf, sizeof(ReadCapdata_struct),
  1772. 1, logvol, 0, NULL, TYPE_CMD);
  1773. if (return_code == IO_OK) {
  1774. *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1;
  1775. *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
  1776. } else { /* read capacity command failed */
  1777. printk(KERN_WARNING "cciss: read capacity failed\n");
  1778. *total_size = 0;
  1779. *block_size = BLOCK_SIZE;
  1780. }
  1781. if (*total_size != (__u32) 0)
  1782. printk(KERN_INFO " blocks= %llu block_size= %d\n",
  1783. (unsigned long long)*total_size, *block_size);
  1784. kfree(buf);
  1785. return;
  1786. }
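/* cciss_read_capacity_16() is the 16-byte read capacity variant used
 * for volumes larger than 2TB; it returns a 64-bit last LBA which is
 * converted to a block count the same way.
 */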
  1787. static void
  1788. cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
  1789. {
  1790. ReadCapdata_struct_16 *buf;
  1791. int return_code;
  1792. buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
  1793. if (buf == NULL) {
  1794. printk(KERN_WARNING "cciss: out of memory\n");
  1795. return;
  1796. }
  1797. memset(buf, 0, sizeof(ReadCapdata_struct_16));
  1798. if (withirq) {
  1799. return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
  1800. ctlr, buf, sizeof(ReadCapdata_struct_16),
  1801. 1, logvol, 0, TYPE_CMD);
  1802. }
  1803. else {
  1804. return_code = sendcmd(CCISS_READ_CAPACITY_16,
  1805. ctlr, buf, sizeof(ReadCapdata_struct_16),
  1806. 1, logvol, 0, NULL, TYPE_CMD);
  1807. }
  1808. if (return_code == IO_OK) {
  1809. *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1;
  1810. *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
  1811. } else { /* read capacity command failed */
  1812. printk(KERN_WARNING "cciss: read capacity failed\n");
  1813. *total_size = 0;
  1814. *block_size = BLOCK_SIZE;
  1815. }
  1816. printk(KERN_INFO " blocks= %llu block_size= %d\n",
  1817. (unsigned long long)*total_size, *block_size);
  1818. kfree(buf);
  1819. return;
  1820. }
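/* cciss_revalidate() refreshes the size and geometry of a logical
 * volume: it locates the volume by LunID, re-reads the capacity with
 * whichever CDB size the controller is currently using (10 or 16 byte),
 * re-runs the geometry inquiry and updates the block layer's view of
 * the sector size and disk capacity.
 */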
  1821. static int cciss_revalidate(struct gendisk *disk)
  1822. {
  1823. ctlr_info_t *h = get_host(disk);
  1824. drive_info_struct *drv = get_drv(disk);
  1825. int logvol;
  1826. int FOUND = 0;
  1827. unsigned int block_size;
  1828. sector_t total_size;
  1829. InquiryData_struct *inq_buff = NULL;
  1830. for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
  1831. if (h->drv[logvol].LunID == drv->LunID) {
  1832. FOUND = 1;
  1833. break;
  1834. }
  1835. }
  1836. if (!FOUND)
  1837. return 1;
  1838. inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
  1839. if (inq_buff == NULL) {
  1840. printk(KERN_WARNING "cciss: out of memory\n");
  1841. return 1;
  1842. }
  1843. if (h->cciss_read == CCISS_READ_10) {
  1844. cciss_read_capacity(h->ctlr, logvol, 1,
  1845. &total_size, &block_size);
  1846. } else {
  1847. cciss_read_capacity_16(h->ctlr, logvol, 1,
  1848. &total_size, &block_size);
  1849. }
  1850. cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
  1851. inq_buff, drv);
  1852. blk_queue_hardsect_size(drv->queue, drv->block_size);
  1853. set_capacity(disk, drv->nr_blocks);
  1854. kfree(inq_buff);
  1855. return 0;
  1856. }
  1857. /*
  1858. * Wait polling for a command to complete.
  1859. * The memory mapped FIFO is polled for the completion.
  1860. * Used only at init time, interrupts from the HBA are disabled.
  1861. */
  1862. static unsigned long pollcomplete(int ctlr)
  1863. {
  1864. unsigned long done;
  1865. int i;
  1866. /* Wait (up to 20 seconds) for a command to complete */
  1867. for (i = 20 * HZ; i > 0; i--) {
  1868. done = hba[ctlr]->access.command_completed(hba[ctlr]);
  1869. if (done == FIFO_EMPTY)
  1870. schedule_timeout_uninterruptible(1);
  1871. else
  1872. return done;
  1873. }
  1874. /* Invalid address to tell caller we ran out of time */
  1875. return 1;
  1876. }
  1877. static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
  1878. {
  1879. /* We get in here if sendcmd() is polling for completions
  1880. and gets some command back that it wasn't expecting --
  1881. something other than that which it just sent down.
  1882. Ordinarily, that shouldn't happen, but it can happen when
  1883. the scsi tape stuff gets into error handling mode, and
  1884. starts using sendcmd() to try to abort commands and
  1885. reset tape drives. In that case, sendcmd may pick up
  1886. completions of commands that were sent to logical drives
  1887. through the block i/o system, or cciss ioctls completing, etc.
  1888. In that case, we need to save those completions for later
  1889. processing by the interrupt handler.
  1890. */
  1891. #ifdef CONFIG_CISS_SCSI_TAPE
  1892. struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
/* If it's not the scsi tape stuff doing error handling (abort */
/* or reset), then we don't expect anything weird. */
  1895. if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
  1896. #endif
  1897. printk(KERN_WARNING "cciss cciss%d: SendCmd "
  1898. "Invalid command list address returned! (%lx)\n",
  1899. ctlr, complete);
  1900. /* not much we can do. */
  1901. #ifdef CONFIG_CISS_SCSI_TAPE
  1902. return 1;
  1903. }
  1904. /* We've sent down an abort or reset, but something else
  1905. has completed */
  1906. if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
  1907. /* Uh oh. No room to save it for later... */
  1908. printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
  1909. "reject list overflow, command lost!\n", ctlr);
  1910. return 1;
  1911. }
  1912. /* Save it for later */
  1913. srl->complete[srl->ncompletions] = complete;
  1914. srl->ncompletions++;
  1915. #endif
  1916. return 0;
  1917. }
  1918. /*
  1919. * Send a command to the controller, and wait for it to complete.
  1920. * Only used at init time.
  1921. */
  1922. static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
  1923. 1: address logical volume log_unit,
  1924. 2: periph device address is scsi3addr */
  1925. unsigned int log_unit,
  1926. __u8 page_code, unsigned char *scsi3addr, int cmd_type)
  1927. {
  1928. CommandList_struct *c;
  1929. int i;
  1930. unsigned long complete;
  1931. ctlr_info_t *info_p = hba[ctlr];
  1932. u64bit buff_dma_handle;
  1933. int status, done = 0;
  1934. if ((c = cmd_alloc(info_p, 1)) == NULL) {
  1935. printk(KERN_WARNING "cciss: unable to get memory");
  1936. return IO_ERROR;
  1937. }
  1938. status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
  1939. log_unit, page_code, scsi3addr, cmd_type);
  1940. if (status != IO_OK) {
  1941. cmd_free(info_p, c, 1);
  1942. return status;
  1943. }
  1944. resend_cmd1:
  1945. /*
  1946. * Disable interrupt
  1947. */
  1948. #ifdef CCISS_DEBUG
  1949. printk(KERN_DEBUG "cciss: turning intr off\n");
  1950. #endif /* CCISS_DEBUG */
  1951. info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
  1952. /* Make sure there is room in the command FIFO */
  1953. /* Actually it should be completely empty at this time */
  1954. /* unless we are in here doing error handling for the scsi */
  1955. /* tape side of the driver. */
  1956. for (i = 200000; i > 0; i--) {
  1957. /* if fifo isn't full go */
  1958. if (!(info_p->access.fifo_full(info_p))) {
  1959. break;
  1960. }
  1961. udelay(10);
  1962. printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
  1963. " waiting!\n", ctlr);
  1964. }
  1965. /*
  1966. * Send the cmd
  1967. */
  1968. info_p->access.submit_command(info_p, c);
  1969. done = 0;
  1970. do {
  1971. complete = pollcomplete(ctlr);
  1972. #ifdef CCISS_DEBUG
  1973. printk(KERN_DEBUG "cciss: command completed\n");
  1974. #endif /* CCISS_DEBUG */
  1975. if (complete == 1) {
  1976. printk(KERN_WARNING
"cciss cciss%d: SendCmd timed out, "
  1978. "No command list address returned!\n", ctlr);
  1979. status = IO_ERROR;
  1980. done = 1;
  1981. break;
  1982. }
  1983. /* This will need to change for direct lookup completions */
  1984. if ((complete & CISS_ERROR_BIT)
  1985. && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
/* if data overrun or underrun on Report command
   ignore it
*/
  1989. if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
  1990. (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
  1991. (c->Request.CDB[0] == CISS_INQUIRY)) &&
  1992. ((c->err_info->CommandStatus ==
  1993. CMD_DATA_OVERRUN) ||
  1994. (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
  1995. )) {
  1996. complete = c->busaddr;
  1997. } else {
  1998. if (c->err_info->CommandStatus ==
  1999. CMD_UNSOLICITED_ABORT) {
  2000. printk(KERN_WARNING "cciss%d: "
  2001. "unsolicited abort %p\n",
  2002. ctlr, c);
  2003. if (c->retry_count < MAX_CMD_RETRIES) {
  2004. printk(KERN_WARNING
  2005. "cciss%d: retrying %p\n",
  2006. ctlr, c);
  2007. c->retry_count++;
  2008. /* erase the old error */
  2009. /* information */
  2010. memset(c->err_info, 0,
  2011. sizeof
  2012. (ErrorInfo_struct));
  2013. goto resend_cmd1;
  2014. } else {
  2015. printk(KERN_WARNING
  2016. "cciss%d: retried %p too "
  2017. "many times\n", ctlr, c);
  2018. status = IO_ERROR;
  2019. goto cleanup1;
  2020. }
  2021. } else if (c->err_info->CommandStatus ==
  2022. CMD_UNABORTABLE) {
  2023. printk(KERN_WARNING
  2024. "cciss%d: command could not be aborted.\n",
  2025. ctlr);
  2026. status = IO_ERROR;
  2027. goto cleanup1;
  2028. }
printk(KERN_WARNING "cciss cciss%d: sendcmd"
  2030. " Error %x \n", ctlr,
  2031. c->err_info->CommandStatus);
printk(KERN_WARNING "cciss cciss%d: sendcmd"
  2033. " offensive info\n"
  2034. " size %x\n num %x value %x\n",
  2035. ctlr,
  2036. c->err_info->MoreErrInfo.Invalid_Cmd.
  2037. offense_size,
  2038. c->err_info->MoreErrInfo.Invalid_Cmd.
  2039. offense_num,
  2040. c->err_info->MoreErrInfo.Invalid_Cmd.
  2041. offense_value);
  2042. status = IO_ERROR;
  2043. goto cleanup1;
  2044. }
  2045. }
  2046. /* This will need changing for direct lookup completions */
  2047. if (complete != c->busaddr) {
  2048. if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
  2049. BUG(); /* we are pretty much hosed if we get here. */
  2050. }
  2051. continue;
  2052. } else
  2053. done = 1;
  2054. } while (!done);
  2055. cleanup1:
  2056. /* unlock the data buffer from DMA */
  2057. buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
  2058. buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
  2059. pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
  2060. c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
  2061. #ifdef CONFIG_CISS_SCSI_TAPE
  2062. /* if we saved some commands for later, process them now. */
  2063. if (info_p->scsi_rejects.ncompletions > 0)
  2064. do_cciss_intr(0, info_p);
  2065. #endif
  2066. cmd_free(info_p, c, 1);
  2067. return status;
  2068. }
  2069. /*
  2070. * Map (physical) PCI mem into (virtual) kernel space
  2071. */
  2072. static void __iomem *remap_pci_mem(ulong base, ulong size)
  2073. {
  2074. ulong page_base = ((ulong) base) & PAGE_MASK;
  2075. ulong page_offs = ((ulong) base) - page_base;
  2076. void __iomem *page_remapped = ioremap(page_base, page_offs + size);
  2077. return page_remapped ? (page_remapped + page_offs) : NULL;
  2078. }
  2079. /*
* Takes jobs off the request Q and sends them to the hardware, then puts
* them on the completion Q to wait for completion.
  2082. */
  2083. static void start_io(ctlr_info_t *h)
  2084. {
  2085. CommandList_struct *c;
  2086. while ((c = h->reqQ) != NULL) {
  2087. /* can't do anything if fifo is full */
  2088. if ((h->access.fifo_full(h))) {
  2089. printk(KERN_WARNING "cciss: fifo full\n");
  2090. break;
  2091. }
  2092. /* Get the first entry from the Request Q */
  2093. removeQ(&(h->reqQ), c);
  2094. h->Qdepth--;
/* Tell the controller to execute the command */
  2096. h->access.submit_command(h, c);
  2097. /* Put job onto the completed Q */
  2098. addQ(&(h->cmpQ), c);
  2099. }
  2100. }
  2101. /* Assumes that CCISS_LOCK(h->ctlr) is held. */
  2102. /* Zeros out the error record and then resends the command back */
  2103. /* to the controller */
  2104. static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
  2105. {
  2106. /* erase the old error information */
  2107. memset(c->err_info, 0, sizeof(ErrorInfo_struct));
  2108. /* add it to software queue and then send it to the controller */
  2109. addQ(&(h->reqQ), c);
  2110. h->Qdepth++;
  2111. if (h->Qdepth > h->maxQsinceinit)
  2112. h->maxQsinceinit = h->Qdepth;
  2113. start_io(h);
  2114. }
  2115. /* checks the status of the job and calls complete buffers to mark all
  2116. * buffers for the completed job. Note that this function does not need
  2117. * to hold the hba/queue lock.
  2118. */
  2119. static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
  2120. int timeout)
  2121. {
  2122. int status = 1;
  2123. int retry_cmd = 0;
  2124. if (timeout)
  2125. status = 0;
  2126. if (cmd->err_info->CommandStatus != 0) { /* an error has occurred */
  2127. switch (cmd->err_info->CommandStatus) {
  2128. unsigned char sense_key;
  2129. case CMD_TARGET_STATUS:
  2130. status = 0;
  2131. if (cmd->err_info->ScsiStatus == 0x02) {
  2132. printk(KERN_WARNING "cciss: cmd %p "
  2133. "has CHECK CONDITION "
  2134. " byte 2 = 0x%x\n", cmd,
  2135. cmd->err_info->SenseInfo[2]
  2136. );
  2137. /* check the sense key */
  2138. sense_key = 0xf & cmd->err_info->SenseInfo[2];
  2139. /* no status or recovered error */
  2140. if ((sense_key == 0x0) || (sense_key == 0x1)) {
  2141. status = 1;
  2142. }
  2143. } else {
  2144. printk(KERN_WARNING "cciss: cmd %p "
  2145. "has SCSI Status 0x%x\n",
  2146. cmd, cmd->err_info->ScsiStatus);
  2147. }
  2148. break;
  2149. case CMD_DATA_UNDERRUN:
  2150. printk(KERN_WARNING "cciss: cmd %p has"
  2151. " completed with data underrun "
  2152. "reported\n", cmd);
  2153. break;
  2154. case CMD_DATA_OVERRUN:
  2155. printk(KERN_WARNING "cciss: cmd %p has"
  2156. " completed with data overrun "
  2157. "reported\n", cmd);
  2158. break;
  2159. case CMD_INVALID:
  2160. printk(KERN_WARNING "cciss: cmd %p is "
  2161. "reported invalid\n", cmd);
  2162. status = 0;
  2163. break;
  2164. case CMD_PROTOCOL_ERR:
  2165. printk(KERN_WARNING "cciss: cmd %p has "
  2166. "protocol error \n", cmd);
  2167. status = 0;
  2168. break;
  2169. case CMD_HARDWARE_ERR:
  2170. printk(KERN_WARNING "cciss: cmd %p had "
  2171. " hardware error\n", cmd);
  2172. status = 0;
  2173. break;
  2174. case CMD_CONNECTION_LOST:
  2175. printk(KERN_WARNING "cciss: cmd %p had "
  2176. "connection lost\n", cmd);
  2177. status = 0;
  2178. break;
  2179. case CMD_ABORTED:
  2180. printk(KERN_WARNING "cciss: cmd %p was "
  2181. "aborted\n", cmd);
  2182. status = 0;
  2183. break;
  2184. case CMD_ABORT_FAILED:
  2185. printk(KERN_WARNING "cciss: cmd %p reports "
  2186. "abort failed\n", cmd);
  2187. status = 0;
  2188. break;
  2189. case CMD_UNSOLICITED_ABORT:
  2190. printk(KERN_WARNING "cciss%d: unsolicited "
  2191. "abort %p\n", h->ctlr, cmd);
  2192. if (cmd->retry_count < MAX_CMD_RETRIES) {
  2193. retry_cmd = 1;
  2194. printk(KERN_WARNING
  2195. "cciss%d: retrying %p\n", h->ctlr, cmd);
  2196. cmd->retry_count++;
  2197. } else
  2198. printk(KERN_WARNING
  2199. "cciss%d: %p retried too "
  2200. "many times\n", h->ctlr, cmd);
  2201. status = 0;
  2202. break;
  2203. case CMD_TIMEOUT:
  2204. printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
  2205. status = 0;
  2206. break;
  2207. default:
  2208. printk(KERN_WARNING "cciss: cmd %p returned "
  2209. "unknown status %x\n", cmd,
  2210. cmd->err_info->CommandStatus);
  2211. status = 0;
  2212. }
  2213. }
  2214. /* We need to return this command */
  2215. if (retry_cmd) {
  2216. resend_cciss_cmd(h, cmd);
  2217. return;
  2218. }
  2219. cmd->rq->completion_data = cmd;
  2220. cmd->rq->errors = status;
  2221. blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
  2222. blk_complete_request(cmd->rq);
  2223. }
  2224. /*
  2225. * Get a request and submit it to the controller.
  2226. */
  2227. static void do_cciss_request(request_queue_t *q)
  2228. {
  2229. ctlr_info_t *h = q->queuedata;
  2230. CommandList_struct *c;
  2231. sector_t start_blk;
  2232. int seg;
  2233. struct request *creq;
  2234. u64bit temp64;
  2235. struct scatterlist tmp_sg[MAXSGENTRIES];
  2236. drive_info_struct *drv;
  2237. int i, dir;
  2238. /* We call start_io here in case there is a command waiting on the
  2239. * queue that has not been sent.
  2240. */
  2241. if (blk_queue_plugged(q))
  2242. goto startio;
  2243. queue:
  2244. creq = elv_next_request(q);
  2245. if (!creq)
  2246. goto startio;
  2247. BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
  2248. if ((c = cmd_alloc(h, 1)) == NULL)
  2249. goto full;
  2250. blkdev_dequeue_request(creq);
  2251. spin_unlock_irq(q->queue_lock);
  2252. c->cmd_type = CMD_RWREQ;
  2253. c->rq = creq;
  2254. /* fill in the request */
  2255. drv = creq->rq_disk->private_data;
  2256. c->Header.ReplyQueue = 0; // unused in simple mode
  2257. /* got command from pool, so use the command block index instead */
  2258. /* for direct lookups. */
  2259. /* The first 2 bits are reserved for controller error reporting. */
  2260. c->Header.Tag.lower = (c->cmdindex << 3);
  2261. c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
  2262. c->Header.LUN.LogDev.VolId = drv->LunID;
  2263. c->Header.LUN.LogDev.Mode = 1;
  2264. c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
  2265. c->Request.Type.Type = TYPE_CMD; // It is a command.
  2266. c->Request.Type.Attribute = ATTR_SIMPLE;
  2267. c->Request.Type.Direction =
  2268. (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
  2269. c->Request.Timeout = 0; // Don't time out
  2270. c->Request.CDB[0] =
  2271. (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
  2272. start_blk = creq->sector;
  2273. #ifdef CCISS_DEBUG
printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int)creq->sector,
  2275. (int)creq->nr_sectors);
  2276. #endif /* CCISS_DEBUG */
  2277. seg = blk_rq_map_sg(q, creq, tmp_sg);
  2278. /* get the DMA records for the setup */
  2279. if (c->Request.Type.Direction == XFER_READ)
  2280. dir = PCI_DMA_FROMDEVICE;
  2281. else
  2282. dir = PCI_DMA_TODEVICE;
  2283. for (i = 0; i < seg; i++) {
  2284. c->SG[i].Len = tmp_sg[i].length;
  2285. temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
  2286. tmp_sg[i].offset,
  2287. tmp_sg[i].length, dir);
  2288. c->SG[i].Addr.lower = temp64.val32.lower;
  2289. c->SG[i].Addr.upper = temp64.val32.upper;
  2290. c->SG[i].Ext = 0; // we are not chaining
  2291. }
  2292. /* track how many SG entries we are using */
  2293. if (seg > h->maxSG)
  2294. h->maxSG = seg;
  2295. #ifdef CCISS_DEBUG
  2296. printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
  2297. creq->nr_sectors, seg);
  2298. #endif /* CCISS_DEBUG */
  2299. c->Header.SGList = c->Header.SGTotal = seg;
  2300. if(h->cciss_read == CCISS_READ_10) {
  2301. c->Request.CDB[1] = 0;
  2302. c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
  2303. c->Request.CDB[3] = (start_blk >> 16) & 0xff;
  2304. c->Request.CDB[4] = (start_blk >> 8) & 0xff;
  2305. c->Request.CDB[5] = start_blk & 0xff;
  2306. c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
  2307. c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
  2308. c->Request.CDB[8] = creq->nr_sectors & 0xff;
  2309. c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
  2310. } else {
  2311. c->Request.CDBLen = 16;
  2312. c->Request.CDB[1]= 0;
  2313. c->Request.CDB[2]= (start_blk >> 56) & 0xff; //MSB
  2314. c->Request.CDB[3]= (start_blk >> 48) & 0xff;
  2315. c->Request.CDB[4]= (start_blk >> 40) & 0xff;
  2316. c->Request.CDB[5]= (start_blk >> 32) & 0xff;
  2317. c->Request.CDB[6]= (start_blk >> 24) & 0xff;
  2318. c->Request.CDB[7]= (start_blk >> 16) & 0xff;
  2319. c->Request.CDB[8]= (start_blk >> 8) & 0xff;
  2320. c->Request.CDB[9]= start_blk & 0xff;
  2321. c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
  2322. c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
  2323. c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
  2324. c->Request.CDB[13]= creq->nr_sectors & 0xff;
  2325. c->Request.CDB[14] = c->Request.CDB[15] = 0;
  2326. }
  2327. spin_lock_irq(q->queue_lock);
  2328. addQ(&(h->reqQ), c);
  2329. h->Qdepth++;
  2330. if (h->Qdepth > h->maxQsinceinit)
  2331. h->maxQsinceinit = h->Qdepth;
  2332. goto queue;
  2333. full:
  2334. blk_stop_queue(q);
  2335. startio:
/* We will already have the driver lock here, so no need
 * to lock it.
 */
  2339. start_io(h);
  2340. }
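/* Fetch the next completed command tag. Completions that sendcmd()
 * picked up while polling and could not handle itself are saved in
 * scsi_rejects and are drained here before reading the hardware FIFO.
 */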
  2341. static inline unsigned long get_next_completion(ctlr_info_t *h)
  2342. {
  2343. #ifdef CONFIG_CISS_SCSI_TAPE
  2344. /* Any rejects from sendcmd() lying around? Process them first */
  2345. if (h->scsi_rejects.ncompletions == 0)
  2346. return h->access.command_completed(h);
  2347. else {
  2348. struct sendcmd_reject_list *srl;
  2349. int n;
  2350. srl = &h->scsi_rejects;
  2351. n = --srl->ncompletions;
  2352. /* printk("cciss%d: processing saved reject\n", h->ctlr); */
  2353. printk("p");
  2354. return srl->complete[n];
  2355. }
  2356. #else
  2357. return h->access.command_completed(h);
  2358. #endif
  2359. }
  2360. static inline int interrupt_pending(ctlr_info_t *h)
  2361. {
  2362. #ifdef CONFIG_CISS_SCSI_TAPE
  2363. return (h->access.intr_pending(h)
  2364. || (h->scsi_rejects.ncompletions > 0));
  2365. #else
  2366. return h->access.intr_pending(h);
  2367. #endif
  2368. }
  2369. static inline long interrupt_not_for_us(ctlr_info_t *h)
  2370. {
  2371. #ifdef CONFIG_CISS_SCSI_TAPE
  2372. return (((h->access.intr_pending(h) == 0) ||
  2373. (h->interrupts_enabled == 0))
  2374. && (h->scsi_rejects.ncompletions == 0));
  2375. #else
  2376. return (((h->access.intr_pending(h) == 0) ||
  2377. (h->interrupts_enabled == 0)));
  2378. #endif
  2379. }
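/* Interrupt handler: drain the completion FIFO. Tags with bit 2 set
 * are direct-lookup indexes into the command pool; otherwise the tag
 * is a bus address that is searched for on the completion queue.
 * Finished commands are removed from cmpQ and completed according to
 * their type (block request, ioctl, or SCSI tape command).
 */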
  2380. static irqreturn_t do_cciss_intr(int irq, void *dev_id)
  2381. {
  2382. ctlr_info_t *h = dev_id;
  2383. CommandList_struct *c;
  2384. unsigned long flags;
  2385. __u32 a, a1, a2;
  2386. if (interrupt_not_for_us(h))
  2387. return IRQ_NONE;
  2388. /*
  2389. * If there are completed commands in the completion queue,
  2390. * we had better do something about it.
  2391. */
  2392. spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
  2393. while (interrupt_pending(h)) {
  2394. while ((a = get_next_completion(h)) != FIFO_EMPTY) {
  2395. a1 = a;
  2396. if ((a & 0x04)) {
  2397. a2 = (a >> 3);
  2398. if (a2 >= h->nr_cmds) {
  2399. printk(KERN_WARNING
  2400. "cciss: controller cciss%d failed, stopping.\n",
  2401. h->ctlr);
  2402. fail_all_cmds(h->ctlr);
  2403. return IRQ_HANDLED;
  2404. }
  2405. c = h->cmd_pool + a2;
  2406. a = c->busaddr;
  2407. } else {
  2408. a &= ~3;
  2409. if ((c = h->cmpQ) == NULL) {
  2410. printk(KERN_WARNING
  2411. "cciss: Completion of %08x ignored\n",
  2412. a1);
  2413. continue;
  2414. }
  2415. while (c->busaddr != a) {
  2416. c = c->next;
  2417. if (c == h->cmpQ)
  2418. break;
  2419. }
  2420. }
  2421. /*
  2422. * If we've found the command, take it off the
  2423. * completion Q and free it
  2424. */
  2425. if (c->busaddr == a) {
  2426. removeQ(&h->cmpQ, c);
  2427. if (c->cmd_type == CMD_RWREQ) {
  2428. complete_command(h, c, 0);
  2429. } else if (c->cmd_type == CMD_IOCTL_PEND) {
  2430. complete(c->waiting);
  2431. }
  2432. # ifdef CONFIG_CISS_SCSI_TAPE
  2433. else if (c->cmd_type == CMD_SCSI)
  2434. complete_scsi_command(c, 0, a1);
  2435. # endif
  2436. continue;
  2437. }
  2438. }
  2439. }
  2440. spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
  2441. return IRQ_HANDLED;
  2442. }
  2443. /*
* We cannot read the structure directly; for portability we must use
  2445. * the io functions.
  2446. * This is for debug only.
  2447. */
  2448. #ifdef CCISS_DEBUG
  2449. static void print_cfg_table(CfgTable_struct *tb)
  2450. {
  2451. int i;
  2452. char temp_name[17];
  2453. printk("Controller Configuration information\n");
  2454. printk("------------------------------------\n");
  2455. for (i = 0; i < 4; i++)
  2456. temp_name[i] = readb(&(tb->Signature[i]));
  2457. temp_name[4] = '\0';
  2458. printk(" Signature = %s\n", temp_name);
  2459. printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
  2460. printk(" Transport methods supported = 0x%x\n",
  2461. readl(&(tb->TransportSupport)));
  2462. printk(" Transport methods active = 0x%x\n",
  2463. readl(&(tb->TransportActive)));
  2464. printk(" Requested transport Method = 0x%x\n",
  2465. readl(&(tb->HostWrite.TransportRequest)));
  2466. printk(" Coalesce Interrupt Delay = 0x%x\n",
  2467. readl(&(tb->HostWrite.CoalIntDelay)));
  2468. printk(" Coalesce Interrupt Count = 0x%x\n",
  2469. readl(&(tb->HostWrite.CoalIntCount)));
printk(" Max outstanding commands = %d\n",
  2471. readl(&(tb->CmdsOutMax)));
  2472. printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
  2473. for (i = 0; i < 16; i++)
  2474. temp_name[i] = readb(&(tb->ServerName[i]));
  2475. temp_name[16] = '\0';
  2476. printk(" Server Name = %s\n", temp_name);
  2477. printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
  2478. }
  2479. #endif /* CCISS_DEBUG */
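/* Translate a config-table base address offset into a PCI BAR index.
 * The offset advances 4 bytes for each I/O or 32-bit memory BAR and
 * 8 bytes for each 64-bit memory BAR, so walk the resources
 * accumulating sizes until the requested offset is reached.
 */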
  2480. static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
  2481. {
  2482. int i, offset, mem_type, bar_type;
  2483. if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
  2484. return 0;
  2485. offset = 0;
  2486. for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
  2487. bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
  2488. if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
  2489. offset += 4;
  2490. else {
  2491. mem_type = pci_resource_flags(pdev, i) &
  2492. PCI_BASE_ADDRESS_MEM_TYPE_MASK;
  2493. switch (mem_type) {
  2494. case PCI_BASE_ADDRESS_MEM_TYPE_32:
  2495. case PCI_BASE_ADDRESS_MEM_TYPE_1M:
  2496. offset += 4; /* 32 bit */
  2497. break;
  2498. case PCI_BASE_ADDRESS_MEM_TYPE_64:
  2499. offset += 8;
  2500. break;
  2501. default: /* reserved in PCI 2.2 */
  2502. printk(KERN_WARNING
  2503. "Base address is invalid\n");
  2504. return -1;
  2505. break;
  2506. }
  2507. }
  2508. if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
  2509. return i + 1;
  2510. }
  2511. return -1;
  2512. }
  2513. /* If MSI/MSI-X is supported by the kernel we will try to enable it on
  2514. * controllers that are capable. If not, we use IO-APIC mode.
  2515. */
  2516. static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
  2517. struct pci_dev *pdev, __u32 board_id)
  2518. {
  2519. #ifdef CONFIG_PCI_MSI
  2520. int err;
  2521. struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
  2522. {0, 2}, {0, 3}
  2523. };
  2524. /* Some boards advertise MSI but don't really support it */
  2525. if ((board_id == 0x40700E11) ||
  2526. (board_id == 0x40800E11) ||
  2527. (board_id == 0x40820E11) || (board_id == 0x40830E11))
  2528. goto default_int_mode;
  2529. if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
  2530. err = pci_enable_msix(pdev, cciss_msix_entries, 4);
  2531. if (!err) {
  2532. c->intr[0] = cciss_msix_entries[0].vector;
  2533. c->intr[1] = cciss_msix_entries[1].vector;
  2534. c->intr[2] = cciss_msix_entries[2].vector;
  2535. c->intr[3] = cciss_msix_entries[3].vector;
  2536. c->msix_vector = 1;
  2537. return;
  2538. }
  2539. if (err > 0) {
  2540. printk(KERN_WARNING "cciss: only %d MSI-X vectors "
  2541. "available\n", err);
  2542. goto default_int_mode;
  2543. } else {
  2544. printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
  2545. err);
  2546. goto default_int_mode;
  2547. }
  2548. }
  2549. if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
  2550. if (!pci_enable_msi(pdev)) {
  2551. c->msi_vector = 1;
  2552. } else {
  2553. printk(KERN_WARNING "cciss: MSI init failed\n");
  2554. }
  2555. }
  2556. default_int_mode:
  2557. #endif /* CONFIG_PCI_MSI */
  2558. /* if we get here we're going to use the default interrupt mode */
  2559. c->intr[SIMPLE_MODE_INT] = pdev->irq;
  2560. return;
  2561. }
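/* cciss_pci_init() brings the controller to a usable state: enable the
 * PCI device, map BAR 0, wait (up to 120 seconds) for the firmware-ready
 * scratchpad value, locate and map the config table, identify the board
 * against the products[] table, apply per-board workarounds and switch
 * the transport into simple mode.
 */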
  2562. static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
  2563. {
  2564. ushort subsystem_vendor_id, subsystem_device_id, command;
  2565. __u32 board_id, scratchpad = 0;
  2566. __u64 cfg_offset;
  2567. __u32 cfg_base_addr;
  2568. __u64 cfg_base_addr_index;
  2569. int i, err;
  2570. /* check to see if controller has been disabled */
  2571. /* BEFORE trying to enable it */
  2572. (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
  2573. if (!(command & 0x02)) {
  2574. printk(KERN_WARNING
  2575. "cciss: controller appears to be disabled\n");
  2576. return -ENODEV;
  2577. }
  2578. err = pci_enable_device(pdev);
  2579. if (err) {
  2580. printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
  2581. return err;
  2582. }
  2583. err = pci_request_regions(pdev, "cciss");
  2584. if (err) {
  2585. printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
  2586. "aborting\n");
  2587. return err;
  2588. }
  2589. subsystem_vendor_id = pdev->subsystem_vendor;
  2590. subsystem_device_id = pdev->subsystem_device;
  2591. board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
  2592. subsystem_vendor_id);
  2593. #ifdef CCISS_DEBUG
  2594. printk("command = %x\n", command);
  2595. printk("irq = %x\n", pdev->irq);
  2596. printk("board_id = %x\n", board_id);
  2597. #endif /* CCISS_DEBUG */
  2598. /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
  2599. * else we use the IO-APIC interrupt assigned to us by system ROM.
  2600. */
  2601. cciss_interrupt_mode(c, pdev, board_id);
  2602. /*
* Memory base addr is the first addr; the second points to the config
* table
  2605. */
  2606. c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
  2607. #ifdef CCISS_DEBUG
  2608. printk("address 0 = %x\n", c->paddr);
  2609. #endif /* CCISS_DEBUG */
  2610. c->vaddr = remap_pci_mem(c->paddr, 0x250);
  2611. /* Wait for the board to become ready. (PCI hotplug needs this.)
  2612. * We poll for up to 120 secs, once per 100ms. */
  2613. for (i = 0; i < 1200; i++) {
  2614. scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
  2615. if (scratchpad == CCISS_FIRMWARE_READY)
  2616. break;
  2617. set_current_state(TASK_INTERRUPTIBLE);
  2618. schedule_timeout(HZ / 10); /* wait 100ms */
  2619. }
  2620. if (scratchpad != CCISS_FIRMWARE_READY) {
  2621. printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
  2622. err = -ENODEV;
  2623. goto err_out_free_res;
  2624. }
  2625. /* get the address index number */
  2626. cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
  2627. cfg_base_addr &= (__u32) 0x0000ffff;
  2628. #ifdef CCISS_DEBUG
  2629. printk("cfg base address = %x\n", cfg_base_addr);
  2630. #endif /* CCISS_DEBUG */
  2631. cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
  2632. #ifdef CCISS_DEBUG
  2633. printk("cfg base address index = %x\n", cfg_base_addr_index);
  2634. #endif /* CCISS_DEBUG */
  2635. if (cfg_base_addr_index == -1) {
  2636. printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
  2637. err = -ENODEV;
  2638. goto err_out_free_res;
  2639. }
  2640. cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
  2641. #ifdef CCISS_DEBUG
  2642. printk("cfg offset = %x\n", cfg_offset);
  2643. #endif /* CCISS_DEBUG */
  2644. c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
  2645. cfg_base_addr_index) +
  2646. cfg_offset, sizeof(CfgTable_struct));
  2647. c->board_id = board_id;
  2648. #ifdef CCISS_DEBUG
  2649. print_cfg_table(c->cfgtable);
  2650. #endif /* CCISS_DEBUG */
  2651. for (i = 0; i < ARRAY_SIZE(products); i++) {
  2652. if (board_id == products[i].board_id) {
  2653. c->product_name = products[i].product_name;
  2654. c->access = *(products[i].access);
  2655. c->nr_cmds = products[i].nr_cmds;
  2656. break;
  2657. }
  2658. }
  2659. if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
  2660. (readb(&c->cfgtable->Signature[1]) != 'I') ||
  2661. (readb(&c->cfgtable->Signature[2]) != 'S') ||
  2662. (readb(&c->cfgtable->Signature[3]) != 'S')) {
  2663. printk("Does not appear to be a valid CISS config table\n");
  2664. err = -ENODEV;
  2665. goto err_out_free_res;
  2666. }
  2667. /* We didn't find the controller in our list. We know the
  2668. * signature is valid. If it's an HP device let's try to
  2669. * bind to the device and fire it up. Otherwise we bail.
  2670. */
  2671. if (i == ARRAY_SIZE(products)) {
  2672. if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
  2673. c->product_name = products[i-1].product_name;
  2674. c->access = *(products[i-1].access);
  2675. c->nr_cmds = products[i-1].nr_cmds;
  2676. printk(KERN_WARNING "cciss: This is an unknown "
  2677. "Smart Array controller.\n"
  2678. "cciss: Please update to the latest driver "
  2679. "available from www.hp.com.\n");
  2680. } else {
  2681. printk(KERN_WARNING "cciss: Sorry, I don't know how"
  2682. " to access the Smart Array controller %08lx\n"
  2683. , (unsigned long)board_id);
  2684. err = -ENODEV;
  2685. goto err_out_free_res;
  2686. }
  2687. }
  2688. #ifdef CONFIG_X86
  2689. {
  2690. /* Need to enable prefetch in the SCSI core for 6400 in x86 */
  2691. __u32 prefetch;
  2692. prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
  2693. prefetch |= 0x100;
  2694. writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
  2695. }
  2696. #endif
  2697. /* Disabling DMA prefetch for the P600
  2698. * An ASIC bug may result in a prefetch beyond
  2699. * physical memory.
  2700. */
  2701. if(board_id == 0x3225103C) {
  2702. __u32 dma_prefetch;
  2703. dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
  2704. dma_prefetch |= 0x8000;
  2705. writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
  2706. }
  2707. #ifdef CCISS_DEBUG
  2708. printk("Trying to put board into Simple mode\n");
  2709. #endif /* CCISS_DEBUG */
  2710. c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
  2711. /* Update the field, and then ring the doorbell */
  2712. writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
  2713. writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
/* Under certain very rare conditions, this can take a while.
  2715. * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
  2716. * as we enter this code.) */
  2717. for (i = 0; i < MAX_CONFIG_WAIT; i++) {
  2718. if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
  2719. break;
  2720. /* delay and try again */
  2721. set_current_state(TASK_INTERRUPTIBLE);
  2722. schedule_timeout(10);
  2723. }
  2724. #ifdef CCISS_DEBUG
  2725. printk(KERN_DEBUG "I counter got to %d %x\n", i,
  2726. readl(c->vaddr + SA5_DOORBELL));
  2727. #endif /* CCISS_DEBUG */
  2728. #ifdef CCISS_DEBUG
  2729. print_cfg_table(c->cfgtable);
  2730. #endif /* CCISS_DEBUG */
  2731. if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
  2732. printk(KERN_WARNING "cciss: unable to get board into"
  2733. " simple mode\n");
  2734. err = -ENODEV;
  2735. goto err_out_free_res;
  2736. }
  2737. return 0;
  2738. err_out_free_res:
  2739. /*
  2740. * Deliberately omit pci_disable_device(): it does something nasty to
  2741. * Smart Array controllers that pci_enable_device does not undo
  2742. */
  2743. pci_release_regions(pdev);
  2744. return err;
  2745. }

/*
 * Gets information about the local volumes attached to the controller.
 */
static void cciss_getgeometry(int cntl_num)
{
	ReportLunData_struct *ld_buff;
	InquiryData_struct *inq_buff;
	int return_code;
	int i;
	int listlength = 0;
	__u32 lunid = 0;
	int block_size;
	sector_t total_size;

	ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
	if (ld_buff == NULL) {
		printk(KERN_ERR "cciss: out of memory\n");
		return;
	}
	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL) {
		printk(KERN_ERR "cciss: out of memory\n");
		kfree(ld_buff);
		return;
	}
	/* Get the firmware version */
	return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
			      sizeof(InquiryData_struct), 0, 0, 0, NULL,
			      TYPE_CMD);
	if (return_code == IO_OK) {
		hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
		hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
		hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
		hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
	} else {	/* send command failed */
		printk(KERN_WARNING "cciss: unable to determine firmware"
		       " version of controller\n");
	}
	/* Get the number of logical volumes */
	return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
			      sizeof(ReportLunData_struct), 0, 0, 0, NULL,
			      TYPE_CMD);
	if (return_code == IO_OK) {
#ifdef CCISS_DEBUG
		printk("LUN Data\n--------------------------\n");
#endif /* CCISS_DEBUG */
		listlength |=
		    (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
		listlength |=
		    (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
		listlength |=
		    (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
		listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
	} else {	/* reading number of logical volumes failed */
		printk(KERN_WARNING "cciss: report logical volume"
		       " command failed\n");
		listlength = 0;
	}
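	/*
	 * LUNListLength is a big-endian byte count of the LUN list returned
	 * by the controller; the shifts above assemble it into host order
	 * (be32_to_cpu() on the 4-byte field would presumably do the same
	 * job).  Each entry in the list is 8 bytes, hence the division
	 * below.
	 */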
	hba[cntl_num]->num_luns = listlength / 8;	/* 8 bytes per entry */
	if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
		printk(KERN_ERR
		       "cciss: only %d logical volumes supported\n",
		       CISS_MAX_LUN);
		hba[cntl_num]->num_luns = CISS_MAX_LUN;
	}
#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
	       ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
	       ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
	       hba[cntl_num]->num_luns);
#endif /* CCISS_DEBUG */
	hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
	for (i = 0; i < CISS_MAX_LUN; i++) {
		if (i < hba[cntl_num]->num_luns) {
			lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
			    << 24;
			lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
			    << 16;
			lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
			    << 8;
			lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
			hba[cntl_num]->drv[i].LunID = lunid;
#ifdef CCISS_DEBUG
			printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
			       ld_buff->LUN[i][0], ld_buff->LUN[i][1],
			       ld_buff->LUN[i][2], ld_buff->LUN[i][3],
			       hba[cntl_num]->drv[i].LunID);
#endif /* CCISS_DEBUG */
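			/*
			 * CDB size selection: the first volume is probed with
			 * the 10-byte READ CAPACITY; if it reports a size of
			 * zero (taken here to mean the volume needs more than
			 * a 32-bit LBA), the 16-byte variant is used instead
			 * and the controller is switched to 16-byte CDBs for
			 * all subsequent I/O.
			 */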
			/* Check whether 16-byte CDBs are already in use */
			if (hba[cntl_num]->cciss_read == CCISS_READ_16) {
				cciss_read_capacity_16(cntl_num, i, 0,
						       &total_size, &block_size);
				goto geo_inq;
			}
			cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);

			/* total_size = last LBA + 1 */
			if (total_size == (__u32) 0) {
				cciss_read_capacity_16(cntl_num, i, 0,
						       &total_size, &block_size);
				hba[cntl_num]->cciss_read = CCISS_READ_16;
				hba[cntl_num]->cciss_write = CCISS_WRITE_16;
			} else {
				hba[cntl_num]->cciss_read = CCISS_READ_10;
				hba[cntl_num]->cciss_write = CCISS_WRITE_10;
			}
geo_inq:
			cciss_geometry_inquiry(cntl_num, i, 0, total_size,
					       block_size, inq_buff,
					       &hba[cntl_num]->drv[i]);
		} else {
			/* initialize raid_level to mark this slot as free */
			hba[cntl_num]->drv[i].raid_level = -1;
		}
	}
	kfree(ld_buff);
	kfree(inq_buff);
}

/* Function to find the first free pointer into our hba[] array.
 * Returns -1 if no free entries are left.
 */
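/* gendisk[0] is allocated up front so that the cciss/c%dd0 node can be
 * registered later even when the controller has no logical drives
 * configured; the online array utilities depend on that node (see the
 * registration loop in cciss_init_one).
 */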
static int alloc_cciss_hba(void)
{
	int i;

	for (i = 0; i < MAX_CTLR; i++) {
		if (!hba[i]) {
			ctlr_info_t *p;

			p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
			if (!p)
				goto Enomem;
			p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
			if (!p->gendisk[0]) {
				kfree(p);
				goto Enomem;
			}
			hba[i] = p;
			return i;
		}
	}
	printk(KERN_WARNING "cciss: This driver supports a maximum"
	       " of %d controllers.\n", MAX_CTLR);
	return -1;
Enomem:
	printk(KERN_ERR "cciss: out of memory.\n");
	return -1;
}

static void free_hba(int i)
{
	ctlr_info_t *p = hba[i];
	int n;

	hba[i] = NULL;
	for (n = 0; n < CISS_MAX_LUN; n++)
		put_disk(p->gendisk[n]);
	kfree(p);
}

/*
 * This is it.  Find all the controllers and register them.  I really hate
 * stealing all these major device numbers.
 * Returns the number of block devices registered.
 */
static int __devinit cciss_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int i;
	int j = 0;
	int rc;
	int dac;

	i = alloc_cciss_hba();
	if (i < 0)
		return -1;

	hba[i]->busy_initializing = 1;

	if (cciss_pci_init(hba[i], pdev) != 0)
		goto clean1;

	sprintf(hba[i]->devname, "cciss%d", i);
	hba[i]->ctlr = i;
	hba[i]->pdev = pdev;

	/* configure PCI DMA stuff */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		dac = 1;
	else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		dac = 0;
	else {
		printk(KERN_ERR "cciss: no suitable DMA available\n");
		goto clean1;
	}

	/*
	 * Register with our fixed major number, or get a dynamic major by
	 * passing 0 as the argument.  This is done to support more than
	 * 8 controllers.
	 */
	if (i < MAX_CTLR_ORIG)
		hba[i]->major = COMPAQ_CISS_MAJOR + i;
	rc = register_blkdev(hba[i]->major, hba[i]->devname);
	if (rc == -EBUSY || rc == -EINVAL) {
		printk(KERN_ERR
		       "cciss: Unable to get major number %d for %s "
		       "on hba %d\n", hba[i]->major, hba[i]->devname, i);
		goto clean1;
	} else {
		if (i >= MAX_CTLR_ORIG)
			hba[i]->major = rc;
	}

	/* make sure the board interrupts are off */
	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
	if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
			IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
		printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
		       hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
		goto clean2;
	}

	printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
	       hba[i]->devname, pdev->device, pci_name(pdev),
	       hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
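
	/*
	 * Allocate the per-controller command resources: cmd_pool_bits is a
	 * bitmap used to track which command slots are in use, while
	 * cmd_pool and errinfo_pool are DMA-consistent arrays of command
	 * blocks and error-info records that the controller itself reads
	 * and writes.
	 */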
	hba[i]->cmd_pool_bits =
	    kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
		      1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
	hba[i]->cmd_pool = (CommandList_struct *)
	    pci_alloc_consistent(hba[i]->pdev,
				 hba[i]->nr_cmds * sizeof(CommandList_struct),
				 &(hba[i]->cmd_pool_dhandle));
	hba[i]->errinfo_pool = (ErrorInfo_struct *)
	    pci_alloc_consistent(hba[i]->pdev,
				 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
				 &(hba[i]->errinfo_pool_dhandle));
	if ((hba[i]->cmd_pool_bits == NULL)
	    || (hba[i]->cmd_pool == NULL)
	    || (hba[i]->errinfo_pool == NULL)) {
		printk(KERN_ERR "cciss: out of memory\n");
		goto clean4;
	}
#ifdef CONFIG_CISS_SCSI_TAPE
	hba[i]->scsi_rejects.complete =
	    kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
		    (hba[i]->nr_cmds + 5), GFP_KERNEL);
	if (hba[i]->scsi_rejects.complete == NULL) {
		printk(KERN_ERR "cciss: out of memory\n");
		goto clean4;
	}
#endif
	spin_lock_init(&hba[i]->lock);

	/* Initialize the pdev driver private data to point at hba[i]. */
	pci_set_drvdata(pdev, hba[i]);

	/* Command and error info records are zeroed out before they are used. */
	memset(hba[i]->cmd_pool_bits, 0,
	       ((hba[i]->nr_cmds + BITS_PER_LONG -
		 1) / BITS_PER_LONG) * sizeof(unsigned long));

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
#endif /* CCISS_DEBUG */

	cciss_getgeometry(i);

	cciss_scsi_setup(i);

	/* Turn the interrupts on so we can service requests */
	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);

	cciss_procinit(i);
	hba[i]->cciss_max_sectors = 2048;

	hba[i]->busy_initializing = 0;
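
	/*
	 * Set up one gendisk and request queue per logical drive.  The
	 * do/while runs at least once, so cciss/c%dd0 is registered even
	 * when no logical drives exist; the online array utilities need
	 * that node to talk to the controller.
	 */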
	do {
		drive_info_struct *drv = &(hba[i]->drv[j]);
		struct gendisk *disk = hba[i]->gendisk[j];
		request_queue_t *q;

		/* Check if the disk was allocated already */
		if (!disk) {
			hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
			disk = hba[i]->gendisk[j];
		}

		/* Check that the disk was able to be allocated */
		if (!disk) {
			printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
			goto clean4;
		}

		q = blk_init_queue(do_cciss_request, &hba[i]->lock);
		if (!q) {
			printk(KERN_ERR
			       "cciss: unable to allocate queue for disk %d\n",
			       j);
			goto clean4;
		}
		drv->queue = q;
		q->backing_dev_info.ra_pages = READ_AHEAD;
		blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);

		/* This is a hardware imposed limit. */
		blk_queue_max_hw_segments(q, MAXSGENTRIES);

		/* This is a limit in the driver and could be eliminated. */
		blk_queue_max_phys_segments(q, MAXSGENTRIES);

		blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);

		blk_queue_softirq_done(q, cciss_softirq_done);

		q->queuedata = hba[i];
		sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
		disk->major = hba[i]->major;
		disk->first_minor = j << NWD_SHIFT;
		disk->fops = &cciss_fops;
		disk->queue = q;
		disk->private_data = drv;
		disk->driverfs_dev = &pdev->dev;
		/* we must register the controller even if no disks exist */
		/* this is for the online array utilities */
		if (!drv->heads && j)
			continue;
		blk_queue_hardsect_size(q, drv->block_size);
		set_capacity(disk, drv->nr_blocks);
		add_disk(disk);
	} while (++j <= hba[i]->highest_lun);

	return 1;
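
/* Error unwind: clean4 releases the command pools and the IRQ, clean2
 * drops the block-device major, and clean1 tears down any request queues
 * that were set up and releases the PCI regions before freeing the hba[]
 * slot.
 */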
clean4:
#ifdef CONFIG_CISS_SCSI_TAPE
	kfree(hba[i]->scsi_rejects.complete);
#endif
	kfree(hba[i]->cmd_pool_bits);
	if (hba[i]->cmd_pool)
		pci_free_consistent(hba[i]->pdev,
				    hba[i]->nr_cmds * sizeof(CommandList_struct),
				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
	if (hba[i]->errinfo_pool)
		pci_free_consistent(hba[i]->pdev,
				    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
				    hba[i]->errinfo_pool,
				    hba[i]->errinfo_pool_dhandle);
	free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
clean2:
	unregister_blkdev(hba[i]->major, hba[i]->devname);
clean1:
	hba[i]->busy_initializing = 0;
	/* cleanup any queues that may have been initialized */
	for (j = 0; j <= hba[i]->highest_lun; j++) {
		drive_info_struct *drv = &(hba[i]->drv[j]);
		if (drv->queue)
			blk_cleanup_queue(drv->queue);
	}
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_hba(i);
	return -1;
}

static void __devexit cciss_remove_one(struct pci_dev *pdev)
{
	ctlr_info_t *tmp_ptr;
	int i, j;
	char flush_buf[4];
	int return_code;

	if (pci_get_drvdata(pdev) == NULL) {
		printk(KERN_ERR "cciss: Unable to remove device\n");
		return;
	}
	tmp_ptr = pci_get_drvdata(pdev);
	i = tmp_ptr->ctlr;
	if (hba[i] == NULL) {
		printk(KERN_ERR "cciss: device appears to "
		       "already be removed\n");
		return;
	}
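
	/* Teardown order: flush the battery-backed write cache, release the
	 * interrupt (and any MSI/MSI-X vectors), unmap the registers, unhook
	 * from the SCSI subsystem and block layer, delete the gendisks and
	 * their queues, and finally free the DMA pools and the hba[] slot.
	 */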
	/* Turn the board interrupts off and send the flush cache command.
	 * sendcmd will turn off interrupts and send the flush...
	 * to write all data in the battery-backed cache to the disks.
	 */
	memset(flush_buf, 0, 4);
	return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
			      TYPE_CMD);
	if (return_code != IO_OK) {
		printk(KERN_WARNING "Error flushing cache on controller %d\n",
		       i);
	}
	free_irq(hba[i]->intr[2], hba[i]);

#ifdef CONFIG_PCI_MSI
	if (hba[i]->msix_vector)
		pci_disable_msix(hba[i]->pdev);
	else if (hba[i]->msi_vector)
		pci_disable_msi(hba[i]->pdev);
#endif /* CONFIG_PCI_MSI */

	iounmap(hba[i]->vaddr);
	cciss_unregister_scsi(i);	/* unhook from SCSI subsystem */
	unregister_blkdev(hba[i]->major, hba[i]->devname);
	remove_proc_entry(hba[i]->devname, proc_cciss);

	/* remove it from the disk list */
	for (j = 0; j < CISS_MAX_LUN; j++) {
		struct gendisk *disk = hba[i]->gendisk[j];
		if (disk) {
			request_queue_t *q = disk->queue;

			if (disk->flags & GENHD_FL_UP)
				del_gendisk(disk);
			if (q)
				blk_cleanup_queue(q);
		}
	}

	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
			    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
			    hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
	kfree(hba[i]->cmd_pool_bits);
#ifdef CONFIG_CISS_SCSI_TAPE
	kfree(hba[i]->scsi_rejects.complete);
#endif
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_hba(i);
}

static struct pci_driver cciss_pci_driver = {
	.name = "cciss",
	.probe = cciss_init_one,
	.remove = __devexit_p(cciss_remove_one),
	.id_table = cciss_pci_device_id,	/* id_table */
};

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init cciss_init(void)
{
	printk(KERN_INFO DRIVER_NAME "\n");
	/* Register for our PCI devices */
	return pci_register_driver(&cciss_pci_driver);
}

static void __exit cciss_cleanup(void)
{
	int i;

	pci_unregister_driver(&cciss_pci_driver);
	/* double-check that all controller entries have been removed */
	for (i = 0; i < MAX_CTLR; i++) {
		if (hba[i] != NULL) {
			printk(KERN_WARNING "cciss: had to remove"
			       " controller %d\n", i);
			cciss_remove_one(hba[i]->pdev);
		}
	}
	remove_proc_entry("cciss", proc_root_driver);
}

static void fail_all_cmds(unsigned long ctlr)
{
	/* If we get here, the board is apparently dead. */
	ctlr_info_t *h = hba[ctlr];
	CommandList_struct *c;
	unsigned long flags;

	printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
	h->alive = 0;	/* the controller apparently died... */

	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
	pci_disable_device(h->pdev);	/* Make sure it is really dead. */

	/* move everything off the request queue onto the completed queue */
	while ((c = h->reqQ) != NULL) {
		removeQ(&(h->reqQ), c);
		h->Qdepth--;
		addQ(&(h->cmpQ), c);
	}

	/* Now, fail everything on the completed queue with a HW error */
	while ((c = h->cmpQ) != NULL) {
		removeQ(&h->cmpQ, c);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		if (c->cmd_type == CMD_RWREQ) {
			complete_command(h, c, 0);
		} else if (c->cmd_type == CMD_IOCTL_PEND)
			complete(c->waiting);
#ifdef CONFIG_CISS_SCSI_TAPE
		else if (c->cmd_type == CMD_SCSI)
			complete_scsi_command(c, 0, 0);
#endif
	}
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
	return;
}

module_init(cciss_init);
module_exit(cciss_cleanup);