be_main.c 100 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390
  1. /**
  2. * Copyright (C) 2005 - 2009 ServerEngines
  3. * All rights reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License version 2
  7. * as published by the Free Software Foundation. The full GNU General
  8. * Public License is included in this distribution in the file called COPYING.
  9. *
  10. * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
  11. *
  12. * Contact Information:
  13. * linux-drivers@serverengines.com
  14. *
  15. * ServerEngines
  16. * 209 N. Fair Oaks Ave
  17. * Sunnyvale, CA 94085
  18. *
  19. */
  20. #include <linux/reboot.h>
  21. #include <linux/delay.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/blkdev.h>
  24. #include <linux/pci.h>
  25. #include <linux/string.h>
  26. #include <linux/kernel.h>
  27. #include <linux/semaphore.h>
  28. #include <scsi/libiscsi.h>
  29. #include <scsi/scsi_transport_iscsi.h>
  30. #include <scsi/scsi_transport.h>
  31. #include <scsi/scsi_cmnd.h>
  32. #include <scsi/scsi_device.h>
  33. #include <scsi/scsi_host.h>
  34. #include <scsi/scsi.h>
  35. #include "be_main.h"
  36. #include "be_iscsi.h"
  37. #include "be_mgmt.h"
  38. static unsigned int be_iopoll_budget = 10;
  39. static unsigned int be_max_phys_size = 64;
  40. static unsigned int enable_msix;
  41. MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
  42. MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
  43. MODULE_AUTHOR("ServerEngines Corporation");
  44. MODULE_LICENSE("GPL");
  45. module_param(be_iopoll_budget, int, 0);
  46. module_param(enable_msix, int, 0);
  47. module_param(be_max_phys_size, uint, S_IRUGO);
  48. MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
  49. "contiguous memory that can be allocated."
  50. "Range is 16 - 128");
  51. static int beiscsi_slave_configure(struct scsi_device *sdev)
  52. {
  53. blk_queue_max_segment_size(sdev->request_queue, 65536);
  54. return 0;
  55. }
/* SCSI host template: most handlers are the generic libiscsi ones; only
 * slave_configure is driver-specific.
 */
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.eh_abort_handler = iscsi_eh_abort,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_target_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,			/* initiator has no host SCSI ID */
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
};
/* iSCSI transport template; filled in at module init time. */
static struct scsi_transport_template *beiscsi_scsi_transport;
/*------------------- PCI Driver operations and data ----------------- */
/* PCI IDs this driver binds to (BladeEngine2 / OneConnect variants). */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
  83. static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
  84. {
  85. struct beiscsi_hba *phba;
  86. struct Scsi_Host *shost;
  87. shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
  88. if (!shost) {
  89. dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
  90. "iscsi_host_alloc failed \n");
  91. return NULL;
  92. }
  93. shost->dma_boundary = pcidev->dma_mask;
  94. shost->max_id = BE2_MAX_SESSIONS;
  95. shost->max_channel = 0;
  96. shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
  97. shost->max_lun = BEISCSI_NUM_MAX_LUN;
  98. shost->transportt = beiscsi_scsi_transport;
  99. phba = iscsi_host_priv(shost);
  100. memset(phba, 0, sizeof(*phba));
  101. phba->shost = shost;
  102. phba->pcidev = pci_dev_get(pcidev);
  103. if (iscsi_host_add(shost, &phba->pcidev->dev))
  104. goto free_devices;
  105. return phba;
  106. free_devices:
  107. pci_dev_put(phba->pcidev);
  108. iscsi_host_free(phba->shost);
  109. return NULL;
  110. }
  111. static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
  112. {
  113. if (phba->csr_va) {
  114. iounmap(phba->csr_va);
  115. phba->csr_va = NULL;
  116. }
  117. if (phba->db_va) {
  118. iounmap(phba->db_va);
  119. phba->db_va = NULL;
  120. }
  121. if (phba->pci_va) {
  122. iounmap(phba->pci_va);
  123. phba->pci_va = NULL;
  124. }
  125. }
  126. static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
  127. struct pci_dev *pcidev)
  128. {
  129. u8 __iomem *addr;
  130. addr = ioremap_nocache(pci_resource_start(pcidev, 2),
  131. pci_resource_len(pcidev, 2));
  132. if (addr == NULL)
  133. return -ENOMEM;
  134. phba->ctrl.csr = addr;
  135. phba->csr_va = addr;
  136. phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
  137. addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
  138. if (addr == NULL)
  139. goto pci_map_err;
  140. phba->ctrl.db = addr;
  141. phba->db_va = addr;
  142. phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
  143. addr = ioremap_nocache(pci_resource_start(pcidev, 1),
  144. pci_resource_len(pcidev, 1));
  145. if (addr == NULL)
  146. goto pci_map_err;
  147. phba->ctrl.pcicfg = addr;
  148. phba->pci_va = addr;
  149. phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
  150. return 0;
  151. pci_map_err:
  152. beiscsi_unmap_pci_function(phba);
  153. return -ENOMEM;
  154. }
  155. static int beiscsi_enable_pci(struct pci_dev *pcidev)
  156. {
  157. int ret;
  158. ret = pci_enable_device(pcidev);
  159. if (ret) {
  160. dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
  161. "failed. Returning -ENODEV\n");
  162. return ret;
  163. }
  164. if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
  165. ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
  166. if (ret) {
  167. dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
  168. pci_disable_device(pcidev);
  169. return ret;
  170. }
  171. }
  172. return 0;
  173. }
  174. static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
  175. {
  176. struct be_ctrl_info *ctrl = &phba->ctrl;
  177. struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
  178. struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
  179. int status = 0;
  180. ctrl->pdev = pdev;
  181. status = beiscsi_map_pci_bars(phba, pdev);
  182. if (status)
  183. return status;
  184. mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
  185. mbox_mem_alloc->va = pci_alloc_consistent(pdev,
  186. mbox_mem_alloc->size,
  187. &mbox_mem_alloc->dma);
  188. if (!mbox_mem_alloc->va) {
  189. beiscsi_unmap_pci_function(phba);
  190. status = -ENOMEM;
  191. return status;
  192. }
  193. mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
  194. mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
  195. mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
  196. memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
  197. spin_lock_init(&ctrl->mbox_lock);
  198. return status;
  199. }
  200. static void beiscsi_get_params(struct beiscsi_hba *phba)
  201. {
  202. phba->params.ios_per_ctrl = BE2_IO_DEPTH;
  203. phba->params.cxns_per_ctrl = BE2_MAX_SESSIONS;
  204. phba->params.asyncpdus_per_ctrl = BE2_ASYNCPDUS;
  205. phba->params.icds_per_ctrl = BE2_MAX_ICDS / 2;
  206. phba->params.num_sge_per_io = BE2_SGE;
  207. phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
  208. phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
  209. phba->params.eq_timer = 64;
  210. phba->params.num_eq_entries =
  211. (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
  212. 512) + 1) * 512;
  213. phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
  214. ? 1024 : phba->params.num_eq_entries;
  215. SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
  216. phba->params.num_eq_entries);
  217. phba->params.num_cq_entries =
  218. (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
  219. 512) + 1) * 512;
  220. SE_DEBUG(DBG_LVL_8,
  221. "phba->params.num_cq_entries=%d BE2_CMDS_PER_CXN=%d"
  222. "BE2_LOGOUTS=%d BE2_TMFS=%d BE2_ASYNCPDUS=%d \n",
  223. phba->params.num_cq_entries, BE2_CMDS_PER_CXN,
  224. BE2_LOGOUTS, BE2_TMFS, BE2_ASYNCPDUS);
  225. phba->params.wrbs_per_cxn = 256;
  226. }
  227. static void hwi_ring_eq_db(struct beiscsi_hba *phba,
  228. unsigned int id, unsigned int clr_interrupt,
  229. unsigned int num_processed,
  230. unsigned char rearm, unsigned char event)
  231. {
  232. u32 val = 0;
  233. val |= id & DB_EQ_RING_ID_MASK;
  234. if (rearm)
  235. val |= 1 << DB_EQ_REARM_SHIFT;
  236. if (clr_interrupt)
  237. val |= 1 << DB_EQ_CLR_SHIFT;
  238. if (event)
  239. val |= 1 << DB_EQ_EVNT_SHIFT;
  240. val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
  241. iowrite32(val, phba->db_va + DB_EQ_OFFSET);
  242. }
  243. /**
  244. * be_isr - The isr routine of the driver.
  245. * @irq: Not used
  246. * @dev_id: Pointer to host adapter structure
  247. */
  248. static irqreturn_t be_isr(int irq, void *dev_id)
  249. {
  250. struct beiscsi_hba *phba;
  251. struct hwi_controller *phwi_ctrlr;
  252. struct hwi_context_memory *phwi_context;
  253. struct be_eq_entry *eqe = NULL;
  254. struct be_queue_info *eq;
  255. struct be_queue_info *cq;
  256. unsigned long flags, index;
  257. unsigned int num_eq_processed;
  258. struct be_ctrl_info *ctrl;
  259. int isr;
  260. phba = dev_id;
  261. if (!enable_msix) {
  262. ctrl = &phba->ctrl;;
  263. isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
  264. (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
  265. if (!isr)
  266. return IRQ_NONE;
  267. }
  268. phwi_ctrlr = phba->phwi_ctrlr;
  269. phwi_context = phwi_ctrlr->phwi_ctxt;
  270. eq = &phwi_context->be_eq.q;
  271. cq = &phwi_context->be_cq;
  272. index = 0;
  273. eqe = queue_tail_node(eq);
  274. if (!eqe)
  275. SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
  276. num_eq_processed = 0;
  277. if (blk_iopoll_enabled) {
  278. while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
  279. & EQE_VALID_MASK) {
  280. if (!blk_iopoll_sched_prep(&phba->iopoll))
  281. blk_iopoll_sched(&phba->iopoll);
  282. AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
  283. queue_tail_inc(eq);
  284. eqe = queue_tail_node(eq);
  285. num_eq_processed++;
  286. SE_DEBUG(DBG_LVL_8, "Valid EQE\n");
  287. }
  288. if (num_eq_processed) {
  289. hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 0, 1);
  290. return IRQ_HANDLED;
  291. } else
  292. return IRQ_NONE;
  293. } else {
  294. while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
  295. & EQE_VALID_MASK) {
  296. if (((eqe->dw[offsetof(struct amap_eq_entry,
  297. resource_id) / 32] &
  298. EQE_RESID_MASK) >> 16) != cq->id) {
  299. spin_lock_irqsave(&phba->isr_lock, flags);
  300. phba->todo_mcc_cq = 1;
  301. spin_unlock_irqrestore(&phba->isr_lock, flags);
  302. } else {
  303. spin_lock_irqsave(&phba->isr_lock, flags);
  304. phba->todo_cq = 1;
  305. spin_unlock_irqrestore(&phba->isr_lock, flags);
  306. }
  307. AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
  308. queue_tail_inc(eq);
  309. eqe = queue_tail_node(eq);
  310. num_eq_processed++;
  311. }
  312. if (phba->todo_cq || phba->todo_mcc_cq)
  313. queue_work(phba->wq, &phba->work_cqs);
  314. if (num_eq_processed) {
  315. hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 1, 1);
  316. return IRQ_HANDLED;
  317. } else
  318. return IRQ_NONE;
  319. }
  320. }
  321. static int beiscsi_init_irqs(struct beiscsi_hba *phba)
  322. {
  323. struct pci_dev *pcidev = phba->pcidev;
  324. int ret;
  325. ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, "beiscsi", phba);
  326. if (ret) {
  327. shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
  328. "Failed to register irq\\n");
  329. return ret;
  330. }
  331. return 0;
  332. }
  333. static void hwi_ring_cq_db(struct beiscsi_hba *phba,
  334. unsigned int id, unsigned int num_processed,
  335. unsigned char rearm, unsigned char event)
  336. {
  337. u32 val = 0;
  338. val |= id & DB_CQ_RING_ID_MASK;
  339. if (rearm)
  340. val |= 1 << DB_CQ_REARM_SHIFT;
  341. val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
  342. iowrite32(val, phba->db_va + DB_CQ_OFFSET);
  343. }
/*
 * async pdus include
 * a. unsolicited NOP-In (target initiated NOP-In)
 * b. Async Messages
 * c. Reject PDU
 * d. Login response
 * These headers arrive unprocessed by the EP firmware, and the iSCSI
 * layer processes them.
 */
  353. static unsigned int
  354. beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
  355. struct beiscsi_hba *phba,
  356. unsigned short cid,
  357. struct pdu_base *ppdu,
  358. unsigned long pdu_len,
  359. void *pbuffer, unsigned long buf_len)
  360. {
  361. struct iscsi_conn *conn = beiscsi_conn->conn;
  362. struct iscsi_session *session = conn->session;
  363. switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
  364. PDUBASE_OPCODE_MASK) {
  365. case ISCSI_OP_NOOP_IN:
  366. pbuffer = NULL;
  367. buf_len = 0;
  368. break;
  369. case ISCSI_OP_ASYNC_EVENT:
  370. break;
  371. case ISCSI_OP_REJECT:
  372. WARN_ON(!pbuffer);
  373. WARN_ON(!(buf_len == 48));
  374. SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
  375. break;
  376. case ISCSI_OP_LOGIN_RSP:
  377. break;
  378. default:
  379. shost_printk(KERN_WARNING, phba->shost,
  380. "Unrecognized opcode 0x%x in async msg \n",
  381. (ppdu->
  382. dw[offsetof(struct amap_pdu_base, opcode) / 32]
  383. & PDUBASE_OPCODE_MASK));
  384. return 1;
  385. }
  386. spin_lock_bh(&session->lock);
  387. __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
  388. spin_unlock_bh(&session->lock);
  389. return 0;
  390. }
  391. static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
  392. {
  393. struct sgl_handle *psgl_handle;
  394. if (phba->io_sgl_hndl_avbl) {
  395. SE_DEBUG(DBG_LVL_8,
  396. "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
  397. phba->io_sgl_alloc_index);
  398. psgl_handle = phba->io_sgl_hndl_base[phba->
  399. io_sgl_alloc_index];
  400. phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
  401. phba->io_sgl_hndl_avbl--;
  402. if (phba->io_sgl_alloc_index == (phba->params.ios_per_ctrl - 1))
  403. phba->io_sgl_alloc_index = 0;
  404. else
  405. phba->io_sgl_alloc_index++;
  406. } else
  407. psgl_handle = NULL;
  408. return psgl_handle;
  409. }
  410. static void
  411. free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
  412. {
  413. SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d \n",
  414. phba->io_sgl_free_index);
  415. if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
  416. /*
  417. * this can happen if clean_task is called on a task that
  418. * failed in xmit_task or alloc_pdu.
  419. */
  420. SE_DEBUG(DBG_LVL_8,
  421. "Double Free in IO SGL io_sgl_free_index=%d,"
  422. "value there=%p \n", phba->io_sgl_free_index,
  423. phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
  424. return;
  425. }
  426. phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
  427. phba->io_sgl_hndl_avbl++;
  428. if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
  429. phba->io_sgl_free_index = 0;
  430. else
  431. phba->io_sgl_free_index++;
  432. }
  433. /**
  434. * alloc_wrb_handle - To allocate a wrb handle
  435. * @phba: The hba pointer
  436. * @cid: The cid to use for allocation
  437. * @index: index allocation and wrb index
  438. *
  439. * This happens under session_lock until submission to chip
  440. */
  441. struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
  442. int index)
  443. {
  444. struct hwi_wrb_context *pwrb_context;
  445. struct hwi_controller *phwi_ctrlr;
  446. struct wrb_handle *pwrb_handle;
  447. phwi_ctrlr = phba->phwi_ctrlr;
  448. pwrb_context = &phwi_ctrlr->wrb_context[cid];
  449. pwrb_handle = pwrb_context->pwrb_handle_base[index];
  450. pwrb_handle->wrb_index = index;
  451. pwrb_handle->nxt_wrb_index = index;
  452. return pwrb_handle;
  453. }
/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	/* NOTE(review): despite the name, this only logs - no pool index or
	 * availability counter is updated here. Presumably handles are
	 * recycled by index via alloc_wrb_handle(); confirm against the
	 * rest of the driver before relying on pool accounting.
	 */
	SE_DEBUG(DBG_LVL_8,
		 "FREE WRB: pwrb_handle=%p free_index=%d=0x%x"
		 "wrb_handles_available=%d \n",
		 pwrb_handle, pwrb_context->free_index,
		 pwrb_context->free_index, pwrb_context->wrb_handles_available);
}
  472. static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
  473. {
  474. struct sgl_handle *psgl_handle;
  475. if (phba->eh_sgl_hndl_avbl) {
  476. psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
  477. phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
  478. SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
  479. phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
  480. phba->eh_sgl_hndl_avbl--;
  481. if (phba->eh_sgl_alloc_index ==
  482. (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
  483. 1))
  484. phba->eh_sgl_alloc_index = 0;
  485. else
  486. phba->eh_sgl_alloc_index++;
  487. } else
  488. psgl_handle = NULL;
  489. return psgl_handle;
  490. }
  491. void
  492. free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
  493. {
  494. if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
  495. /*
  496. * this can happen if clean_task is called on a task that
  497. * failed in xmit_task or alloc_pdu.
  498. */
  499. SE_DEBUG(DBG_LVL_8,
  500. "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
  501. phba->eh_sgl_free_index);
  502. return;
  503. }
  504. phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
  505. phba->eh_sgl_hndl_avbl++;
  506. if (phba->eh_sgl_free_index ==
  507. (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
  508. phba->eh_sgl_free_index = 0;
  509. else
  510. phba->eh_sgl_free_index++;
  511. }
/*
 * be_complete_io - complete a solicited SCSI command from its CQE
 *
 * Decodes response/status/flags and the residual count from the
 * solicited completion entry, copies sense data on CHECK CONDITION,
 * accounts received bytes for reads, then unmaps the scatterlist and
 * completes the task toward libiscsi with the decoded CmdSN window.
 */
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
	    (struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned int sense_len;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	/* CmdSN window: max = exp + window - 1.
	 * NOTE(review): be32_to_cpu() wraps an expression that has already
	 * been masked/shifted on the host side - verify the CQE dword
	 * byte order against the firmware spec.
	 */
	exp_cmdsn = be32_to_cpu(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK);
	max_cmdsn = be32_to_cpu((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
		& SOL_RESP_MASK) >> 16);
	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
		   & SOL_STS_MASK) >> 8);
	/* 0x80 forces the top bit of the response flags byte on */
	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
		  & SOL_FLAGS_MASK) >> 24) | 0x80;

	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
				32] & SOL_RES_CNT_MASK);
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;
		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			/* underflow below the command's threshold is an error */
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		/* sense data follows a 2-byte length prefix.
		 * NOTE(review): cpu_to_be16() of the single byte
		 * sense_info[0] looks suspect; this likely intends to read
		 * the 16-bit length field with be16_to_cpu() - confirm
		 * against the firmware BHS layout. min_t caps the copy at
		 * SCSI_SENSE_BUFFERSIZE either way.
		 */
		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len =
		    cpu_to_be16((unsigned short)(sts_bhs->sense_info[0]));
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	/* account received bytes for READ commands */
	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
		    & SOL_RES_CNT_MASK)
			conn->rxdata_octets += (psol->
			    dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
			    & SOL_RES_CNT_MASK);
	}
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
  574. static void
  575. be_complete_logout(struct beiscsi_conn *beiscsi_conn,
  576. struct iscsi_task *task, struct sol_cqe *psol)
  577. {
  578. struct iscsi_logout_rsp *hdr;
  579. struct iscsi_conn *conn = beiscsi_conn->conn;
  580. hdr = (struct iscsi_logout_rsp *)task->hdr;
  581. hdr->t2wait = 5;
  582. hdr->t2retain = 0;
  583. hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
  584. & SOL_FLAGS_MASK) >> 24) | 0x80;
  585. hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
  586. 32] & SOL_RESP_MASK);
  587. hdr->exp_cmdsn = cpu_to_be32(psol->
  588. dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
  589. & SOL_EXP_CMD_SN_MASK);
  590. hdr->max_cmdsn = be32_to_cpu((psol->
  591. dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
  592. & SOL_EXP_CMD_SN_MASK) +
  593. ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
  594. / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
  595. hdr->hlength = 0;
  596. __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
  597. }
  598. static void
  599. be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
  600. struct iscsi_task *task, struct sol_cqe *psol)
  601. {
  602. struct iscsi_tm_rsp *hdr;
  603. struct iscsi_conn *conn = beiscsi_conn->conn;
  604. hdr = (struct iscsi_tm_rsp *)task->hdr;
  605. hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
  606. & SOL_FLAGS_MASK) >> 24) | 0x80;
  607. hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
  608. 32] & SOL_RESP_MASK);
  609. hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
  610. i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
  611. hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
  612. i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
  613. ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
  614. / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
  615. __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
  616. }
  617. static void
  618. hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
  619. struct beiscsi_hba *phba, struct sol_cqe *psol)
  620. {
  621. struct hwi_wrb_context *pwrb_context;
  622. struct wrb_handle *pwrb_handle;
  623. struct hwi_controller *phwi_ctrlr;
  624. struct iscsi_conn *conn = beiscsi_conn->conn;
  625. struct iscsi_session *session = conn->session;
  626. phwi_ctrlr = phba->phwi_ctrlr;
  627. pwrb_context = &phwi_ctrlr->wrb_context[((psol->
  628. dw[offsetof(struct amap_sol_cqe, cid) / 32] &
  629. SOL_CID_MASK) >> 6)];
  630. pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
  631. dw[offsetof(struct amap_sol_cqe, wrb_index) /
  632. 32] & SOL_WRB_INDEX_MASK) >> 16)];
  633. spin_lock_bh(&session->lock);
  634. free_wrb_handle(phba, pwrb_context, pwrb_handle);
  635. spin_unlock_bh(&session->lock);
  636. }
  637. static void
  638. be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
  639. struct iscsi_task *task, struct sol_cqe *psol)
  640. {
  641. struct iscsi_nopin *hdr;
  642. struct iscsi_conn *conn = beiscsi_conn->conn;
  643. hdr = (struct iscsi_nopin *)task->hdr;
  644. hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
  645. & SOL_FLAGS_MASK) >> 24) | 0x80;
  646. hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
  647. i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
  648. hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
  649. i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
  650. ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
  651. / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
  652. hdr->opcode = ISCSI_OP_NOOP_IN;
  653. __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
  654. }
  655. static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
  656. struct beiscsi_hba *phba, struct sol_cqe *psol)
  657. {
  658. struct hwi_wrb_context *pwrb_context;
  659. struct wrb_handle *pwrb_handle;
  660. struct iscsi_wrb *pwrb = NULL;
  661. struct hwi_controller *phwi_ctrlr;
  662. struct iscsi_task *task;
  663. struct beiscsi_io_task *io_task;
  664. struct iscsi_conn *conn = beiscsi_conn->conn;
  665. struct iscsi_session *session = conn->session;
  666. phwi_ctrlr = phba->phwi_ctrlr;
  667. pwrb_context = &phwi_ctrlr->
  668. wrb_context[((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32]
  669. & SOL_CID_MASK) >> 6)];
  670. pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
  671. dw[offsetof(struct amap_sol_cqe, wrb_index) /
  672. 32] & SOL_WRB_INDEX_MASK) >> 16)];
  673. task = pwrb_handle->pio_handle;
  674. io_task = task->dd_data;
  675. spin_lock_bh(&session->lock);
  676. pwrb = pwrb_handle->pwrb;
  677. switch ((pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
  678. WRB_TYPE_MASK) >> 28) {
  679. case HWH_TYPE_IO:
  680. case HWH_TYPE_IO_RD:
  681. if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
  682. ISCSI_OP_NOOP_OUT) {
  683. be_complete_nopin_resp(beiscsi_conn, task, psol);
  684. } else
  685. be_complete_io(beiscsi_conn, task, psol);
  686. break;
  687. case HWH_TYPE_LOGOUT:
  688. be_complete_logout(beiscsi_conn, task, psol);
  689. break;
  690. case HWH_TYPE_LOGIN:
  691. SE_DEBUG(DBG_LVL_1,
  692. "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
  693. "- Solicited path \n");
  694. break;
  695. case HWH_TYPE_TMF:
  696. be_complete_tmf(beiscsi_conn, task, psol);
  697. break;
  698. case HWH_TYPE_NOP:
  699. be_complete_nopin_resp(beiscsi_conn, task, psol);
  700. break;
  701. default:
  702. shost_printk(KERN_WARNING, phba->shost,
  703. "wrb_index 0x%x CID 0x%x\n",
  704. ((psol->dw[offsetof(struct amap_iscsi_wrb, type) /
  705. 32] & SOL_WRB_INDEX_MASK) >> 16),
  706. ((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32]
  707. & SOL_CID_MASK) >> 6));
  708. break;
  709. }
  710. spin_unlock_bh(&session->lock);
  711. }
  712. static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
  713. *pasync_ctx, unsigned int is_header,
  714. unsigned int host_write_ptr)
  715. {
  716. if (is_header)
  717. return &pasync_ctx->async_entry[host_write_ptr].
  718. header_busy_list;
  719. else
  720. return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
  721. }
/*
 * hwi_get_async_handle - locate the buffer handle a default-PDU CQE refers to
 * @phba: adapter instance
 * @beiscsi_conn: connection the CQE belongs to
 * @pasync_ctx: default-PDU context
 * @pdpdu_cqe: unsolicited header/data completion entry
 * @pcq_index: out-parameter, receives the CQE's ring index
 *
 * Reconstructs the buffer's bus address from the CQE (db_addr minus the
 * data placement length), derives the buffer index from its offset in the
 * header or data pool, and searches the slot's busy list for the matching
 * handle.  Returns NULL on an unexpected CQE code.
 */
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	int buffer_len = 0;
	/* assigned only in the two valid switch arms; -1 wraps to 255 here */
	unsigned char buffer_index = -1;
	unsigned char is_header = 0;

	/* db_addr in the CQE points past the placed data; subtract dpl to
	 * get the buffer's start address */
	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
	      & PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
	phys_addr.u.a64.address =
	    *((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
		& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));
		/* buffer index = offset into the header pool / buffer size */
		buffer_len = (unsigned int)(phys_addr.u.a64.address -
			pasync_ctx->async_header.pa_base.u.a64.address);
		buffer_index = buffer_len /
			pasync_ctx->async_header.buffer_size;
		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));
		buffer_len = (unsigned long)(phys_addr.u.a64.address -
			pasync_ctx->async_data.pa_base.u.
			a64.address);
		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
		break;
	default:
		pbusy_list = NULL;
		shost_printk(KERN_WARNING, phba->shost,
			"Unexpected code=%d \n",
			pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	/* NOTE(review): this bound check uses async_data.num_entries even
	 * for the header ring, and '<=' admits index == num_entries —
	 * looks like an off-by-one in a diagnostic-only check; confirm. */
	WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
	WARN_ON(list_empty(pbusy_list));
	/* find the handle with the matching buffer index on the busy list;
	 * if none matches the loop leaves the last entry in pasync_handle */
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		WARN_ON(pasync_handle->consumed);
		if (pasync_handle->index == buffer_index)
			break;
	}
	WARN_ON(!pasync_handle);

	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid;
	pasync_handle->is_header = is_header;
	/* dpl = data placement length reported by the adapter */
	pasync_handle->buffer_len = ((pdpdu_cqe->
		dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
		& PDUCQE_DPL_MASK) >> 16);
	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
		index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}
/*
 * hwi_update_async_writables - mark ring entries consumed up to cq_index
 * @pasync_ctx: default-PDU context
 * @is_header: nonzero for the header ring, zero for the data ring
 * @cq_index: ring index reported by the completion entry
 *
 * Advances the endpoint read pointer (with wraparound) until it reaches
 * @cq_index, marking the busy handle at each visited slot as consumed,
 * then credits the ring's writable count with the number of entries
 * passed.  A zero advance means a duplicate notification.
 */
static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;

	/* select the per-ring bookkeeping (header vs data) */
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
		num_entries = pasync_ctx->async_header.num_entries;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
		num_entries = pasync_ctx->async_data.num_entries;
	}

	while ((*pep_read_ptr) != cq_index) {
		/* step the read pointer one slot, wrapping at ring size */
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		/* the first advanced slot is expected to be busy */
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		SE_DEBUG(DBG_LVL_1,
			 "Duplicate notification received - index 0x%x!!\n",
			 cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}
  830. static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
  831. unsigned int cri)
  832. {
  833. struct hwi_controller *phwi_ctrlr;
  834. struct hwi_async_pdu_context *pasync_ctx;
  835. struct async_pdu_handle *pasync_handle, *tmp_handle;
  836. struct list_head *plist;
  837. unsigned int i = 0;
  838. phwi_ctrlr = phba->phwi_ctrlr;
  839. pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
  840. plist = &pasync_ctx->async_entry[cri].wait_queue.list;
  841. list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
  842. list_del(&pasync_handle->link);
  843. if (i == 0) {
  844. list_add_tail(&pasync_handle->link,
  845. &pasync_ctx->async_header.free_list);
  846. pasync_ctx->async_header.free_entries++;
  847. i++;
  848. } else {
  849. list_add_tail(&pasync_handle->link,
  850. &pasync_ctx->async_data.free_list);
  851. pasync_ctx->async_data.free_entries++;
  852. i++;
  853. }
  854. }
  855. INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
  856. pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
  857. pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
  858. return 0;
  859. }
  860. static struct phys_addr *
  861. hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
  862. unsigned int is_header, unsigned int host_write_ptr)
  863. {
  864. struct phys_addr *pasync_sge = NULL;
  865. if (is_header)
  866. pasync_sge = pasync_ctx->async_header.ring_base;
  867. else
  868. pasync_sge = pasync_ctx->async_data.ring_base;
  869. return pasync_sge + host_write_ptr;
  870. }
/*
 * hwi_post_async_buffers - replenish a default-PDU ring with free buffers
 * @phba: adapter instance
 * @is_header: nonzero to post to the header ring, zero for the data ring
 *
 * Moves up to min(writables, free_entries) handles — rounded down to a
 * multiple of 8 — from the free list onto the ring's per-slot busy lists,
 * programs their bus addresses into the ring SGEs, updates the ring
 * bookkeeping, and rings the RXULP doorbell to publish the new entries.
 */
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	/* snapshot the bookkeeping of the selected ring */
	if (is_header) {
		num_entries = pasync_ctx->async_header.num_entries;
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		num_entries = pasync_ctx->async_data.num_entries;
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	/* post only whole batches of 8 entries */
	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
				       link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
							  is_header,
							  host_write_num);

			/* NOTE(review): address_lo is stored in ->hi and
			 * address_hi in ->lo — looks swapped; confirm the
			 * phys_addr/ring SGE field naming before changing. */
			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			/* advance the producer slot with wraparound */
			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
			    host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		/* doorbell: ring id, rearm, no event, count of posted SGEs */
		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}
  941. static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
  942. struct beiscsi_conn *beiscsi_conn,
  943. struct i_t_dpdu_cqe *pdpdu_cqe)
  944. {
  945. struct hwi_controller *phwi_ctrlr;
  946. struct hwi_async_pdu_context *pasync_ctx;
  947. struct async_pdu_handle *pasync_handle = NULL;
  948. unsigned int cq_index = -1;
  949. phwi_ctrlr = phba->phwi_ctrlr;
  950. pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
  951. pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
  952. pdpdu_cqe, &cq_index);
  953. BUG_ON(pasync_handle->is_header != 0);
  954. if (pasync_handle->consumed == 0)
  955. hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
  956. cq_index);
  957. hwi_free_async_msg(phba, pasync_handle->cri);
  958. hwi_post_async_buffers(phba, pasync_handle->is_header);
  959. }
  960. static unsigned int
  961. hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
  962. struct beiscsi_hba *phba,
  963. struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
  964. {
  965. struct list_head *plist;
  966. struct async_pdu_handle *pasync_handle;
  967. void *phdr = NULL;
  968. unsigned int hdr_len = 0, buf_len = 0;
  969. unsigned int status, index = 0, offset = 0;
  970. void *pfirst_buffer = NULL;
  971. unsigned int num_buf = 0;
  972. plist = &pasync_ctx->async_entry[cri].wait_queue.list;
  973. list_for_each_entry(pasync_handle, plist, link) {
  974. if (index == 0) {
  975. phdr = pasync_handle->pbuffer;
  976. hdr_len = pasync_handle->buffer_len;
  977. } else {
  978. buf_len = pasync_handle->buffer_len;
  979. if (!num_buf) {
  980. pfirst_buffer = pasync_handle->pbuffer;
  981. num_buf++;
  982. }
  983. memcpy(pfirst_buffer + offset,
  984. pasync_handle->pbuffer, buf_len);
  985. offset = buf_len;
  986. }
  987. index++;
  988. }
  989. status = beiscsi_process_async_pdu(beiscsi_conn, phba,
  990. beiscsi_conn->beiscsi_conn_cid,
  991. phdr, hdr_len, pfirst_buffer,
  992. buf_len);
  993. if (status == 0)
  994. hwi_free_async_msg(phba, cri);
  995. return 0;
  996. }
/*
 * hwi_gather_async_pdu - account one unsolicited header or data buffer
 * @beiscsi_conn: connection the PDU belongs to
 * @phba: adapter instance
 * @pasync_handle: buffer handle returned by hwi_get_async_handle()
 *
 * Queues the buffer on the per-CRI wait queue.  A header buffer records
 * the PDU's expected data length; data buffers accumulate until the
 * expected byte count is reached, at which point the complete PDU is
 * forwarded via hwi_fwd_async_msg().  Returns the forwarding status.
 */
static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	/* remove from the busy list; it joins the wait queue below */
	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		/* a second header for the same CRI before completion is a
		 * fatal protocol violation */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
		    (unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		ppdu = pasync_handle->pbuffer;
		/* expected data segment length, combined from the BHS
		 * data_len_hi/data_len_lo fields.
		 * NOTE(review): the mask/shift/byte-swap combination is hard
		 * to verify from this file alone — confirm against the
		 * amap_pdu_base layout before touching it. */
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		/* status is still 0 here; the check is kept as-is */
		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
			    bytes_needed;

			/* header-only PDU: forward immediately */
			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		/* data is only meaningful after its header has arrived */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received +=
			    (unsigned short)pasync_handle->buffer_len;

			/* forward once the full data segment is queued */
			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}
  1054. static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
  1055. struct beiscsi_hba *phba,
  1056. struct i_t_dpdu_cqe *pdpdu_cqe)
  1057. {
  1058. struct hwi_controller *phwi_ctrlr;
  1059. struct hwi_async_pdu_context *pasync_ctx;
  1060. struct async_pdu_handle *pasync_handle = NULL;
  1061. unsigned int cq_index = -1;
  1062. phwi_ctrlr = phba->phwi_ctrlr;
  1063. pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
  1064. pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
  1065. pdpdu_cqe, &cq_index);
  1066. if (pasync_handle->consumed == 0)
  1067. hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
  1068. cq_index);
  1069. hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
  1070. hwi_post_async_buffers(phba, pasync_handle->is_header);
  1071. }
/*
 * beiscsi_process_cq - drain valid entries from the completion queue
 * @phba: adapter instance whose CQ is polled
 *
 * Consumes CQEs while the valid bit is set, dispatching each by its code:
 * solicited completions, driver messages, unsolicited default-PDU
 * notifications, and a range of per-command and connection-fatal error
 * codes.  Rings the CQ doorbell every 32 entries (without rearm) and once
 * more at the end (with rearm).
 *
 * Returns the total number of CQEs processed.
 */
static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	struct dmsg_cqe *dmsg;
	unsigned int num_processed = 0;
	unsigned int tot_nump = 0;
	struct beiscsi_conn *beiscsi_conn;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	cq = &phwi_context->be_cq;
	sol = queue_tail_node(cq);

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		/* CQE memory is little-endian; convert in place once */
		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		beiscsi_conn = phba->conn_table[(u32) (sol->
			 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
			 SOL_CID_MASK) >> 6];

		if (!beiscsi_conn || !beiscsi_conn->ep) {
			shost_printk(KERN_WARNING, phba->shost,
				     "Connection table empty for cid = %d\n",
				     (u32)(sol->dw[offsetof(struct amap_sol_cqe,
				     cid) / 32] & SOL_CID_MASK) >> 6);
			/* NOTE(review): returning 0 here leaves this CQE
			 * valid and the doorbell un-rearmed — confirm this
			 * is the intended recovery path. */
			return 0;
		}

		/* ring the doorbell in batches so the CQ does not stall */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, phwi_context->be_cq.id,
					num_processed, 0, 0);
			tot_nump += num_processed;
			num_processed = 0;
		}

		switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
			32] & CQE_CODE_MASK) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
		case UNSOL_DATA_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR/DATA_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
						     (struct i_t_dpdu_cqe *)sol);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			/* invalidation acknowledgements need no action */
			SE_DEBUG(DBG_LVL_1,
				 "Ignoring CQ Error notification for cmd/cxn"
				 "invalidate\n");
			break;
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			/* per-command errors: log only, no connection reset */
			SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & SOL_CID_MASK));
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Digest error on def pdu ring, dropping..\n");
			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
						     (struct i_t_dpdu_cqe *) sol);
			break;
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_HDR_DIGEST_ERR:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			/* connection-fatal errors: tear the connection down */
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, resetting CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK);
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset received/sent "
				 "on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK);
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
				 "received on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK);
			break;
		}

		/* invalidate the processed entry and advance the tail */
		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
	}

	if (num_processed > 0) {
		tot_nump += num_processed;
		/* final doorbell with rearm so new CQEs raise an event */
		hwi_ring_cq_db(phba, phwi_context->be_cq.id, num_processed,
			       1, 0);
	}
	return tot_nump;
}
/*
 * beiscsi_process_all_cqs - deferred (workqueue) completion processing
 * @work: work_struct embedded in struct beiscsi_hba
 *
 * The ISR sets todo_mcc_cq/todo_cq and schedules this work item.  Each
 * flag is cleared under isr_lock so a concurrent interrupt can safely
 * re-arm it while we run.
 */
static void beiscsi_process_all_cqs(struct work_struct *work)
{
	unsigned long flags;
	struct beiscsi_hba *phba =
	    container_of(work, struct beiscsi_hba, work_cqs);

	if (phba->todo_mcc_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_mcc_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		/* MCC completions are not handled in this driver version */
		SE_DEBUG(DBG_LVL_1, "MCC Interrupt Not expected \n");
	}

	if (phba->todo_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_cq(phba);
	}
}
  1223. static int be_iopoll(struct blk_iopoll *iop, int budget)
  1224. {
  1225. static unsigned int ret;
  1226. struct beiscsi_hba *phba;
  1227. phba = container_of(iop, struct beiscsi_hba, iopoll);
  1228. ret = beiscsi_process_cq(phba);
  1229. if (ret < budget) {
  1230. struct hwi_controller *phwi_ctrlr;
  1231. struct hwi_context_memory *phwi_context;
  1232. phwi_ctrlr = phba->phwi_ctrlr;
  1233. phwi_context = phwi_ctrlr->phwi_ctxt;
  1234. blk_iopoll_complete(iop);
  1235. hwi_ring_eq_db(phba, phwi_context->be_eq.q.id, 0,
  1236. 0, 1, 1);
  1237. }
  1238. return ret;
  1239. }
  1240. static void
  1241. hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
  1242. unsigned int num_sg, struct beiscsi_io_task *io_task)
  1243. {
  1244. struct iscsi_sge *psgl;
  1245. unsigned short sg_len, index;
  1246. unsigned int sge_len = 0;
  1247. unsigned long long addr;
  1248. struct scatterlist *l_sg;
  1249. unsigned int offset;
  1250. AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
  1251. io_task->bhs_pa.u.a32.address_lo);
  1252. AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
  1253. io_task->bhs_pa.u.a32.address_hi);
  1254. l_sg = sg;
  1255. for (index = 0; (index < num_sg) && (index < 2); index++, sg_next(sg)) {
  1256. if (index == 0) {
  1257. sg_len = sg_dma_len(sg);
  1258. addr = (u64) sg_dma_address(sg);
  1259. AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
  1260. (addr & 0xFFFFFFFF));
  1261. AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
  1262. (addr >> 32));
  1263. AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
  1264. sg_len);
  1265. sge_len = sg_len;
  1266. AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
  1267. 1);
  1268. } else {
  1269. AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
  1270. 0);
  1271. AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
  1272. pwrb, sge_len);
  1273. sg_len = sg_dma_len(sg);
  1274. addr = (u64) sg_dma_address(sg);
  1275. AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
  1276. (addr & 0xFFFFFFFF));
  1277. AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
  1278. (addr >> 32));
  1279. AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
  1280. sg_len);
  1281. }
  1282. }
  1283. psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
  1284. memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
  1285. AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
  1286. AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
  1287. io_task->bhs_pa.u.a32.address_hi);
  1288. AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
  1289. io_task->bhs_pa.u.a32.address_lo);
  1290. if (num_sg == 2)
  1291. AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
  1292. sg = l_sg;
  1293. psgl++;
  1294. psgl++;
  1295. offset = 0;
  1296. for (index = 0; index < num_sg; index++, sg_next(sg), psgl++) {
  1297. sg_len = sg_dma_len(sg);
  1298. addr = (u64) sg_dma_address(sg);
  1299. AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
  1300. (addr & 0xFFFFFFFF));
  1301. AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
  1302. (addr >> 32));
  1303. AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
  1304. AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
  1305. AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
  1306. offset += sg_len;
  1307. }
  1308. psgl--;
  1309. AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
  1310. }
/*
 * hwi_write_buffer - program WRB/SGL for a non-SCSI (mgmt/nop) task
 * @pwrb: work request block to fill in
 * @task: iscsi task carrying optional immediate data in task->data
 *
 * Points the WRB at the task's BHS and, when immediate data is present,
 * DMA-maps it and programs it as the single inline SGE, then fills in
 * the task's SGL fragment page accordingly.
 */
static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
	struct iscsi_sge *psgl;
	unsigned long long addr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct beiscsi_conn *beiscsi_conn = io_task->conn;
	struct beiscsi_hba *phba = beiscsi_conn->phba;

	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	if (task->data) {
		if (task->data_count) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
			/* NOTE(review): direction literal 1 presumably means
			 * PCI_DMA_TODEVICE, and the mapping is neither
			 * error-checked nor visibly unmapped here — confirm
			 * ownership/lifetime against the teardown path. */
			addr = (u64) pci_map_single(phba->pcidev,
						    task->data,
						    task->data_count, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
			addr = 0;
		}
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
			      (addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
			      (addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
			      task->data_count);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
		addr = 0;
	}

	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);
	if (task->data) {
		psgl++;
		/* clear the second fragment slot before writing data SGE */
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);

		psgl++;
		if (task->data) {	/* always true inside this branch */
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
				      (addr & 0xFFFFFFFF));
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
				      (addr >> 32));
		}
		/* NOTE(review): 0x106 is a magic length — presumably the
		 * maximum immediate-data segment size; confirm. */
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
	}
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
  1369. static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
  1370. {
  1371. unsigned int num_cq_pages, num_eq_pages, num_async_pdu_buf_pages;
  1372. unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
  1373. unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
  1374. num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
  1375. sizeof(struct sol_cqe));
  1376. num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
  1377. sizeof(struct be_eq_entry));
  1378. num_async_pdu_buf_pages =
  1379. PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
  1380. phba->params.defpdu_hdr_sz);
  1381. num_async_pdu_buf_sgl_pages =
  1382. PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
  1383. sizeof(struct phys_addr));
  1384. num_async_pdu_data_pages =
  1385. PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
  1386. phba->params.defpdu_data_sz);
  1387. num_async_pdu_data_sgl_pages =
  1388. PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
  1389. sizeof(struct phys_addr));
  1390. phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
  1391. phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
  1392. BE_ISCSI_PDU_HEADER_SIZE;
  1393. phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
  1394. sizeof(struct hwi_context_memory);
  1395. phba->mem_req[HWI_MEM_CQ] = num_cq_pages * PAGE_SIZE;
  1396. phba->mem_req[HWI_MEM_EQ] = num_eq_pages * PAGE_SIZE;
  1397. phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
  1398. * (phba->params.wrbs_per_cxn)
  1399. * phba->params.cxns_per_ctrl;
  1400. wrb_sz_per_cxn = sizeof(struct wrb_handle) *
  1401. (phba->params.wrbs_per_cxn);
  1402. phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
  1403. phba->params.cxns_per_ctrl);
  1404. phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
  1405. phba->params.icds_per_ctrl;
  1406. phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
  1407. phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
  1408. phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
  1409. num_async_pdu_buf_pages * PAGE_SIZE;
  1410. phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
  1411. num_async_pdu_data_pages * PAGE_SIZE;
  1412. phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
  1413. num_async_pdu_buf_sgl_pages * PAGE_SIZE;
  1414. phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
  1415. num_async_pdu_data_sgl_pages * PAGE_SIZE;
  1416. phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
  1417. phba->params.asyncpdus_per_ctrl *
  1418. sizeof(struct async_pdu_handle);
  1419. phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
  1420. phba->params.asyncpdus_per_ctrl *
  1421. sizeof(struct async_pdu_handle);
  1422. phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
  1423. sizeof(struct hwi_async_pdu_context) +
  1424. (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
  1425. }
/**
 * beiscsi_alloc_mem - allocate every region listed in phba->mem_req[].
 * @phba: adapter instance; mem_req[] must already be populated
 *
 * Each of the SE_MEM_MAX regions may be satisfied by up to
 * BEISCSI_MAX_FRAGS_INIT DMA-coherent fragments; when a large
 * pci_alloc_consistent() fails, the request is retried with smaller
 * power-of-two chunks down to BE_MIN_MEM_SIZE.
 *
 * Returns 0 on success or -ENOMEM, releasing everything allocated so
 * far on failure.
 */
static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	dma_addr_t bus_add;
	struct mem_array *mem_arr, *mem_arr_orig;
	unsigned int i, j, alloc_size, curr_alloc_size;

	phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
	if (!phba->phwi_ctrlr)
		return -ENOMEM;

	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
				 GFP_KERNEL);
	if (!phba->init_mem) {
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	/* Scratch fragment table reused for every region. */
	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
			       GFP_KERNEL);
	if (!mem_arr_orig) {
		kfree(phba->init_mem);
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	mem_descr = phba->init_mem;
	for (i = 0; i < SE_MEM_MAX; i++) {
		j = 0;
		mem_arr = mem_arr_orig;
		alloc_size = phba->mem_req[i];
		memset(mem_arr, 0, sizeof(struct mem_array) *
		       BEISCSI_MAX_FRAGS_INIT);
		/* Never request more than be_max_phys_size KB at once. */
		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
		do {
			mem_arr->virtual_address = pci_alloc_consistent(
							phba->pcidev,
							curr_alloc_size,
							&bus_add);
			if (!mem_arr->virtual_address) {
				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
					goto free_mem;
				/* Shrink: round down to a power of two,
				 * or halve if already a power of two.
				 */
				if (curr_alloc_size -
					rounddown_pow_of_two(curr_alloc_size))
					curr_alloc_size = rounddown_pow_of_two
							     (curr_alloc_size);
				else
					curr_alloc_size = curr_alloc_size / 2;
			} else {
				mem_arr->bus_address.u.
				    a64.address = (__u64) bus_add;
				mem_arr->size = curr_alloc_size;
				alloc_size -= curr_alloc_size;
				curr_alloc_size = min(be_max_phys_size *
						      1024, alloc_size);
				j++;
				mem_arr++;
			}
			/* NOTE(review): nothing guards against j exceeding
			 * BEISCSI_MAX_FRAGS_INIT if many small fragments
			 * are needed — presumably the sizing rules make
			 * that impossible; verify.
			 */
		} while (alloc_size);
		mem_descr->num_elements = j;
		mem_descr->size_in_bytes = phba->mem_req[i];
		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
					       GFP_KERNEL);
		if (!mem_descr->mem_array)
			goto free_mem;

		memcpy(mem_descr->mem_array, mem_arr_orig,
		       sizeof(struct mem_array) * j);
		mem_descr++;
	}
	kfree(mem_arr_orig);
	return 0;
free_mem:
	/* Unwind: free the current (partial) region's fragments, then
	 * walk back through the fully-allocated regions.
	 *
	 * NOTE(review): when the goto came from an allocation failure
	 * with j > 0, the current region's fragments live only in
	 * mem_arr_orig, yet this loop reads mem_descr->mem_array, which
	 * is still NULL (from kcalloc) for that region — looks like a
	 * NULL-dereference/leak hazard; verify.
	 */
	mem_descr->num_elements = j;
	while ((i) || (j)) {
		for (j = mem_descr->num_elements; j > 0; j--) {
			pci_free_consistent(phba->pcidev,
					    mem_descr->mem_array[j - 1].size,
					    mem_descr->mem_array[j - 1].
					    virtual_address,
					    mem_descr->mem_array[j - 1].
					    bus_address.u.a64.address);
		}
		if (i) {
			i--;
			kfree(mem_descr->mem_array);
			mem_descr--;
		}
	}
	kfree(mem_arr_orig);
	kfree(phba->init_mem);
	kfree(phba->phwi_ctrlr);
	return -ENOMEM;
}
  1515. static int beiscsi_get_memory(struct beiscsi_hba *phba)
  1516. {
  1517. beiscsi_find_mem_req(phba);
  1518. return beiscsi_alloc_mem(phba);
  1519. }
  1520. static void iscsi_init_global_templates(struct beiscsi_hba *phba)
  1521. {
  1522. struct pdu_data_out *pdata_out;
  1523. struct pdu_nop_out *pnop_out;
  1524. struct be_mem_descriptor *mem_descr;
  1525. mem_descr = phba->init_mem;
  1526. mem_descr += ISCSI_MEM_GLOBAL_HEADER;
  1527. pdata_out =
  1528. (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
  1529. memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
  1530. AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
  1531. IIOC_SCSI_DATA);
  1532. pnop_out =
  1533. (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
  1534. virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
  1535. memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
  1536. AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
  1537. AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
  1538. AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
  1539. }
/**
 * beiscsi_init_wrb_handle - build per-connection WRB handle tables.
 * @phba: adapter whose HWI_MEM_WRBH / HWI_MEM_WRB regions are populated
 *
 * Carves the WRB-handle array (HWI_MEM_WRBH) and the WRB ring memory
 * (HWI_MEM_WRB) into per-connection tables hung off
 * phwi_ctrlr->wrb_context[], then links each handle to its WRB.
 */
static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
	struct wrb_handle *pwrb_handle;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;
	struct iscsi_wrb *pwrb;
	unsigned int num_cxn_wrbh;
	unsigned int num_cxn_wrb, j, idx, index;

	mem_descr_wrbh = phba->init_mem;
	mem_descr_wrbh += HWI_MEM_WRBH;

	mem_descr_wrb = phba->init_mem;
	mem_descr_wrb += HWI_MEM_WRB;

	idx = 0;
	pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
	/* Connections' worth of handles available in this fragment. */
	num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
			((sizeof(struct wrb_handle)) *
			 phba->params.wrbs_per_cxn));
	phwi_ctrlr = phba->phwi_ctrlr;

	/* Only even-numbered contexts are populated, hence stride 2. */
	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		SE_DEBUG(DBG_LVL_8, "cid=%d pwrb_context=%p \n", index,
			 pwrb_context);
		/* NOTE(review): both kzalloc results are dereferenced
		 * below without a NULL check.
		 */
		pwrb_context->pwrb_handle_base =
		    kzalloc(sizeof(struct wrb_handle *) *
			    phba->params.wrbs_per_cxn, GFP_KERNEL);
		pwrb_context->pwrb_handle_basestd =
		    kzalloc(sizeof(struct wrb_handle *) *
			    phba->params.wrbs_per_cxn, GFP_KERNEL);
		if (num_cxn_wrbh) {
			pwrb_context->alloc_index = 0;
			pwrb_context->wrb_handles_available = 0;
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
				pwrb_context->pwrb_handle_basestd[j] =
				    pwrb_handle;
				pwrb_context->wrb_handles_available++;
				pwrb_handle++;
			}
			pwrb_context->free_index = 0;
			num_cxn_wrbh--;
		} else {
			/* Fragment exhausted: move to the next one. */
			idx++;
			pwrb_handle =
			    mem_descr_wrbh->mem_array[idx].virtual_address;
			num_cxn_wrbh =
			    ((mem_descr_wrbh->mem_array[idx].size) /
			     ((sizeof(struct wrb_handle)) *
			      phba->params.wrbs_per_cxn));
			pwrb_context->alloc_index = 0;
			/* NOTE(review): unlike the branch above, this one
			 * does not reset wrb_handles_available before the
			 * increments below; its prior value comes from
			 * un-zeroed hwi_controller memory — verify.
			 */
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
				pwrb_context->pwrb_handle_basestd[j] =
				    pwrb_handle;
				pwrb_context->wrb_handles_available++;
				pwrb_handle++;
			}
			pwrb_context->free_index = 0;
			num_cxn_wrbh--;
		}
	}

	idx = 0;
	pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
	/* NOTE(review): this groups as (size / sizeof(wrb)) *
	 * wrbs_per_cxn, unlike the handle computation above, which
	 * divides by the product — looks like a misplaced parenthesis;
	 * confirm against hardware sizing before changing.
	 */
	num_cxn_wrb =
	    ((mem_descr_wrb->mem_array[idx].size) / (sizeof(struct iscsi_wrb)) *
	     phba->params.wrbs_per_cxn);

	/* NOTE(review): bound is cxns_per_ctrl here but cxns_per_ctrl*2
	 * in the loop above, while both stride by 2 — only half the
	 * contexts get their pwrb pointers set; verify intent.
	 */
	for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		if (num_cxn_wrb) {
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_handle = pwrb_context->pwrb_handle_base[j];
				pwrb_handle->pwrb = pwrb;
				pwrb++;
			}
			num_cxn_wrb--;
		} else {
			idx++;
			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
			num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
				       (sizeof(struct iscsi_wrb)) *
				       phba->params.wrbs_per_cxn);
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_handle = pwrb_context->pwrb_handle_base[j];
				pwrb_handle->pwrb = pwrb;
				pwrb++;
			}
			num_cxn_wrb--;
		}
	}
}
/**
 * hwi_init_async_pdu_ctx - wire up the async (default) PDU context.
 * @phba: adapter whose HWI_MEM_ASYNC_* regions are populated
 *
 * Places struct hwi_async_pdu_context in HWI_MEM_ASYNC_PDU_CONTEXT,
 * points its header/data sub-structures at their buffer, ring and
 * handle regions, and threads every handle onto the free lists with
 * its per-entry buffer address precomputed.
 */
static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	unsigned int index;
	struct be_mem_descriptor *mem_descr;

	/* The context structure itself lives in its own region. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
	    mem_descr->mem_array[0].virtual_address;
	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
	memset(pasync_ctx, 0, sizeof(*pasync_ctx));

	pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
	pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
	pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
	pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;

	/* Header buffer region. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");
	pasync_ctx->async_header.va_base =
	    mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.pa_base.u.a64.address =
	    mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Header ring region. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");
	pasync_ctx->async_header.ring_base =
	    mem_descr->mem_array[0].virtual_address;

	/* Header handle region. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");
	pasync_ctx->async_header.handle_base =
	    mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);

	/* Data buffer region. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");
	pasync_ctx->async_data.va_base =
	    mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.pa_base.u.a64.address =
	    mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Data ring region. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");
	pasync_ctx->async_data.ring_base =
	    mem_descr->mem_array[0].virtual_address;

	/* Data handle region. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
	if (!mem_descr->mem_array[0].virtual_address)
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");
	pasync_ctx->async_data.handle_base =
	    mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

	pasync_header_h =
	    (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
	pasync_data_h =
	    (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;

	/* Thread every header/data handle onto its free list, pointing
	 * each at its slice of the corresponding buffer region.
	 */
	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
		pasync_header_h->cri = -1;
		pasync_header_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_header_h->link);
		pasync_header_h->pbuffer =
		    (void *)((unsigned long)
			     (pasync_ctx->async_header.va_base) +
			     (p->defpdu_hdr_sz * index));
		pasync_header_h->pa.u.a64.address =
		    pasync_ctx->async_header.pa_base.u.a64.address +
		    (p->defpdu_hdr_sz * index);
		list_add_tail(&pasync_header_h->link,
			      &pasync_ctx->async_header.free_list);
		pasync_header_h++;
		pasync_ctx->async_header.free_entries++;
		pasync_ctx->async_header.writables++;
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
			       header_busy_list);
		pasync_data_h->cri = -1;
		pasync_data_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_data_h->link);
		pasync_data_h->pbuffer =
		    (void *)((unsigned long)
			     (pasync_ctx->async_data.va_base) +
			     (p->defpdu_data_sz * index));
		pasync_data_h->pa.u.a64.address =
		    pasync_ctx->async_data.pa_base.u.a64.address +
		    (p->defpdu_data_sz * index);
		list_add_tail(&pasync_data_h->link,
			      &pasync_ctx->async_data.free_list);
		pasync_data_h++;
		pasync_ctx->async_data.free_entries++;
		pasync_ctx->async_data.writables++;
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
	}

	/* Ring pointers start empty; -1 marks "nothing read yet". */
	pasync_ctx->async_header.host_write_ptr = 0;
	pasync_ctx->async_header.ep_read_ptr = -1;
	pasync_ctx->async_data.host_write_ptr = 0;
	pasync_ctx->async_data.ep_read_ptr = -1;
}
  1764. static int
  1765. be_sgl_create_contiguous(void *virtual_address,
  1766. u64 physical_address, u32 length,
  1767. struct be_dma_mem *sgl)
  1768. {
  1769. WARN_ON(!virtual_address);
  1770. WARN_ON(!physical_address);
  1771. WARN_ON(!length > 0);
  1772. WARN_ON(!sgl);
  1773. sgl->va = virtual_address;
  1774. sgl->dma = physical_address;
  1775. sgl->size = length;
  1776. return 0;
  1777. }
  1778. static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
  1779. {
  1780. memset(sgl, 0, sizeof(*sgl));
  1781. }
  1782. static void
  1783. hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
  1784. struct mem_array *pmem, struct be_dma_mem *sgl)
  1785. {
  1786. if (sgl->va)
  1787. be_sgl_destroy_contiguous(sgl);
  1788. be_sgl_create_contiguous(pmem->virtual_address,
  1789. pmem->bus_address.u.a64.address,
  1790. pmem->size, sgl);
  1791. }
  1792. static void
  1793. hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
  1794. struct mem_array *pmem, struct be_dma_mem *sgl)
  1795. {
  1796. if (sgl->va)
  1797. be_sgl_destroy_contiguous(sgl);
  1798. be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
  1799. pmem->bus_address.u.a64.address,
  1800. pmem->size, sgl);
  1801. }
  1802. static int be_fill_queue(struct be_queue_info *q,
  1803. u16 len, u16 entry_size, void *vaddress)
  1804. {
  1805. struct be_dma_mem *mem = &q->dma_mem;
  1806. memset(q, 0, sizeof(*q));
  1807. q->len = len;
  1808. q->entry_size = entry_size;
  1809. mem->size = len * entry_size;
  1810. mem->va = vaddress;
  1811. if (!mem->va)
  1812. return -ENOMEM;
  1813. memset(mem->va, 0, mem->size);
  1814. return 0;
  1815. }
  1816. static int beiscsi_create_eq(struct beiscsi_hba *phba,
  1817. struct hwi_context_memory *phwi_context)
  1818. {
  1819. unsigned int idx;
  1820. int ret;
  1821. struct be_queue_info *eq;
  1822. struct be_dma_mem *mem;
  1823. struct be_mem_descriptor *mem_descr;
  1824. void *eq_vaddress;
  1825. idx = 0;
  1826. eq = &phwi_context->be_eq.q;
  1827. mem = &eq->dma_mem;
  1828. mem_descr = phba->init_mem;
  1829. mem_descr += HWI_MEM_EQ;
  1830. eq_vaddress = mem_descr->mem_array[idx].virtual_address;
  1831. ret = be_fill_queue(eq, phba->params.num_eq_entries,
  1832. sizeof(struct be_eq_entry), eq_vaddress);
  1833. if (ret) {
  1834. shost_printk(KERN_ERR, phba->shost,
  1835. "be_fill_queue Failed for EQ \n");
  1836. return ret;
  1837. }
  1838. mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
  1839. ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
  1840. phwi_context->be_eq.cur_eqd);
  1841. if (ret) {
  1842. shost_printk(KERN_ERR, phba->shost, "beiscsi_cmd_eq_create"
  1843. "Failedfor EQ \n");
  1844. return ret;
  1845. }
  1846. SE_DEBUG(DBG_LVL_8, "eq id is %d\n", phwi_context->be_eq.q.id);
  1847. return 0;
  1848. }
  1849. static int beiscsi_create_cq(struct beiscsi_hba *phba,
  1850. struct hwi_context_memory *phwi_context)
  1851. {
  1852. unsigned int idx;
  1853. int ret;
  1854. struct be_queue_info *cq, *eq;
  1855. struct be_dma_mem *mem;
  1856. struct be_mem_descriptor *mem_descr;
  1857. void *cq_vaddress;
  1858. idx = 0;
  1859. cq = &phwi_context->be_cq;
  1860. eq = &phwi_context->be_eq.q;
  1861. mem = &cq->dma_mem;
  1862. mem_descr = phba->init_mem;
  1863. mem_descr += HWI_MEM_CQ;
  1864. cq_vaddress = mem_descr->mem_array[idx].virtual_address;
  1865. ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
  1866. sizeof(struct sol_cqe), cq_vaddress);
  1867. if (ret) {
  1868. shost_printk(KERN_ERR, phba->shost,
  1869. "be_fill_queue Failed for ISCSI CQ \n");
  1870. return ret;
  1871. }
  1872. mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
  1873. ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, false, 0);
  1874. if (ret) {
  1875. shost_printk(KERN_ERR, phba->shost,
  1876. "beiscsi_cmd_eq_create Failed for ISCSI CQ \n");
  1877. return ret;
  1878. }
  1879. SE_DEBUG(DBG_LVL_8, "iscsi cq id is %d\n", phwi_context->be_cq.id);
  1880. SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
  1881. return 0;
  1882. }
  1883. static int
  1884. beiscsi_create_def_hdr(struct beiscsi_hba *phba,
  1885. struct hwi_context_memory *phwi_context,
  1886. struct hwi_controller *phwi_ctrlr,
  1887. unsigned int def_pdu_ring_sz)
  1888. {
  1889. unsigned int idx;
  1890. int ret;
  1891. struct be_queue_info *dq, *cq;
  1892. struct be_dma_mem *mem;
  1893. struct be_mem_descriptor *mem_descr;
  1894. void *dq_vaddress;
  1895. idx = 0;
  1896. dq = &phwi_context->be_def_hdrq;
  1897. cq = &phwi_context->be_cq;
  1898. mem = &dq->dma_mem;
  1899. mem_descr = phba->init_mem;
  1900. mem_descr += HWI_MEM_ASYNC_HEADER_RING;
  1901. dq_vaddress = mem_descr->mem_array[idx].virtual_address;
  1902. ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
  1903. sizeof(struct phys_addr),
  1904. sizeof(struct phys_addr), dq_vaddress);
  1905. if (ret) {
  1906. shost_printk(KERN_ERR, phba->shost,
  1907. "be_fill_queue Failed for DEF PDU HDR\n");
  1908. return ret;
  1909. }
  1910. mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
  1911. ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
  1912. def_pdu_ring_sz,
  1913. phba->params.defpdu_hdr_sz);
  1914. if (ret) {
  1915. shost_printk(KERN_ERR, phba->shost,
  1916. "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
  1917. return ret;
  1918. }
  1919. phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
  1920. SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
  1921. phwi_context->be_def_hdrq.id);
  1922. hwi_post_async_buffers(phba, 1);
  1923. return 0;
  1924. }
  1925. static int
  1926. beiscsi_create_def_data(struct beiscsi_hba *phba,
  1927. struct hwi_context_memory *phwi_context,
  1928. struct hwi_controller *phwi_ctrlr,
  1929. unsigned int def_pdu_ring_sz)
  1930. {
  1931. unsigned int idx;
  1932. int ret;
  1933. struct be_queue_info *dataq, *cq;
  1934. struct be_dma_mem *mem;
  1935. struct be_mem_descriptor *mem_descr;
  1936. void *dq_vaddress;
  1937. idx = 0;
  1938. dataq = &phwi_context->be_def_dataq;
  1939. cq = &phwi_context->be_cq;
  1940. mem = &dataq->dma_mem;
  1941. mem_descr = phba->init_mem;
  1942. mem_descr += HWI_MEM_ASYNC_DATA_RING;
  1943. dq_vaddress = mem_descr->mem_array[idx].virtual_address;
  1944. ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
  1945. sizeof(struct phys_addr),
  1946. sizeof(struct phys_addr), dq_vaddress);
  1947. if (ret) {
  1948. shost_printk(KERN_ERR, phba->shost,
  1949. "be_fill_queue Failed for DEF PDU DATA\n");
  1950. return ret;
  1951. }
  1952. mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
  1953. ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
  1954. def_pdu_ring_sz,
  1955. phba->params.defpdu_data_sz);
  1956. if (ret) {
  1957. shost_printk(KERN_ERR, phba->shost,
  1958. "be_cmd_create_default_pdu_queue Failed"
  1959. " for DEF PDU DATA\n");
  1960. return ret;
  1961. }
  1962. phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
  1963. SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
  1964. phwi_context->be_def_dataq.id);
  1965. hwi_post_async_buffers(phba, 0);
  1966. SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
  1967. return 0;
  1968. }
  1969. static int
  1970. beiscsi_post_pages(struct beiscsi_hba *phba)
  1971. {
  1972. struct be_mem_descriptor *mem_descr;
  1973. struct mem_array *pm_arr;
  1974. unsigned int page_offset, i;
  1975. struct be_dma_mem sgl;
  1976. int status;
  1977. mem_descr = phba->init_mem;
  1978. mem_descr += HWI_MEM_SGE;
  1979. pm_arr = mem_descr->mem_array;
  1980. page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
  1981. phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
  1982. for (i = 0; i < mem_descr->num_elements; i++) {
  1983. hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
  1984. status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
  1985. page_offset,
  1986. (pm_arr->size / PAGE_SIZE));
  1987. page_offset += pm_arr->size / PAGE_SIZE;
  1988. if (status != 0) {
  1989. shost_printk(KERN_ERR, phba->shost,
  1990. "post sgl failed.\n");
  1991. return status;
  1992. }
  1993. pm_arr++;
  1994. }
  1995. SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
  1996. return 0;
  1997. }
  1998. static int
  1999. beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
  2000. struct hwi_context_memory *phwi_context,
  2001. struct hwi_controller *phwi_ctrlr)
  2002. {
  2003. unsigned int wrb_mem_index, offset, size, num_wrb_rings;
  2004. u64 pa_addr_lo;
  2005. unsigned int idx, num, i;
  2006. struct mem_array *pwrb_arr;
  2007. void *wrb_vaddr;
  2008. struct be_dma_mem sgl;
  2009. struct be_mem_descriptor *mem_descr;
  2010. int status;
  2011. idx = 0;
  2012. mem_descr = phba->init_mem;
  2013. mem_descr += HWI_MEM_WRB;
  2014. pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
  2015. GFP_KERNEL);
  2016. if (!pwrb_arr) {
  2017. shost_printk(KERN_ERR, phba->shost,
  2018. "Memory alloc failed in create wrb ring.\n");
  2019. return -ENOMEM;
  2020. }
  2021. wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
  2022. pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
  2023. num_wrb_rings = mem_descr->mem_array[idx].size /
  2024. (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
  2025. for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
  2026. if (num_wrb_rings) {
  2027. pwrb_arr[num].virtual_address = wrb_vaddr;
  2028. pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
  2029. pwrb_arr[num].size = phba->params.wrbs_per_cxn *
  2030. sizeof(struct iscsi_wrb);
  2031. wrb_vaddr += pwrb_arr[num].size;
  2032. pa_addr_lo += pwrb_arr[num].size;
  2033. num_wrb_rings--;
  2034. } else {
  2035. idx++;
  2036. wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
  2037. pa_addr_lo = mem_descr->mem_array[idx].\
  2038. bus_address.u.a64.address;
  2039. num_wrb_rings = mem_descr->mem_array[idx].size /
  2040. (phba->params.wrbs_per_cxn *
  2041. sizeof(struct iscsi_wrb));
  2042. pwrb_arr[num].virtual_address = wrb_vaddr;
  2043. pwrb_arr[num].bus_address.u.a64.address\
  2044. = pa_addr_lo;
  2045. pwrb_arr[num].size = phba->params.wrbs_per_cxn *
  2046. sizeof(struct iscsi_wrb);
  2047. wrb_vaddr += pwrb_arr[num].size;
  2048. pa_addr_lo += pwrb_arr[num].size;
  2049. num_wrb_rings--;
  2050. }
  2051. }
  2052. for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
  2053. wrb_mem_index = 0;
  2054. offset = 0;
  2055. size = 0;
  2056. hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
  2057. status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
  2058. &phwi_context->be_wrbq[i]);
  2059. if (status != 0) {
  2060. shost_printk(KERN_ERR, phba->shost,
  2061. "wrbq create failed.");
  2062. return status;
  2063. }
  2064. phwi_ctrlr->wrb_context[i].cid = phwi_context->be_wrbq[i].id;
  2065. }
  2066. kfree(pwrb_arr);
  2067. return 0;
  2068. }
  2069. static void free_wrb_handles(struct beiscsi_hba *phba)
  2070. {
  2071. unsigned int index;
  2072. struct hwi_controller *phwi_ctrlr;
  2073. struct hwi_wrb_context *pwrb_context;
  2074. phwi_ctrlr = phba->phwi_ctrlr;
  2075. for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
  2076. pwrb_context = &phwi_ctrlr->wrb_context[index];
  2077. kfree(pwrb_context->pwrb_handle_base);
  2078. kfree(pwrb_context->pwrb_handle_basestd);
  2079. }
  2080. }
  2081. static void hwi_cleanup(struct beiscsi_hba *phba)
  2082. {
  2083. struct be_queue_info *q;
  2084. struct be_ctrl_info *ctrl = &phba->ctrl;
  2085. struct hwi_controller *phwi_ctrlr;
  2086. struct hwi_context_memory *phwi_context;
  2087. int i;
  2088. phwi_ctrlr = phba->phwi_ctrlr;
  2089. phwi_context = phwi_ctrlr->phwi_ctxt;
  2090. for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
  2091. q = &phwi_context->be_wrbq[i];
  2092. if (q->created)
  2093. beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
  2094. }
  2095. free_wrb_handles(phba);
  2096. q = &phwi_context->be_def_hdrq;
  2097. if (q->created)
  2098. beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
  2099. q = &phwi_context->be_def_dataq;
  2100. if (q->created)
  2101. beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
  2102. beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
  2103. q = &phwi_context->be_cq;
  2104. if (q->created)
  2105. beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
  2106. q = &phwi_context->be_eq.q;
  2107. if (q->created)
  2108. beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
  2109. }
/**
 * hwi_init_port - bring up all hardware queues for the port.
 * @phba: adapter instance
 *
 * Order matters: FW init, EQ, FW version/config checks, CQ, default
 * PDU header/data rings, SGE page posting, then WRB rings.
 * Returns 0 on success; any failure tears everything down via
 * hwi_cleanup() and returns -ENOMEM (the failing step's own status is
 * only logged, not propagated).
 */
static int hwi_init_port(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	unsigned int def_pdu_ring_sz;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	int status;

	def_pdu_ring_sz =
	    phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* Interrupt coalescing: fixed 64 EQ delay, no adaptive mode. */
	phwi_context->be_eq.max_eqd = 0;
	phwi_context->be_eq.min_eqd = 0;
	phwi_context->be_eq.cur_eqd = 64;
	phwi_context->be_eq.enable_aic = false;

	/* NOTE(review): the return value of be_cmd_fw_initialize() is
	 * ignored — confirm whether a failure here should abort.
	 */
	be_cmd_fw_initialize(&phba->ctrl);
	status = beiscsi_create_eq(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
		goto error;
	}
	status = mgmt_check_supported_fw(ctrl);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Unsupported fw version \n");
		goto error;
	}
	status = mgmt_get_fw_config(ctrl, phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Error getting fw config\n");
		goto error;
	}
	status = beiscsi_create_cq(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
		goto error;
	}
	status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
					def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Header not created\n");
		goto error;
	}
	status = beiscsi_create_def_data(phba, phwi_context,
					 phwi_ctrlr, def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Data not created\n");
		goto error;
	}
	status = beiscsi_post_pages(phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
		goto error;
	}
	status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "WRB Rings not created\n");
		goto error;
	}
	SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
	return 0;
error:
	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
	hwi_cleanup(phba);
	return -ENOMEM;
}
  2180. static int hwi_init_controller(struct beiscsi_hba *phba)
  2181. {
  2182. struct hwi_controller *phwi_ctrlr;
  2183. phwi_ctrlr = phba->phwi_ctrlr;
  2184. if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
  2185. phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
  2186. init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
  2187. SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
  2188. phwi_ctrlr->phwi_ctxt);
  2189. } else {
  2190. shost_printk(KERN_ERR, phba->shost,
  2191. "HWI_MEM_ADDN_CONTEXT is more than one element."
  2192. "Failing to load\n");
  2193. return -ENOMEM;
  2194. }
  2195. iscsi_init_global_templates(phba);
  2196. beiscsi_init_wrb_handle(phba);
  2197. hwi_init_async_pdu_ctx(phba);
  2198. if (hwi_init_port(phba) != 0) {
  2199. shost_printk(KERN_ERR, phba->shost,
  2200. "hwi_init_controller failed\n");
  2201. return -ENOMEM;
  2202. }
  2203. return 0;
  2204. }
  2205. static void beiscsi_free_mem(struct beiscsi_hba *phba)
  2206. {
  2207. struct be_mem_descriptor *mem_descr;
  2208. int i, j;
  2209. mem_descr = phba->init_mem;
  2210. i = 0;
  2211. j = 0;
  2212. for (i = 0; i < SE_MEM_MAX; i++) {
  2213. for (j = mem_descr->num_elements; j > 0; j--) {
  2214. pci_free_consistent(phba->pcidev,
  2215. mem_descr->mem_array[j - 1].size,
  2216. mem_descr->mem_array[j - 1].virtual_address,
  2217. mem_descr->mem_array[j - 1].bus_address.
  2218. u.a64.address);
  2219. }
  2220. kfree(mem_descr->mem_array);
  2221. mem_descr++;
  2222. }
  2223. kfree(phba->init_mem);
  2224. kfree(phba->phwi_ctrlr);
  2225. }
  2226. static int beiscsi_init_controller(struct beiscsi_hba *phba)
  2227. {
  2228. int ret = -ENOMEM;
  2229. ret = beiscsi_get_memory(phba);
  2230. if (ret < 0) {
  2231. shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
  2232. "Failed in beiscsi_alloc_memory \n");
  2233. return ret;
  2234. }
  2235. ret = hwi_init_controller(phba);
  2236. if (ret)
  2237. goto free_init;
  2238. SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
  2239. return 0;
  2240. free_init:
  2241. beiscsi_free_mem(phba);
  2242. return -ENOMEM;
  2243. }
/*
 * beiscsi_init_sgl_handle() - carve the SGL handle and SGE pools.
 * @phba: adapter instance.
 *
 * Splits the preallocated HWI_MEM_SGLH region into sgl_handle objects:
 * the first ios_per_ctrl handles form the I/O pool, the remaining
 * (icds_per_ctrl - ios_per_ctrl) the eh/mgmt pool.  A second pass over
 * HWI_MEM_SGE gives every handle its iscsi_sge fragment array
 * (num_sge_per_io entries each) and an sgl_index starting at
 * fw_config.iscsi_cid_start.
 *
 * Returns 0 on success, -ENOMEM if the pointer arrays cannot be
 * allocated or the SGLH region is not a single element.
 */
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
	struct sgl_handle *psgl_handle;
	struct iscsi_sge *pfrag;
	unsigned int arr_index, i, idx;

	phba->io_sgl_hndl_avbl = 0;
	phba->eh_sgl_hndl_avbl = 0;
	mem_descr_sglh = phba->init_mem;
	mem_descr_sglh += HWI_MEM_SGLH;
	if (1 == mem_descr_sglh->num_elements) {
		/* Pointer array for the I/O handle pool. */
		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 phba->params.ios_per_ctrl,
						 GFP_KERNEL);
		if (!phba->io_sgl_hndl_base) {
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
		/* Pointer array for the remaining (eh/mgmt) handles. */
		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 (phba->params.icds_per_ctrl -
						  phba->params.ios_per_ctrl),
						 GFP_KERNEL);
		if (!phba->eh_sgl_hndl_base) {
			kfree(phba->io_sgl_hndl_base);
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
	} else {
		shost_printk(KERN_ERR, phba->shost,
			     "HWI_MEM_SGLH is more than one element."
			     "Failing to load\n");
		return -ENOMEM;
	}
	/* Pass 1: distribute the raw sgl_handle objects between pools. */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sglh->num_elements) {
		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
				 sizeof(struct sgl_handle)); i++) {
			if (arr_index < phba->params.ios_per_ctrl) {
				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
				phba->io_sgl_hndl_avbl++;
				arr_index++;
			} else {
				phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl] =
					psgl_handle;
				arr_index++;
				phba->eh_sgl_hndl_avbl++;
			}
			psgl_handle++;
		}
		idx++;
	}
	SE_DEBUG(DBG_LVL_8,
		 "phba->io_sgl_hndl_avbl=%d"
		 "phba->eh_sgl_hndl_avbl=%d \n",
		 phba->io_sgl_hndl_avbl,
		 phba->eh_sgl_hndl_avbl);
	/* Pass 2: attach an SGE fragment array and index to each handle,
	 * spanning both pools in the same arr_index order as pass 1. */
	mem_descr_sg = phba->init_mem;
	mem_descr_sg += HWI_MEM_SGE;
	SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
		 mem_descr_sg->num_elements);
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sg->num_elements) {
		pfrag = mem_descr_sg->mem_array[idx].virtual_address;
		for (i = 0;
		     i < (mem_descr_sg->mem_array[idx].size) /
		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
		     i++) {
			if (arr_index < phba->params.ios_per_ctrl)
				psgl_handle = phba->io_sgl_hndl_base[arr_index];
			else
				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl];
			psgl_handle->pfrag = pfrag;
			/* Clear the first SGE address; filled per command. */
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
			pfrag += phba->params.num_sge_per_io;
			psgl_handle->sgl_index =
				phba->fw_config.iscsi_cid_start + arr_index++;
		}
		idx++;
	}
	/* Both pools start with their alloc/free cursors at slot 0. */
	phba->io_sgl_free_index = 0;
	phba->io_sgl_alloc_index = 0;
	phba->eh_sgl_free_index = 0;
	phba->eh_sgl_alloc_index = 0;
	return 0;
}
  2337. static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
  2338. {
  2339. int i, new_cid;
  2340. phba->cid_array = kmalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
  2341. GFP_KERNEL);
  2342. if (!phba->cid_array) {
  2343. shost_printk(KERN_ERR, phba->shost,
  2344. "Failed to allocate memory in "
  2345. "hba_setup_cid_tbls\n");
  2346. return -ENOMEM;
  2347. }
  2348. phba->ep_array = kmalloc(sizeof(struct iscsi_endpoint *) *
  2349. phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
  2350. if (!phba->ep_array) {
  2351. shost_printk(KERN_ERR, phba->shost,
  2352. "Failed to allocate memory in "
  2353. "hba_setup_cid_tbls \n");
  2354. kfree(phba->cid_array);
  2355. return -ENOMEM;
  2356. }
  2357. new_cid = phba->fw_config.iscsi_icd_start;
  2358. for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
  2359. phba->cid_array[i] = new_cid;
  2360. new_cid += 2;
  2361. }
  2362. phba->avlbl_cids = phba->params.cxns_per_ctrl;
  2363. return 0;
  2364. }
/*
 * hwi_enable_intr() - turn on host interrupts for the adapter.
 * @phba: adapter instance.
 *
 * Reads the membar interrupt-control register; if the host-interrupt
 * bit is clear, sets it and rings the event-queue doorbell for the EQ.
 * If the bit was already set, only a warning is logged.
 *
 * NOTE(review): the return type is unsigned char and the function
 * always returns true, so a caller testing for a negative error code
 * (as beiscsi_dev_probe does) can never observe a failure here.
 */
static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	u8 __iomem *addr;
	u32 reg;
	u32 enabled;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	eq = &phwi_context->be_eq.q;
	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
	reg = ioread32(addr);
	SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (!enabled) {
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
		iowrite32(reg, addr);
		SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
		/* Ring the EQ doorbell for eq->id. */
		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "In hwi_enable_intr, Not Enabled \n");
	return true;
}
  2393. static void hwi_disable_intr(struct beiscsi_hba *phba)
  2394. {
  2395. struct be_ctrl_info *ctrl = &phba->ctrl;
  2396. u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
  2397. u32 reg = ioread32(addr);
  2398. u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
  2399. if (enabled) {
  2400. reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
  2401. iowrite32(reg, addr);
  2402. } else
  2403. shost_printk(KERN_WARNING, phba->shost,
  2404. "In hwi_disable_intr, Already Disabled \n");
  2405. }
  2406. static int beiscsi_init_port(struct beiscsi_hba *phba)
  2407. {
  2408. int ret;
  2409. ret = beiscsi_init_controller(phba);
  2410. if (ret < 0) {
  2411. shost_printk(KERN_ERR, phba->shost,
  2412. "beiscsi_dev_probe - Failed in"
  2413. "beiscsi_init_controller \n");
  2414. return ret;
  2415. }
  2416. ret = beiscsi_init_sgl_handle(phba);
  2417. if (ret < 0) {
  2418. shost_printk(KERN_ERR, phba->shost,
  2419. "beiscsi_dev_probe - Failed in"
  2420. "beiscsi_init_sgl_handle \n");
  2421. goto do_cleanup_ctrlr;
  2422. }
  2423. if (hba_setup_cid_tbls(phba)) {
  2424. shost_printk(KERN_ERR, phba->shost,
  2425. "Failed in hba_setup_cid_tbls\n");
  2426. kfree(phba->io_sgl_hndl_base);
  2427. kfree(phba->eh_sgl_hndl_base);
  2428. goto do_cleanup_ctrlr;
  2429. }
  2430. return ret;
  2431. do_cleanup_ctrlr:
  2432. hwi_cleanup(phba);
  2433. return ret;
  2434. }
  2435. static void hwi_purge_eq(struct beiscsi_hba *phba)
  2436. {
  2437. struct hwi_controller *phwi_ctrlr;
  2438. struct hwi_context_memory *phwi_context;
  2439. struct be_queue_info *eq;
  2440. struct be_eq_entry *eqe = NULL;
  2441. phwi_ctrlr = phba->phwi_ctrlr;
  2442. phwi_context = phwi_ctrlr->phwi_ctxt;
  2443. eq = &phwi_context->be_eq.q;
  2444. eqe = queue_tail_node(eq);
  2445. while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
  2446. & EQE_VALID_MASK) {
  2447. AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
  2448. queue_tail_inc(eq);
  2449. eqe = queue_tail_node(eq);
  2450. }
  2451. }
  2452. static void beiscsi_clean_port(struct beiscsi_hba *phba)
  2453. {
  2454. unsigned char mgmt_status;
  2455. mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
  2456. if (mgmt_status)
  2457. shost_printk(KERN_WARNING, phba->shost,
  2458. "mgmt_epfw_cleanup FAILED \n");
  2459. hwi_cleanup(phba);
  2460. hwi_purge_eq(phba);
  2461. kfree(phba->io_sgl_hndl_base);
  2462. kfree(phba->eh_sgl_hndl_base);
  2463. kfree(phba->cid_array);
  2464. kfree(phba->ep_array);
  2465. }
/*
 * beiscsi_offload_connection() - push negotiated login parameters to hw.
 * @beiscsi_conn: connection being moved to offload mode.
 * @params: negotiated values laid out per amap_beiscsi_offload_params.
 *
 * Builds a context-update WRB (type 0x7) on the connection's WRB slot 0,
 * copies each negotiated value out of @params' dword array into the WRB,
 * points the pad buffer at the ISCSI_MEM_GLOBAL_HEADER region, converts
 * the WRB dwords and rings the TX doorbell once.
 */
void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct iscsi_target_context_update_wrb *pwrb = NULL;
	struct be_mem_descriptor *mem_descr;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 0);
	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	/* Negotiated burst/segment limits. */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_burst_length, pwrb, params->dw[offsetof
						 (struct amap_beiscsi_offload_params,
						 max_burst_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_send_data_segment_length, pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
					  max_send_data_segment_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      first_burst_length,
		      pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
					  first_burst_length) / 32]);
	/* Single-bit negotiated flags, shifted down from their dword. */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
					   erl) / 32] & OFFLD_PARAMS_ERL));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
					   dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
					   hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
					   ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
					   imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
	/* Next StatSN the target should use is exp_statsn + 1. */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
		      pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
					   exp_statsn) / 32] + 1));
	/* WRB bookkeeping: type 0x7 = context update; chain to next slot. */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
		      0x7);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
		      pwrb, pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
		      pwrb, pwrb_handle->nxt_wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      session_state, pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
		      pwrb, 1);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
		      pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
		      0);
	/* Pad buffer comes from the shared ISCSI_MEM_GLOBAL_HEADER region. */
	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      pad_buffer_addr_hi, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      pad_buffer_addr_lo, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));

	/* Post exactly one WRB for this CID via the TXULP0 doorbell. */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) <<
		    DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
}
  2543. static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
  2544. int *index, int *age)
  2545. {
  2546. *index = be32_to_cpu(itt) >> 16;
  2547. if (age)
  2548. *age = conn->session->age;
  2549. }
/**
 * beiscsi_alloc_pdu - allocates pdu and related resources
 * @task: libiscsi task
 * @opcode: opcode of pdu for task
 *
 * This is called with the session lock held. It will allocate
 * the wrb and sgl if needed for the command. And it will prep
 * the pdu's itt. beiscsi_parse_pdu will later translate
 * the pdu itt to the libiscsi task itt.
 */
static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	itt_t itt;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	dma_addr_t paddr;

	/* The BHS comes from the session's DMA pool. */
	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
					  GFP_KERNEL, &paddr);
	if (!io_task->cmd_bhs)
		return -ENOMEM;
	io_task->bhs_pa.u.a64.address = paddr;
	/* NOTE(review): alloc_wrb_handle()'s result is dereferenced
	 * without a NULL check — confirm it cannot fail here. */
	io_task->pwrb_handle = alloc_wrb_handle(phba,
						beiscsi_conn->beiscsi_conn_cid,
						task->itt);
	io_task->pwrb_handle->pio_handle = task;
	io_task->conn = beiscsi_conn;
	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
	task->hdr_max = sizeof(struct be_cmd_bhs);
	if (task->sc) {
		/* SCSI command: take a handle from the I/O SGL pool. */
		spin_lock(&phba->io_sgl_lock);
		io_task->psgl_handle = alloc_io_sgl_handle(phba);
		spin_unlock(&phba->io_sgl_lock);
		if (!io_task->psgl_handle)
			goto free_hndls;
	} else {
		io_task->scsi_cmnd = NULL;
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
			/* All login PDUs of a connection share one mgmt
			 * SGL handle, allocated on the first login PDU. */
			if (!beiscsi_conn->login_in_progress) {
				spin_lock(&phba->mgmt_sgl_lock);
				io_task->psgl_handle = (struct sgl_handle *)
						alloc_mgmt_sgl_handle(phba);
				spin_unlock(&phba->mgmt_sgl_lock);
				if (!io_task->psgl_handle)
					goto free_hndls;
				beiscsi_conn->login_in_progress = 1;
				beiscsi_conn->plogin_sgl_handle =
							io_task->psgl_handle;
			} else {
				io_task->psgl_handle =
						beiscsi_conn->plogin_sgl_handle;
			}
		} else {
			/* Other mgmt PDUs get their own mgmt SGL handle. */
			spin_lock(&phba->mgmt_sgl_lock);
			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
			spin_unlock(&phba->mgmt_sgl_lock);
			if (!io_task->psgl_handle)
				goto free_hndls;
		}
	}
	/* Wire itt: task index in the top 16 bits, sgl index in the low
	 * bits; beiscsi_parse_pdu() reverses this on completion. */
	itt = (itt_t) cpu_to_be32(((unsigned int)task->itt << 16) |
			(unsigned int)(io_task->psgl_handle->sgl_index));
	io_task->cmd_bhs->iscsi_hdr.itt = itt;
	return 0;

free_hndls:
	/* Unwind the WRB handle and BHS allocated above. */
	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
	free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
	io_task->pwrb_handle = NULL;
	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
		      io_task->bhs_pa.u.a64.address);
	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
	return -ENOMEM;
}
  2628. static void beiscsi_cleanup_task(struct iscsi_task *task)
  2629. {
  2630. struct beiscsi_io_task *io_task = task->dd_data;
  2631. struct iscsi_conn *conn = task->conn;
  2632. struct beiscsi_conn *beiscsi_conn = conn->dd_data;
  2633. struct beiscsi_hba *phba = beiscsi_conn->phba;
  2634. struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
  2635. struct hwi_wrb_context *pwrb_context;
  2636. struct hwi_controller *phwi_ctrlr;
  2637. phwi_ctrlr = phba->phwi_ctrlr;
  2638. pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
  2639. if (io_task->pwrb_handle) {
  2640. free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
  2641. io_task->pwrb_handle = NULL;
  2642. }
  2643. if (io_task->cmd_bhs) {
  2644. pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
  2645. io_task->bhs_pa.u.a64.address);
  2646. }
  2647. if (task->sc) {
  2648. if (io_task->psgl_handle) {
  2649. spin_lock(&phba->io_sgl_lock);
  2650. free_io_sgl_handle(phba, io_task->psgl_handle);
  2651. spin_unlock(&phba->io_sgl_lock);
  2652. io_task->psgl_handle = NULL;
  2653. }
  2654. } else {
  2655. if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
  2656. return;
  2657. if (io_task->psgl_handle) {
  2658. spin_lock(&phba->mgmt_sgl_lock);
  2659. free_mgmt_sgl_handle(phba, io_task->psgl_handle);
  2660. spin_unlock(&phba->mgmt_sgl_lock);
  2661. io_task->psgl_handle = NULL;
  2662. }
  2663. }
  2664. }
/*
 * beiscsi_iotask() - post a SCSI read/write command WRB to the adapter.
 * @task: libiscsi task carrying the SCSI command.
 * @sg: mapped scatterlist for the data buffer.
 * @num_sg: number of scatterlist entries.
 * @xferlen: total transfer length in bytes.
 * @writedir: non-zero for data-out (write), zero for data-in (read).
 *
 * Fills the task's preallocated WRB: writes also get a pre-built
 * unsolicited data-out PDU template and use INI_WR_CMD with dsp set;
 * reads use INI_RD_CMD.  LUN, transfer length, CmdSN, SGL index and
 * scatterlist are copied in, the WRB dwords are converted, and one
 * entry is posted via the TXULP0 doorbell.  Always returns 0.
 */
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);
	if (writedir) {
		SE_DEBUG(DBG_LVL_4, " WRITE Command \t");
		/* Pre-build the 48-byte unsolicited data-out PDU header. */
		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      ISCSI_OPCODE_SCSI_DATA_OUT);
		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		SE_DEBUG(DBG_LVL_4, "READ Command \t");
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}
	/* Copy the LUN from the BHS into the data-out template and WRB. */
	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
	       io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
				  lun[0]));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);
	hwi_write_sgl(pwrb, sg, num_sg, io_task);
	/* Chain to the next WRB slot of this connection. */
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	/* Convert WRB dwords before posting to the hardware. */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
	/* Post exactly one WRB for this CID via the TXULP0 doorbell. */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
/*
 * beiscsi_mtask() - post a management-PDU WRB (login, nop-out, text,
 * TMF, logout) to the adapter.
 * @task: libiscsi task for the management PDU.
 *
 * Selects the WRB type from the PDU opcode, copies the PDU via
 * hwi_write_buffer() and rings the TXULP0 doorbell.  For a TMF, the
 * aborted command's ICDs are invalidated first; if the referenced task
 * or its scsi_cmnd is gone the function returns 0 without posting
 * anything.  Returns -EINVAL for unsupported opcodes, 0 otherwise.
 */
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	struct iscsi_task *aborted_task;

	pwrb = io_task->pwrb_handle->pwrb;
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		/* Login is posted with cmdsn_itt forced to 1. */
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		/* NOTE(review): nop-out is posted as INI_RD_CMD; confirm
		 * this is the intended WRB type for ping PDUs. */
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		/* Look up the command being aborted; silently succeed if
		 * it has already completed. */
		aborted_task = iscsi_itt_to_task(conn,
				((struct iscsi_tm *)task->hdr)->rtt);
		if (!aborted_task)
			return 0;
		aborted_io_task = aborted_task->dd_data;
		if (!aborted_io_task->scsi_cmnd)
			return 0;
		mgmt_invalidate_icds(phba,
				     aborted_io_task->psgl_handle->sgl_index,
				     beiscsi_conn->beiscsi_conn_cid);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_TMF_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      HWH_TYPE_LOGOUT);
		hwi_write_buffer(pwrb, task);
		break;
	default:
		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
		return -EINVAL;
	}

	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
		      be32_to_cpu(task->data_count));
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	/* Convert WRB dwords before posting to the hardware. */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
	/* Post exactly one WRB for this CID via the TXULP0 doorbell. */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
  2790. static int beiscsi_task_xmit(struct iscsi_task *task)
  2791. {
  2792. struct iscsi_conn *conn = task->conn;
  2793. struct beiscsi_io_task *io_task = task->dd_data;
  2794. struct scsi_cmnd *sc = task->sc;
  2795. struct beiscsi_conn *beiscsi_conn = conn->dd_data;
  2796. struct scatterlist *sg;
  2797. int num_sg;
  2798. unsigned int writedir = 0, xferlen = 0;
  2799. SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
  2800. "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
  2801. task, conn, beiscsi_conn);
  2802. if (!sc)
  2803. return beiscsi_mtask(task);
  2804. io_task->scsi_cmnd = sc;
  2805. num_sg = scsi_dma_map(sc);
  2806. if (num_sg < 0) {
  2807. SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
  2808. return num_sg;
  2809. }
  2810. SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
  2811. (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
  2812. xferlen = scsi_bufflen(sc);
  2813. sg = scsi_sglist(sc);
  2814. if (sc->sc_data_direction == DMA_TO_DEVICE) {
  2815. writedir = 1;
  2816. SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
  2817. task->imm_count);
  2818. } else
  2819. writedir = 0;
  2820. return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
  2821. }
  2822. static void beiscsi_remove(struct pci_dev *pcidev)
  2823. {
  2824. struct beiscsi_hba *phba = NULL;
  2825. phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
  2826. if (!phba) {
  2827. dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
  2828. return;
  2829. }
  2830. hwi_disable_intr(phba);
  2831. if (phba->pcidev->irq)
  2832. free_irq(phba->pcidev->irq, phba);
  2833. destroy_workqueue(phba->wq);
  2834. if (blk_iopoll_enabled)
  2835. blk_iopoll_disable(&phba->iopoll);
  2836. beiscsi_clean_port(phba);
  2837. beiscsi_free_mem(phba);
  2838. beiscsi_unmap_pci_function(phba);
  2839. pci_free_consistent(phba->pcidev,
  2840. phba->ctrl.mbox_mem_alloced.size,
  2841. phba->ctrl.mbox_mem_alloced.va,
  2842. phba->ctrl.mbox_mem_alloced.dma);
  2843. iscsi_host_remove(phba->shost);
  2844. pci_dev_put(phba->pcidev);
  2845. iscsi_host_free(phba->shost);
  2846. }
  2847. static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
  2848. const struct pci_device_id *id)
  2849. {
  2850. struct beiscsi_hba *phba = NULL;
  2851. int ret;
  2852. ret = beiscsi_enable_pci(pcidev);
  2853. if (ret < 0) {
  2854. shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
  2855. "Failed to enable pci device \n");
  2856. return ret;
  2857. }
  2858. phba = beiscsi_hba_alloc(pcidev);
  2859. if (!phba) {
  2860. dev_err(&pcidev->dev, "beiscsi_dev_probe-"
  2861. " Failed in beiscsi_hba_alloc \n");
  2862. goto disable_pci;
  2863. }
  2864. pci_set_drvdata(pcidev, phba);
  2865. ret = be_ctrl_init(phba, pcidev);
  2866. if (ret) {
  2867. shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
  2868. "Failed in be_ctrl_init\n");
  2869. goto hba_free;
  2870. }
  2871. spin_lock_init(&phba->io_sgl_lock);
  2872. spin_lock_init(&phba->mgmt_sgl_lock);
  2873. spin_lock_init(&phba->isr_lock);
  2874. beiscsi_get_params(phba);
  2875. ret = beiscsi_init_port(phba);
  2876. if (ret < 0) {
  2877. shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
  2878. "Failed in beiscsi_init_port\n");
  2879. goto free_port;
  2880. }
  2881. snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
  2882. phba->shost->host_no);
  2883. phba->wq = create_singlethread_workqueue(phba->wq_name);
  2884. if (!phba->wq) {
  2885. shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
  2886. "Failed to allocate work queue\n");
  2887. goto free_twq;
  2888. }
  2889. INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
  2890. if (blk_iopoll_enabled) {
  2891. blk_iopoll_init(&phba->iopoll, be_iopoll_budget, be_iopoll);
  2892. blk_iopoll_enable(&phba->iopoll);
  2893. }
  2894. ret = beiscsi_init_irqs(phba);
  2895. if (ret < 0) {
  2896. shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
  2897. "Failed to beiscsi_init_irqs\n");
  2898. goto free_blkenbld;
  2899. }
  2900. ret = hwi_enable_intr(phba);
  2901. if (ret < 0) {
  2902. shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
  2903. "Failed to hwi_enable_intr\n");
  2904. goto free_ctrlr;
  2905. }
  2906. SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
  2907. return 0;
  2908. free_ctrlr:
  2909. if (phba->pcidev->irq)
  2910. free_irq(phba->pcidev->irq, phba);
  2911. free_blkenbld:
  2912. destroy_workqueue(phba->wq);
  2913. if (blk_iopoll_enabled)
  2914. blk_iopoll_disable(&phba->iopoll);
  2915. free_twq:
  2916. beiscsi_clean_port(phba);
  2917. beiscsi_free_mem(phba);
  2918. free_port:
  2919. pci_free_consistent(phba->pcidev,
  2920. phba->ctrl.mbox_mem_alloced.size,
  2921. phba->ctrl.mbox_mem_alloced.va,
  2922. phba->ctrl.mbox_mem_alloced.dma);
  2923. beiscsi_unmap_pci_function(phba);
  2924. hba_free:
  2925. iscsi_host_remove(phba->shost);
  2926. pci_dev_put(phba->pcidev);
  2927. iscsi_host_free(phba->shost);
  2928. disable_pci:
  2929. pci_disable_device(pcidev);
  2930. return ret;
  2931. }
/*
 * iSCSI transport template registered with the open-iscsi transport
 * class.  Session/connection management and PDU allocation map to the
 * beiscsi_* handlers in this file; generic pieces fall through to
 * libiscsi (iscsi_conn_teardown, iscsi_session_get_param, ...).
 */
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	/* Settable/readable session and connection parameters. */
	.param_mask = ISCSI_MAX_RECV_DLENGTH |
		ISCSI_MAX_XMIT_DLENGTH |
		ISCSI_HDRDGST_EN |
		ISCSI_DATADGST_EN |
		ISCSI_INITIAL_R2T_EN |
		ISCSI_MAX_R2T |
		ISCSI_IMM_DATA_EN |
		ISCSI_FIRST_BURST |
		ISCSI_MAX_BURST |
		ISCSI_PDU_INORDER_EN |
		ISCSI_DATASEQ_INORDER_EN |
		ISCSI_ERL |
		ISCSI_CONN_PORT |
		ISCSI_CONN_ADDRESS |
		ISCSI_EXP_STATSN |
		ISCSI_PERSISTENT_PORT |
		ISCSI_PERSISTENT_ADDRESS |
		ISCSI_TARGET_NAME | ISCSI_TPGT |
		ISCSI_USERNAME | ISCSI_PASSWORD |
		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
		ISCSI_LU_RESET_TMO |
		ISCSI_PING_TMO | ISCSI_RECV_TMO |
		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				ISCSI_HOST_INITIATOR_NAME,
	/* Session and connection lifecycle. */
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.set_param = beiscsi_set_param,
	.get_conn_param = beiscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = beiscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	/* Task/PDU data path. */
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	/* TCP endpoint (offloaded connection) management. */
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
/*
 * PCI driver descriptor: binds beiscsi_dev_probe()/beiscsi_remove()
 * to the device IDs in beiscsi_pci_id_table.  Registered in
 * beiscsi_module_init().
 */
static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table
};
  2991. static int __init beiscsi_module_init(void)
  2992. {
  2993. int ret;
  2994. beiscsi_scsi_transport =
  2995. iscsi_register_transport(&beiscsi_iscsi_transport);
  2996. if (!beiscsi_scsi_transport) {
  2997. SE_DEBUG(DBG_LVL_1,
  2998. "beiscsi_module_init - Unable to register beiscsi"
  2999. "transport.\n");
  3000. ret = -ENOMEM;
  3001. }
  3002. SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
  3003. &beiscsi_iscsi_transport);
  3004. ret = pci_register_driver(&beiscsi_pci_driver);
  3005. if (ret) {
  3006. SE_DEBUG(DBG_LVL_1,
  3007. "beiscsi_module_init - Unable to register"
  3008. "beiscsi pci driver.\n");
  3009. goto unregister_iscsi_transport;
  3010. }
  3011. return 0;
  3012. unregister_iscsi_transport:
  3013. iscsi_unregister_transport(&beiscsi_iscsi_transport);
  3014. return ret;
  3015. }
/**
 * beiscsi_module_exit() - module exit point
 *
 * Tears down in reverse order of beiscsi_module_init(): unregister the
 * PCI driver first (so no new probes can race), then the iSCSI
 * transport.  The order matters; do not swap these calls.
 */
static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}
/* Hook module load/unload to the init/exit routines above. */
module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);