  1. /******************************************************************************
  2. * This software may be used and distributed according to the terms of
  3. * the GNU General Public License (GPL), incorporated herein by reference.
  4. * Drivers based on or derived from this code fall under the GPL and must
  5. * retain the authorship, copyright and license notice. This file is not
  6. * a complete program and may only be used when the entire operating
  7. * system is licensed under the GPL.
  8. * See the file COPYING in this distribution for more information.
  9. *
  10. * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
  11. * Virtualized Server Adapter.
  12. * Copyright(c) 2002-2009 Neterion Inc.
  13. ******************************************************************************/
  14. #include <linux/etherdevice.h>
  15. #include "vxge-traffic.h"
  16. #include "vxge-config.h"
  17. #include "vxge-main.h"
  18. /*
  19. * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
  20. * @vp: Virtual Path handle.
  21. *
  22. * Enable vpath interrupts. The function is to be executed last in the
  23. * vpath initialization sequence.
  24. *
  25. * See also: vxge_hw_vpath_intr_disable()
  26. */
  27. enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
  28. {
  29. u64 val64;
  30. struct __vxge_hw_virtualpath *vpath;
  31. struct vxge_hw_vpath_reg __iomem *vp_reg;
  32. enum vxge_hw_status status = VXGE_HW_OK;
  33. if (vp == NULL) {
  34. status = VXGE_HW_ERR_INVALID_HANDLE;
  35. goto exit;
  36. }
  37. vpath = vp->vpath;
  38. if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
  39. status = VXGE_HW_ERR_VPATH_NOT_OPEN;
  40. goto exit;
  41. }
  42. vp_reg = vpath->vp_reg;
  43. writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
  44. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  45. &vp_reg->general_errors_reg);
  46. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  47. &vp_reg->pci_config_errors_reg);
  48. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  49. &vp_reg->mrpcim_to_vpath_alarm_reg);
  50. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  51. &vp_reg->srpcim_to_vpath_alarm_reg);
  52. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  53. &vp_reg->vpath_ppif_int_status);
  54. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  55. &vp_reg->srpcim_msg_to_vpath_reg);
  56. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  57. &vp_reg->vpath_pcipif_int_status);
  58. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  59. &vp_reg->prc_alarm_reg);
  60. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  61. &vp_reg->wrdma_alarm_status);
  62. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  63. &vp_reg->asic_ntwk_vp_err_reg);
  64. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  65. &vp_reg->xgmac_vp_int_status);
  66. val64 = readq(&vp_reg->vpath_general_int_status);
  67. /* Mask unwanted interrupts */
  68. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  69. &vp_reg->vpath_pcipif_int_mask);
  70. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  71. &vp_reg->srpcim_msg_to_vpath_mask);
  72. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  73. &vp_reg->srpcim_to_vpath_alarm_mask);
  74. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  75. &vp_reg->mrpcim_to_vpath_alarm_mask);
  76. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  77. &vp_reg->pci_config_errors_mask);
  78. /* Unmask the individual interrupts */
  79. writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
  80. VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
  81. VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
  82. VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
  83. &vp_reg->general_errors_mask);
  84. __vxge_hw_pio_mem_write32_upper(
  85. (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
  86. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
  87. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
  88. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
  89. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
  90. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
  91. &vp_reg->kdfcctl_errors_mask);
  92. __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
  93. __vxge_hw_pio_mem_write32_upper(
  94. (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
  95. &vp_reg->prc_alarm_mask);
  96. __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
  97. __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
  98. if (vpath->hldev->first_vp_id != vpath->vp_id)
  99. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  100. &vp_reg->asic_ntwk_vp_err_mask);
  101. else
  102. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
  103. VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
  104. VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
  105. &vp_reg->asic_ntwk_vp_err_mask);
  106. __vxge_hw_pio_mem_write32_upper(0,
  107. &vp_reg->vpath_general_int_mask);
  108. exit:
  109. return status;
  110. }
  111. /*
  112. * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
  113. * @vp: Virtual Path handle.
  114. *
  115. * Disable vpath interrupts. The function is to be executed as part of
  116. * the vpath teardown sequence.
  117. *
  118. * See also: vxge_hw_vpath_intr_enable()
  119. */
  120. enum vxge_hw_status vxge_hw_vpath_intr_disable(
  121. struct __vxge_hw_vpath_handle *vp)
  122. {
  123. u64 val64;
  124. struct __vxge_hw_virtualpath *vpath;
  125. enum vxge_hw_status status = VXGE_HW_OK;
  126. struct vxge_hw_vpath_reg __iomem *vp_reg;
  127. if (vp == NULL) {
  128. status = VXGE_HW_ERR_INVALID_HANDLE;
  129. goto exit;
  130. }
  131. vpath = vp->vpath;
  132. if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
  133. status = VXGE_HW_ERR_VPATH_NOT_OPEN;
  134. goto exit;
  135. }
  136. vp_reg = vpath->vp_reg;
  137. __vxge_hw_pio_mem_write32_upper(
  138. (u32)VXGE_HW_INTR_MASK_ALL,
  139. &vp_reg->vpath_general_int_mask);
  140. val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
  141. writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
  142. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  143. &vp_reg->general_errors_mask);
  144. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  145. &vp_reg->pci_config_errors_mask);
  146. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  147. &vp_reg->mrpcim_to_vpath_alarm_mask);
  148. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  149. &vp_reg->srpcim_to_vpath_alarm_mask);
  150. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  151. &vp_reg->vpath_ppif_int_mask);
  152. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  153. &vp_reg->srpcim_msg_to_vpath_mask);
  154. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  155. &vp_reg->vpath_pcipif_int_mask);
  156. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  157. &vp_reg->wrdma_alarm_mask);
  158. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  159. &vp_reg->prc_alarm_mask);
  160. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  161. &vp_reg->xgmac_vp_int_mask);
  162. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  163. &vp_reg->asic_ntwk_vp_err_mask);
  164. exit:
  165. return status;
  166. }
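/*
 * Usage sketch (illustrative only, not part of the driver): vpath interrupt
 * control is expected to bracket the vpath's active lifetime.  The handle
 * "vp" is assumed to come from the vpath open code (outside this file).
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_intr_enable(vp);	(last step of bringing the vpath up)
 *	if (status != VXGE_HW_OK)
 *		return status;
 *	...					(vpath carries traffic)
 *	vxge_hw_vpath_intr_disable(vp);		(before the vpath is torn down)
 */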
  167. /**
  168. * vxge_hw_channel_msix_mask - Mask MSIX Vector.
  169. * @channel: Channel handle (Rx or Tx)
  170. * @msix_id: MSIX ID
  171. *
  172. * The function masks the MSI-X interrupt for the given msix_id.
  173. */
  176. void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
  177. {
  178. __vxge_hw_pio_mem_write32_upper(
  179. (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
  180. 0, 32),
  181. &channel->common_reg->set_msix_mask_vect[msix_id%4]);
  182. return;
  183. }
  184. /**
  185. * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
  186. * @channel: Channel handle (Rx or Tx)
  187. * @msix_id: MSIX ID
  188. *
  189. * The function unmasks the MSI-X interrupt for the given msix_id.
  190. */
  193. void
  194. vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
  195. {
  196. __vxge_hw_pio_mem_write32_upper(
  197. (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
  198. 0, 32),
  199. &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
  200. return;
  201. }
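/*
 * Usage sketch (illustrative only): a per-channel MSI-X handler would
 * typically mask its vector on entry and unmask it once the work has been
 * handed off (for example to NAPI).  "channel" and "msix_id" are assumed to
 * come from the caller's interrupt setup.
 *
 *	vxge_hw_channel_msix_mask(channel, msix_id);
 *	(schedule or perform the channel work)
 *	vxge_hw_channel_msix_unmask(channel, msix_id);
 */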
  202. /**
  203. * vxge_hw_device_set_intr_type - Updates the configuration
  204. * with new interrupt type.
  205. * @hldev: HW device handle.
  206. * @intr_mode: New interrupt type
  207. */
  208. u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
  209. {
  210. if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
  211. (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
  212. (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
  213. (intr_mode != VXGE_HW_INTR_MODE_DEF))
  214. intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
  215. hldev->config.intr_mode = intr_mode;
  216. return intr_mode;
  217. }
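/*
 * Usage sketch (illustrative only): the caller requests an interrupt mode and
 * must honour the mode actually recorded, since any unsupported value falls
 * back to INTA (VXGE_HW_INTR_MODE_IRQLINE).  "hldev" is the HW device handle
 * obtained during device initialization (outside this file).
 *
 *	u32 intr_mode;
 *
 *	intr_mode = vxge_hw_device_set_intr_type(hldev, VXGE_HW_INTR_MODE_MSIX);
 *	if (intr_mode != VXGE_HW_INTR_MODE_MSIX)
 *		(fall back to the INTA interrupt handler)
 */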
  218. /**
  219. * vxge_hw_device_intr_enable - Enable interrupts.
  220. * @hldev: HW device handle.
  223. *
  224. * Enable Titan interrupts. The function is to be executed last in the
  225. * Titan initialization sequence.
  226. *
  227. * See also: vxge_hw_device_intr_disable()
  228. */
  229. void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
  230. {
  231. u32 i;
  232. u64 val64;
  233. u32 val32;
  234. vxge_hw_device_mask_all(hldev);
  235. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  236. if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
  237. continue;
  238. vxge_hw_vpath_intr_enable(
  239. VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
  240. }
  241. if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
  242. val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
  243. hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
  244. if (val64 != 0) {
  245. writeq(val64, &hldev->common_reg->tim_int_status0);
  246. writeq(~val64, &hldev->common_reg->tim_int_mask0);
  247. }
  248. val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
  249. hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
  250. if (val32 != 0) {
  251. __vxge_hw_pio_mem_write32_upper(val32,
  252. &hldev->common_reg->tim_int_status1);
  253. __vxge_hw_pio_mem_write32_upper(~val32,
  254. &hldev->common_reg->tim_int_mask1);
  255. }
  256. }
  257. val64 = readq(&hldev->common_reg->titan_general_int_status);
  258. vxge_hw_device_unmask_all(hldev);
  259. return;
  260. }
  261. /**
  262. * vxge_hw_device_intr_disable - Disable Titan interrupts.
  263. * @hldev: HW device handle.
  266. *
  267. * Disable Titan interrupts.
  268. *
  269. * See also: vxge_hw_device_intr_enable()
  270. */
  271. void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
  272. {
  273. u32 i;
  274. vxge_hw_device_mask_all(hldev);
  275. /* mask all the tim interrupts */
  276. writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
  277. __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
  278. &hldev->common_reg->tim_int_mask1);
  279. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  280. if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
  281. continue;
  282. vxge_hw_vpath_intr_disable(
  283. VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
  284. }
  285. return;
  286. }
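/*
 * Usage sketch (illustrative only): device-level interrupt control brackets
 * the whole adapter.  vxge_hw_device_intr_enable() walks the deployed vpaths
 * and is the last step of device initialization; the disable call is made
 * before the device is stopped or reset.
 *
 *	vxge_hw_device_intr_enable(hldev);
 *	(adapter up, traffic flowing)
 *	vxge_hw_device_intr_disable(hldev);
 */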
  287. /**
  288. * vxge_hw_device_mask_all - Mask all device interrupts.
  289. * @hldev: HW device handle.
  290. *
  291. * Mask all device interrupts.
  292. *
  293. * See also: vxge_hw_device_unmask_all()
  294. */
  295. void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
  296. {
  297. u64 val64;
  298. val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
  299. VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
  300. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
  301. &hldev->common_reg->titan_mask_all_int);
  302. return;
  303. }
  304. /**
  305. * vxge_hw_device_unmask_all - Unmask all device interrupts.
  306. * @hldev: HW device handle.
  307. *
  308. * Unmask all device interrupts.
  309. *
  310. * See also: vxge_hw_device_mask_all()
  311. */
  312. void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
  313. {
  314. u64 val64 = 0;
  315. if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
  316. val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
  317. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
  318. &hldev->common_reg->titan_mask_all_int);
  319. return;
  320. }
  321. /**
  322. * vxge_hw_device_flush_io - Flush io writes.
  323. * @hldev: HW device handle.
  324. *
  325. * The function performs a read operation to flush io writes.
  326. *
  327. * Returns: void
  328. */
  329. void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
  330. {
  331. u32 val32;
  332. val32 = readl(&hldev->common_reg->titan_general_int_status);
  333. }
  334. /**
  335. * vxge_hw_device_begin_irq - Begin IRQ processing.
  336. * @hldev: HW device handle.
  337. * @skip_alarms: Do not clear the alarms
  338. * @reason: "Reason" for the interrupt, the value of Titan's
  339. * general_int_status register.
  340. *
  341. * The function performs two actions: it first checks whether (for a shared
  342. * IRQ) the interrupt was raised by the device; next, it masks the device interrupts.
  343. *
  344. * Note:
  345. * vxge_hw_device_begin_irq() does not flush MMIO writes through the
  346. * bridge. Therefore, two back-to-back interrupts are potentially possible.
  347. *
  348. * Returns: 0, if the interrupt is not "ours" (note that in this case the
  349. * device remains enabled).
  350. * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
  351. * status.
  352. */
  353. enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
  354. u32 skip_alarms, u64 *reason)
  355. {
  356. u32 i;
  357. u64 val64;
  358. u64 adapter_status;
  359. u64 vpath_mask;
  360. enum vxge_hw_status ret = VXGE_HW_OK;
  361. val64 = readq(&hldev->common_reg->titan_general_int_status);
  362. if (unlikely(!val64)) {
  363. /* not Titan interrupt */
  364. *reason = 0;
  365. ret = VXGE_HW_ERR_WRONG_IRQ;
  366. goto exit;
  367. }
  368. if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
  369. adapter_status = readq(&hldev->common_reg->adapter_status);
  370. if (adapter_status == VXGE_HW_ALL_FOXES) {
  371. __vxge_hw_device_handle_error(hldev,
  372. NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
  373. *reason = 0;
  374. ret = VXGE_HW_ERR_SLOT_FREEZE;
  375. goto exit;
  376. }
  377. }
  378. hldev->stats.sw_dev_info_stats.total_intr_cnt++;
  379. *reason = val64;
  380. vpath_mask = hldev->vpaths_deployed >>
  381. (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
  382. if (val64 &
  383. VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
  384. hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
  385. return VXGE_HW_OK;
  386. }
  387. hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
  388. if (unlikely(val64 &
  389. VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
  390. enum vxge_hw_status error_level = VXGE_HW_OK;
  391. hldev->stats.sw_dev_err_stats.vpath_alarms++;
  392. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  393. if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
  394. continue;
  395. ret = __vxge_hw_vpath_alarm_process(
  396. &hldev->virtual_paths[i], skip_alarms);
  397. error_level = VXGE_HW_SET_LEVEL(ret, error_level);
  398. if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
  399. (ret == VXGE_HW_ERR_SLOT_FREEZE)))
  400. break;
  401. }
  402. ret = error_level;
  403. }
  404. exit:
  405. return ret;
  406. }
  407. /*
  408. * __vxge_hw_device_handle_link_up_ind
  409. * @hldev: HW device handle.
  410. *
  411. * Link up indication handler. The function is invoked by HW when
  412. * Titan indicates that the link has been up for a programmable amount of time.
  413. */
  414. enum vxge_hw_status
  415. __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
  416. {
  417. /*
  418. * If the link is already up, return.
  419. */
  420. if (hldev->link_state == VXGE_HW_LINK_UP)
  421. goto exit;
  422. hldev->link_state = VXGE_HW_LINK_UP;
  423. /* notify driver */
  424. if (hldev->uld_callbacks.link_up)
  425. hldev->uld_callbacks.link_up(hldev);
  426. exit:
  427. return VXGE_HW_OK;
  428. }
  429. /*
  430. * __vxge_hw_device_handle_link_down_ind
  431. * @hldev: HW device handle.
  432. *
  433. * Link down indication handler. The function is invoked by HW when
  434. * Titan indicates that the link is down.
  435. */
  436. enum vxge_hw_status
  437. __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
  438. {
  439. /*
  440. * If the link is already down, return.
  441. */
  442. if (hldev->link_state == VXGE_HW_LINK_DOWN)
  443. goto exit;
  444. hldev->link_state = VXGE_HW_LINK_DOWN;
  445. /* notify driver */
  446. if (hldev->uld_callbacks.link_down)
  447. hldev->uld_callbacks.link_down(hldev);
  448. exit:
  449. return VXGE_HW_OK;
  450. }
  451. /**
  452. * __vxge_hw_device_handle_error - Handle error
  453. * @hldev: HW device
  454. * @vp_id: Vpath Id
  455. * @type: Error type. Please see enum vxge_hw_event{}
  456. *
  457. * Handle error.
  458. */
  459. enum vxge_hw_status
  460. __vxge_hw_device_handle_error(
  461. struct __vxge_hw_device *hldev,
  462. u32 vp_id,
  463. enum vxge_hw_event type)
  464. {
  465. switch (type) {
  466. case VXGE_HW_EVENT_UNKNOWN:
  467. break;
  468. case VXGE_HW_EVENT_RESET_START:
  469. case VXGE_HW_EVENT_RESET_COMPLETE:
  470. case VXGE_HW_EVENT_LINK_DOWN:
  471. case VXGE_HW_EVENT_LINK_UP:
  472. goto out;
  473. case VXGE_HW_EVENT_ALARM_CLEARED:
  474. goto out;
  475. case VXGE_HW_EVENT_ECCERR:
  476. case VXGE_HW_EVENT_MRPCIM_ECCERR:
  477. goto out;
  478. case VXGE_HW_EVENT_FIFO_ERR:
  479. case VXGE_HW_EVENT_VPATH_ERR:
  480. case VXGE_HW_EVENT_CRITICAL_ERR:
  481. case VXGE_HW_EVENT_SERR:
  482. break;
  483. case VXGE_HW_EVENT_SRPCIM_SERR:
  484. case VXGE_HW_EVENT_MRPCIM_SERR:
  485. goto out;
  486. case VXGE_HW_EVENT_SLOT_FREEZE:
  487. break;
  488. default:
  489. vxge_assert(0);
  490. goto out;
  491. }
  492. /* notify driver */
  493. if (hldev->uld_callbacks.crit_err)
  494. hldev->uld_callbacks.crit_err(
  495. (struct __vxge_hw_device *)hldev,
  496. type, vp_id);
  497. out:
  498. return VXGE_HW_OK;
  499. }
  500. /**
  501. * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
  502. * condition that has caused the Tx and RX interrupt.
  503. * @hldev: HW device.
  504. *
  505. * Acknowledge (that is, clear) the condition that has caused
  506. * the Tx and Rx interrupt.
  507. * See also: vxge_hw_device_begin_irq(),
  508. * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
  509. */
  510. void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
  511. {
  512. if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
  513. (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
  514. writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
  515. hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
  516. &hldev->common_reg->tim_int_status0);
  517. }
  518. if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
  519. (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
  520. __vxge_hw_pio_mem_write32_upper(
  521. (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
  522. hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
  523. &hldev->common_reg->tim_int_status1);
  524. }
  525. return;
  526. }
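/*
 * Interrupt-handler sketch (illustrative only, INTA case): a top half built
 * on the helpers above first asks the HW whether the interrupt is ours, then
 * acknowledges the Tx/Rx condition and defers the rest.  Alarm handling and
 * the exact unmask point are simplified here.
 *
 *	u64 reason;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_device_begin_irq(hldev, 0, &reason);
 *	if (status == VXGE_HW_ERR_WRONG_IRQ)
 *		return IRQ_NONE;			(shared IRQ, not ours)
 *	if (status == VXGE_HW_OK && reason) {
 *		vxge_hw_device_mask_all(hldev);
 *		vxge_hw_device_clear_tx_rx(hldev);
 *		(schedule NAPI or other deferred work)
 *		vxge_hw_device_unmask_all(hldev);	(or from the poll routine)
 *	}
 *	return IRQ_HANDLED;
 */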
  527. /*
  528. * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
  529. * @channel: Channel
  530. * @dtrh: Buffer to return the DTR pointer
  531. *
  532. * Allocates a dtr from the reserve array. If the reserve array is empty,
  533. * it swaps the reserve and free arrays.
  534. *
  535. */
  536. enum vxge_hw_status
  537. vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
  538. {
  539. void **tmp_arr;
  540. if (channel->reserve_ptr - channel->reserve_top > 0) {
  541. _alloc_after_swap:
  542. *dtrh = channel->reserve_arr[--channel->reserve_ptr];
  543. return VXGE_HW_OK;
  544. }
  545. /* switch between empty and full arrays */
  546. /* the idea behind this design is that keeping the free and reserve
  547. * arrays separate also separates the irq and non-irq paths,
  548. * i.e. no additional locking is needed when we free a resource */
  549. if (channel->length - channel->free_ptr > 0) {
  550. tmp_arr = channel->reserve_arr;
  551. channel->reserve_arr = channel->free_arr;
  552. channel->free_arr = tmp_arr;
  553. channel->reserve_ptr = channel->length;
  554. channel->reserve_top = channel->free_ptr;
  555. channel->free_ptr = channel->length;
  556. channel->stats->reserve_free_swaps_cnt++;
  557. goto _alloc_after_swap;
  558. }
  559. channel->stats->full_cnt++;
  560. *dtrh = NULL;
  561. return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
  562. }
  563. /*
  564. * vxge_hw_channel_dtr_post - Post a dtr to the channel
  565. * @channelh: Channel
  566. * @dtrh: DTR pointer
  567. *
  568. * Posts a dtr to work array.
  569. *
  570. */
  571. void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
  572. {
  573. vxge_assert(channel->work_arr[channel->post_index] == NULL);
  574. channel->work_arr[channel->post_index++] = dtrh;
  575. /* wrap-around */
  576. if (channel->post_index == channel->length)
  577. channel->post_index = 0;
  578. }
  579. /*
  580. * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
  581. * @channel: Channel
  582. * @dtr: Buffer to return the next completed DTR pointer
  583. *
  584. * Returns the next completed dtr without removing it from the work array.
  585. *
  586. */
  587. void
  588. vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
  589. {
  590. vxge_assert(channel->compl_index < channel->length);
  591. *dtrh = channel->work_arr[channel->compl_index];
  592. prefetch(*dtrh);
  593. }
  594. /*
  595. * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
  596. * @channel: Channel handle
  597. *
  598. * Removes the next completed dtr from work array
  599. *
  600. */
  601. void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
  602. {
  603. channel->work_arr[channel->compl_index] = NULL;
  604. /* wrap-around */
  605. if (++channel->compl_index == channel->length)
  606. channel->compl_index = 0;
  607. channel->stats->total_compl_cnt++;
  608. }
  609. /*
  610. * vxge_hw_channel_dtr_free - Frees a dtr
  611. * @channel: Channel handle
  612. * @dtr: DTR pointer
  613. *
  614. * Returns the dtr to free array
  615. *
  616. */
  617. void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
  618. {
  619. channel->free_arr[--channel->free_ptr] = dtrh;
  620. }
  621. /*
  622. * vxge_hw_channel_dtr_count
  623. * @channel: Channel handle. Obtained via vxge_hw_channel_open().
  624. *
  625. * Retrieve the number of DTRs available. This function cannot be called
  626. * from the data path; ring_initial_replenishi() is the only user.
  627. */
  628. int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
  629. {
  630. return (channel->reserve_ptr - channel->reserve_top) +
  631. (channel->length - channel->free_ptr);
  632. }
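/*
 * Descriptor (dtr) lifecycle sketch (illustrative only): a channel keeps a
 * reserve array, a work array and a free array.  A descriptor moves
 * reserve -> work (post) -> completion -> free, and vxge_hw_channel_dtr_alloc()
 * swaps the free array back in once the reserve array runs dry.  The ring and
 * fifo layers check the descriptor's ownership bit before calling
 * vxge_hw_channel_dtr_complete().
 *
 *	void *dtrh;
 *
 *	if (vxge_hw_channel_dtr_alloc(channel, &dtrh) == VXGE_HW_OK) {
 *		(fill in the descriptor)
 *		vxge_hw_channel_dtr_post(channel, dtrh);
 *	}
 *
 *	vxge_hw_channel_dtr_try_complete(channel, &dtrh);
 *	if (dtrh != NULL && (descriptor no longer owned by the adapter)) {
 *		vxge_hw_channel_dtr_complete(channel);
 *		(process the completed descriptor)
 *		vxge_hw_channel_dtr_free(channel, dtrh);
 *	}
 */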
  633. /**
  634. * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
  635. * @ring: Handle to the ring object used for receive
  636. * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
  637. * with a valid handle.
  638. *
  639. * Reserve an Rx descriptor for subsequent filling-in by the driver
  640. * and posting on the corresponding ring (@ring)
  641. * via vxge_hw_ring_rxd_post().
  642. *
  643. * Returns: VXGE_HW_OK - success.
  644. * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
  645. *
  646. */
  647. enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
  648. void **rxdh)
  649. {
  650. enum vxge_hw_status status;
  651. struct __vxge_hw_channel *channel;
  652. channel = &ring->channel;
  653. status = vxge_hw_channel_dtr_alloc(channel, rxdh);
  654. if (status == VXGE_HW_OK) {
  655. struct vxge_hw_ring_rxd_1 *rxdp =
  656. (struct vxge_hw_ring_rxd_1 *)*rxdh;
  657. rxdp->control_0 = rxdp->control_1 = 0;
  658. }
  659. return status;
  660. }
  661. /**
  662. * vxge_hw_ring_rxd_free - Free descriptor.
  663. * @ring: Handle to the ring object used for receive
  664. * @rxdh: Descriptor handle.
  665. *
  666. * Free the reserved descriptor. This operation is "symmetrical" to
  667. * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
  668. * lifecycle.
  669. *
  670. * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
  671. * be:
  672. *
  673. * - reserved (vxge_hw_ring_rxd_reserve);
  674. *
  675. * - posted (vxge_hw_ring_rxd_post);
  676. *
  677. * - completed (vxge_hw_ring_rxd_next_completed);
  678. *
  679. * - and recycled again (vxge_hw_ring_rxd_free).
  680. *
  681. * For alternative state transitions and more details please refer to
  682. * the design doc.
  683. *
  684. */
  685. void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
  686. {
  687. struct __vxge_hw_channel *channel;
  688. channel = &ring->channel;
  689. vxge_hw_channel_dtr_free(channel, rxdh);
  690. }
  691. /**
  692. * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
  693. * @ring: Handle to the ring object used for receive
  694. * @rxdh: Descriptor handle.
  695. *
  696. * This routine prepares an rxd and posts it to the ring's work array.
  697. */
  698. void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
  699. {
  700. struct __vxge_hw_channel *channel;
  701. channel = &ring->channel;
  702. vxge_hw_channel_dtr_post(channel, rxdh);
  703. }
  704. /**
  705. * vxge_hw_ring_rxd_post_post - Process rxd after post.
  706. * @ring: Handle to the ring object used for receive
  707. * @rxdh: Descriptor handle.
  708. *
  709. * Processes rxd after post
  710. */
  711. void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
  712. {
  713. struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
  714. struct __vxge_hw_channel *channel;
  715. channel = &ring->channel;
  716. rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
  717. if (ring->stats->common_stats.usage_cnt > 0)
  718. ring->stats->common_stats.usage_cnt--;
  719. }
  720. /**
  721. * vxge_hw_ring_rxd_post - Post descriptor on the ring.
  722. * @ring: Handle to the ring object used for receive
  723. * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
  724. *
  725. * Post descriptor on the ring.
  726. * Prior to posting the descriptor should be filled in accordance with
  727. * Host/Titan interface specification for a given service (LL, etc.).
  728. *
  729. */
  730. void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
  731. {
  732. struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
  733. struct __vxge_hw_channel *channel;
  734. channel = &ring->channel;
  735. wmb();
  736. rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
  737. vxge_hw_channel_dtr_post(channel, rxdh);
  738. if (ring->stats->common_stats.usage_cnt > 0)
  739. ring->stats->common_stats.usage_cnt--;
  740. }
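/*
 * Rx replenish sketch (illustrative only): reserve an rxd, attach a receive
 * buffer and post it back to the adapter.  The buffer-set helper named below,
 * vxge_hw_ring_rxd_1b_set(), is assumed to be the one-buffer variant provided
 * by vxge-traffic.h; "dma_addr" and "buf_size" come from the caller's mapping.
 *
 *	void *rxdh;
 *
 *	if (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		vxge_hw_ring_rxd_1b_set(rxdh, dma_addr, buf_size);
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */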
  741. /**
  742. * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
  743. * @ring: Handle to the ring object used for receive
  744. * @rxdh: Descriptor handle.
  745. *
  746. * Processes rxd after post with memory barrier.
  747. */
  748. void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
  749. {
  750. struct __vxge_hw_channel *channel;
  751. channel = &ring->channel;
  752. wmb();
  753. vxge_hw_ring_rxd_post_post(ring, rxdh);
  754. }
  755. /**
  756. * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
  757. * @ring: Handle to the ring object used for receive
  758. * @rxdh: Descriptor handle. Returned by HW.
  759. * @t_code: Transfer code, as per Titan User Guide,
  760. * Receive Descriptor Format. Returned by HW.
  761. *
  762. * Retrieve the _next_ completed descriptor.
  763. * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
  764. * driver of new completed descriptors. After that
  765. * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
  766. * of the completions (the very first completion is passed by HW via
  767. * vxge_hw_ring_callback_f).
  768. *
  769. * Implementation-wise, the driver is free to call
  770. * vxge_hw_ring_rxd_next_completed either immediately from inside the
  771. * ring callback, or in a deferred fashion and separate (from HW)
  772. * context.
  773. *
  774. * Non-zero @t_code means failure to fill-in receive buffer(s)
  775. * of the descriptor.
  776. * For instance, parity error detected during the data transfer.
  777. * In this case Titan will complete the descriptor and indicate
  778. * for the host that the received data is not to be used.
  779. * For details please refer to Titan User Guide.
  780. *
  781. * Returns: VXGE_HW_OK - success.
  782. * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
  783. * are currently available for processing.
  784. *
  785. * See also: vxge_hw_ring_callback_f{},
  786. * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
  787. */
  788. enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
  789. struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
  790. {
  791. struct __vxge_hw_channel *channel;
  792. struct vxge_hw_ring_rxd_1 *rxdp;
  793. enum vxge_hw_status status = VXGE_HW_OK;
  794. channel = &ring->channel;
  795. vxge_hw_channel_dtr_try_complete(channel, rxdh);
  796. rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
  797. if (rxdp == NULL) {
  798. status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
  799. goto exit;
  800. }
  801. /* check whether it is not the end */
  802. if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {
  803. vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
  804. 0);
  805. ++ring->cmpl_cnt;
  806. vxge_hw_channel_dtr_complete(channel);
  807. *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
  808. vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
  809. ring->stats->common_stats.usage_cnt++;
  810. if (ring->stats->common_stats.usage_max <
  811. ring->stats->common_stats.usage_cnt)
  812. ring->stats->common_stats.usage_max =
  813. ring->stats->common_stats.usage_cnt;
  814. status = VXGE_HW_OK;
  815. goto exit;
  816. }
  817. /* reset it. since we don't want to return
  818. * garbage to the driver */
  819. *rxdh = NULL;
  820. status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
  821. exit:
  822. return status;
  823. }
  824. /**
  825. * vxge_hw_ring_handle_tcode - Handle transfer code.
  826. * @ring: Handle to the ring object used for receive
  827. * @rxdh: Descriptor handle.
  828. * @t_code: One of the enumerated (and documented in the Titan user guide)
  829. * "transfer codes".
  830. *
  831. * Handle descriptor's transfer code. The latter comes with each completed
  832. * descriptor.
  833. *
  834. * Returns: one of the enum vxge_hw_status{} enumerated types.
  835. * VXGE_HW_OK - for success.
  836. * VXGE_HW_ERR_CRITICAL - when encounters critical error.
  837. */
  838. enum vxge_hw_status vxge_hw_ring_handle_tcode(
  839. struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
  840. {
  841. struct __vxge_hw_channel *channel;
  842. enum vxge_hw_status status = VXGE_HW_OK;
  843. channel = &ring->channel;
  844. /* t_code 0 is success; t_code 0x5 (unparseable packet, for example
  845. * one with an unknown IPv6 header) is tolerated. Any other valid
  846. * t_code is counted in the per-t_code error statistics.
  847. */
  848. if (t_code == 0 || t_code == 5) {
  849. status = VXGE_HW_OK;
  850. goto exit;
  851. }
  852. if (t_code > 0xF) {
  853. status = VXGE_HW_ERR_INVALID_TCODE;
  854. goto exit;
  855. }
  856. ring->stats->rxd_t_code_err_cnt[t_code]++;
  857. exit:
  858. return status;
  859. }
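/*
 * Rx completion sketch (illustrative only): drain completed descriptors, let
 * the t_code helper classify errors, then recycle each rxd.
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *								VXGE_HW_OK) {
 *		if (t_code != 0)
 *			vxge_hw_ring_handle_tcode(ring, rxdh, t_code);
 *		(hand the buffer to the stack, or drop it on error)
 *		vxge_hw_ring_rxd_free(ring, rxdh);	(or refill and re-post it)
 *	}
 */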
  860. /**
  861. * __vxge_hw_non_offload_db_post - Post non offload doorbell
  862. *
  863. * @fifo: fifohandle
  864. * @txdl_ptr: The starting location of the TxDL in host memory
  865. * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
  866. * @no_snoop: No snoop flags
  867. *
  868. * This function posts a non-offload doorbell to doorbell FIFO
  869. *
  870. */
  871. static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
  872. u64 txdl_ptr, u32 num_txds, u32 no_snoop)
  873. {
  874. struct __vxge_hw_channel *channel;
  875. channel = &fifo->channel;
  876. writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
  877. VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
  878. VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
  879. &fifo->nofl_db->control_0);
  880. mmiowb();
  881. writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
  882. mmiowb();
  883. }
  884. /**
  885. * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
  886. * the fifo
  887. * @fifoh: Handle to the fifo object used for non offload send
  888. */
  889. u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
  890. {
  891. return vxge_hw_channel_dtr_count(&fifoh->channel);
  892. }
  893. /**
  894. * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
  895. * @fifoh: Handle to the fifo object used for non offload send
  896. * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
  897. * with a valid handle.
  898. * @txdl_priv: Buffer to return the pointer to per txdl space
  899. *
  900. * Reserve a single TxDL (that is, fifo descriptor)
  901. * for subsequent filling-in by the driver
  902. * and posting on the corresponding fifo (@fifoh)
  903. * via vxge_hw_fifo_txdl_post().
  904. *
  905. * Note: it is the responsibility of driver to reserve multiple descriptors
  906. * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
  907. * carries up to configured number (fifo.max_frags) of contiguous buffers.
  908. *
  909. * Returns: VXGE_HW_OK - success;
  910. * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
  911. *
  912. */
  913. enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
  914. struct __vxge_hw_fifo *fifo,
  915. void **txdlh, void **txdl_priv)
  916. {
  917. struct __vxge_hw_channel *channel;
  918. enum vxge_hw_status status;
  919. int i;
  920. channel = &fifo->channel;
  921. status = vxge_hw_channel_dtr_alloc(channel, txdlh);
  922. if (status == VXGE_HW_OK) {
  923. struct vxge_hw_fifo_txd *txdp =
  924. (struct vxge_hw_fifo_txd *)*txdlh;
  925. struct __vxge_hw_fifo_txdl_priv *priv;
  926. priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
  927. /* reset the TxDL's private */
  928. priv->align_dma_offset = 0;
  929. priv->align_vaddr_start = priv->align_vaddr;
  930. priv->align_used_frags = 0;
  931. priv->frags = 0;
  932. priv->alloc_frags = fifo->config->max_frags;
  933. priv->next_txdl_priv = NULL;
  934. *txdl_priv = (void *)(size_t)txdp->host_control;
  935. for (i = 0; i < fifo->config->max_frags; i++) {
  936. txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
  937. txdp->control_0 = txdp->control_1 = 0;
  938. }
  939. }
  940. return status;
  941. }
  942. /**
  943. * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
  944. * descriptor.
  945. * @fifo: Handle to the fifo object used for non offload send
  946. * @txdlh: Descriptor handle.
  947. * @frag_idx: Index of the data buffer in the caller's scatter-gather list
  948. * (of buffers).
  949. * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
  950. * @size: Size of the data buffer (in bytes).
  951. *
  952. * This API is part of the preparation of the transmit descriptor for posting
  953. * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
  954. * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
  955. * All three APIs fill in the fields of the fifo descriptor,
  956. * in accordance with the Titan specification.
  957. *
  958. */
  959. void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
  960. void *txdlh, u32 frag_idx,
  961. dma_addr_t dma_pointer, u32 size)
  962. {
  963. struct __vxge_hw_fifo_txdl_priv *txdl_priv;
  964. struct vxge_hw_fifo_txd *txdp, *txdp_last;
  965. struct __vxge_hw_channel *channel;
  966. channel = &fifo->channel;
  967. txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
  968. txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
  969. if (frag_idx != 0)
  970. txdp->control_0 = txdp->control_1 = 0;
  971. else {
  972. txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
  973. VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
  974. txdp->control_1 |= fifo->interrupt_type;
  975. txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
  976. fifo->tx_intr_num);
  977. if (txdl_priv->frags) {
  978. txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
  979. (txdl_priv->frags - 1);
  980. txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
  981. VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
  982. }
  983. }
  984. vxge_assert(frag_idx < txdl_priv->alloc_frags);
  985. txdp->buffer_pointer = (u64)dma_pointer;
  986. txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
  987. fifo->stats->total_buffers++;
  988. txdl_priv->frags++;
  989. }
  990. /**
  991. * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
  992. * @fifo: Handle to the fifo object used for non offload send
  993. * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
  996. *
  997. * Post descriptor on the 'fifo' type channel for transmission.
  998. * Prior to posting the descriptor should be filled in accordance with
  999. * Host/Titan interface specification for a given service (LL, etc.).
  1000. *
  1001. */
  1002. void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
  1003. {
  1004. struct __vxge_hw_fifo_txdl_priv *txdl_priv;
  1005. struct vxge_hw_fifo_txd *txdp_last;
  1006. struct vxge_hw_fifo_txd *txdp_first;
  1007. struct __vxge_hw_channel *channel;
  1008. channel = &fifo->channel;
  1009. txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
  1010. txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
  1011. txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
  1012. txdp_last->control_0 |=
  1013. VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
  1014. txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
  1015. vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
  1016. __vxge_hw_non_offload_db_post(fifo,
  1017. (u64)txdl_priv->dma_addr,
  1018. txdl_priv->frags - 1,
  1019. fifo->no_snoop_bits);
  1020. fifo->stats->total_posts++;
  1021. fifo->stats->common_stats.usage_cnt++;
  1022. if (fifo->stats->common_stats.usage_max <
  1023. fifo->stats->common_stats.usage_cnt)
  1024. fifo->stats->common_stats.usage_max =
  1025. fifo->stats->common_stats.usage_cnt;
  1026. }
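/*
 * Tx post sketch (illustrative only): check for room, reserve a TxDL, attach
 * the DMA-mapped fragments, then post.  "nr_frags", "dma_addr[]" and "len[]"
 * stand in for whatever the caller's mapping code produced.
 *
 *	void *txdlh, *txdl_priv;
 *	u32 i;
 *
 *	if (vxge_hw_fifo_free_txdl_count_get(fifo) == 0)
 *		(stop the queue and bail out)
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) == VXGE_HW_OK) {
 *		for (i = 0; i < nr_frags; i++)
 *			vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i,
 *						     dma_addr[i], len[i]);
 *		vxge_hw_fifo_txdl_post(fifo, txdlh);
 *	}
 */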
  1027. /**
  1028. * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
  1029. * @fifo: Handle to the fifo object used for non offload send
  1030. * @txdlh: Descriptor handle. Returned by HW.
  1031. * @t_code: Transfer code, as per Titan User Guide,
  1032. * Transmit Descriptor Format.
  1033. * Returned by HW.
  1034. *
  1035. * Retrieve the _next_ completed descriptor.
  1036. * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
  1037. * driver of new completed descriptors. After that
  1038. * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
  1039. * of the completions (the very first completion is passed by HW via
  1040. * vxge_hw_channel_callback_f).
  1041. *
  1042. * Implementation-wise, the driver is free to call
  1043. * vxge_hw_fifo_txdl_next_completed either immediately from inside the
  1044. * channel callback, or in a deferred fashion and separate (from HW)
  1045. * context.
  1046. *
  1047. * Non-zero @t_code means failure to process the descriptor.
  1048. * The failure could happen, for instance, when the link is
  1049. * down, in which case Titan completes the descriptor because it
  1050. * is not able to send the data out.
  1051. *
  1052. * For details please refer to Titan User Guide.
  1053. *
  1054. * Returns: VXGE_HW_OK - success.
  1055. * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
  1056. * are currently available for processing.
  1057. *
  1058. */
  1059. enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
  1060. struct __vxge_hw_fifo *fifo, void **txdlh,
  1061. enum vxge_hw_fifo_tcode *t_code)
  1062. {
  1063. struct __vxge_hw_channel *channel;
  1064. struct vxge_hw_fifo_txd *txdp;
  1065. enum vxge_hw_status status = VXGE_HW_OK;
  1066. channel = &fifo->channel;
  1067. vxge_hw_channel_dtr_try_complete(channel, txdlh);
  1068. txdp = (struct vxge_hw_fifo_txd *)*txdlh;
  1069. if (txdp == NULL) {
  1070. status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
  1071. goto exit;
  1072. }
  1073. /* check whether host owns it */
  1074. if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
  1075. vxge_assert(txdp->host_control != 0);
  1076. vxge_hw_channel_dtr_complete(channel);
  1077. *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
  1078. if (fifo->stats->common_stats.usage_cnt > 0)
  1079. fifo->stats->common_stats.usage_cnt--;
  1080. status = VXGE_HW_OK;
  1081. goto exit;
  1082. }
  1083. /* no more completions */
  1084. *txdlh = NULL;
  1085. status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
  1086. exit:
  1087. return status;
  1088. }
  1089. /**
  1090. * vxge_hw_fifo_handle_tcode - Handle transfer code.
  1091. * @fifo: Handle to the fifo object used for non offload send
  1092. * @txdlh: Descriptor handle.
  1093. * @t_code: One of the enumerated (and documented in the Titan user guide)
  1094. * "transfer codes".
  1095. *
  1096. * Handle descriptor's transfer code. The latter comes with each completed
  1097. * descriptor.
  1098. *
  1099. * Returns: one of the enum vxge_hw_status{} enumerated types.
  1100. * VXGE_HW_OK - for success.
  1101. * VXGE_HW_ERR_CRITICAL - when encounters critical error.
  1102. */
  1103. enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
  1104. void *txdlh,
  1105. enum vxge_hw_fifo_tcode t_code)
  1106. {
  1107. struct __vxge_hw_channel *channel;
  1108. enum vxge_hw_status status = VXGE_HW_OK;
  1109. channel = &fifo->channel;
  1110. if ((t_code & 0x7) > 0x4) {
  1111. status = VXGE_HW_ERR_INVALID_TCODE;
  1112. goto exit;
  1113. }
  1114. fifo->stats->txd_t_code_err_cnt[t_code]++;
  1115. exit:
  1116. return status;
  1117. }
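/*
 * Tx completion sketch (illustrative only): reap completed TxDLs, classify
 * the transfer code, then release each descriptor back to the fifo.
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *								VXGE_HW_OK) {
 *		if (t_code != 0)
 *			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		(unmap and free the buffers associated with txdlh)
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */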
  1118. /**
  1119. * vxge_hw_fifo_txdl_free - Free descriptor.
  1120. * @fifo: Handle to the fifo object used for non offload send
  1121. * @txdlh: Descriptor handle.
  1122. *
  1123. * Free the reserved descriptor. This operation is "symmetrical" to
  1124. * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
  1125. * lifecycle.
  1126. *
  1127. * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
  1128. * be:
  1129. *
  1130. * - reserved (vxge_hw_fifo_txdl_reserve);
  1131. *
  1132. * - posted (vxge_hw_fifo_txdl_post);
  1133. *
  1134. * - completed (vxge_hw_fifo_txdl_next_completed);
  1135. *
  1136. * - and recycled again (vxge_hw_fifo_txdl_free).
  1137. *
  1138. * For alternative state transitions and more details please refer to
  1139. * the design doc.
  1140. *
  1141. */
  1142. void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
  1143. {
  1144. struct __vxge_hw_fifo_txdl_priv *txdl_priv;
  1145. u32 max_frags;
  1146. struct __vxge_hw_channel *channel;
  1147. channel = &fifo->channel;
  1148. txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
  1149. (struct vxge_hw_fifo_txd *)txdlh);
  1150. max_frags = fifo->config->max_frags;
  1151. vxge_hw_channel_dtr_free(channel, txdlh);
  1152. }
  1153. /**
  1154. * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
  1155. * to MAC address table.
  1156. * @vp: Vpath handle.
  1157. * @macaddr: MAC address to be added for this vpath into the list
  1158. * @macaddr_mask: MAC address mask for macaddr
  1159. * @duplicate_mode: Duplicate MAC address add mode. Please see
  1160. * enum vxge_hw_vpath_mac_addr_add_mode{}
  1161. *
  1162. * Adds the given mac address and mac address mask into the list for this
  1163. * vpath.
  1164. * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
  1165. * vxge_hw_vpath_mac_addr_get_next
  1166. *
  1167. */
  1168. enum vxge_hw_status
  1169. vxge_hw_vpath_mac_addr_add(
  1170. struct __vxge_hw_vpath_handle *vp,
  1171. u8 (macaddr)[ETH_ALEN],
  1172. u8 (macaddr_mask)[ETH_ALEN],
  1173. enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
  1174. {
  1175. u32 i;
  1176. u64 data1 = 0ULL;
  1177. u64 data2 = 0ULL;
  1178. enum vxge_hw_status status = VXGE_HW_OK;
  1179. if (vp == NULL) {
  1180. status = VXGE_HW_ERR_INVALID_HANDLE;
  1181. goto exit;
  1182. }
  1183. for (i = 0; i < ETH_ALEN; i++) {
  1184. data1 <<= 8;
  1185. data1 |= (u8)macaddr[i];
  1186. data2 <<= 8;
  1187. data2 |= (u8)macaddr_mask[i];
  1188. }
  1189. switch (duplicate_mode) {
  1190. case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
  1191. i = 0;
  1192. break;
  1193. case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
  1194. i = 1;
  1195. break;
  1196. case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
  1197. i = 2;
  1198. break;
  1199. default:
  1200. i = 0;
  1201. break;
  1202. }
  1203. status = __vxge_hw_vpath_rts_table_set(vp,
  1204. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
  1205. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
  1206. 0,
  1207. VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
  1208. VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
  1209. VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
  1210. exit:
  1211. return status;
  1212. }
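/*
 * Usage sketch (illustrative only): add one address to the vpath's table,
 * discarding duplicates.  "new_mac" is a placeholder for the caller's
 * address; the all-ones mask shown here is only an assumption about the
 * mask semantics (see the Titan documentation).
 *
 *	u8 new_mac[ETH_ALEN];
 *	static const u8 mac_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 *
 *	status = vxge_hw_vpath_mac_addr_add(vp, new_mac, mac_mask,
 *			VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE);
 */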
/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 *               vpath from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}

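/*
 * Usage sketch (illustrative only): walking the MAC address table of a vpath
 * by combining the _get and _get_next calls.  @vp is assumed to be a valid
 * vpath handle; the loop stops once the hardware reports no further entry.
 *
 *	u8 addr[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_get(vp, addr, mask);
 *	while (status == VXGE_HW_OK) {
 *		// consume addr and mask here
 *		status = vxge_hw_vpath_mac_addr_get_next(vp, addr, mask);
 *	}
 */
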
/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Deletes the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the first vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the next vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}

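/*
 * Usage sketch (illustrative only): mirroring an 802.1Q filter change onto a
 * vpath.  The handle @vp, the id @vid and the boolean @add are assumed to come
 * from the caller, e.g. the vlan add/kill paths of the netdev driver.
 *
 *	enum vxge_hw_status status;
 *
 *	if (add)
 *		status = vxge_hw_vpath_vid_add(vp, vid);
 *	else
 *		status = vxge_hw_vpath_vid_delete(vp, vid);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */
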
/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

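/*
 * Usage sketch (illustrative only): reacting to IFF_PROMISC changes from the
 * stack.  The handle @vp and the interface @flags are assumed to be supplied
 * by the netdev layer; note that only a function with MRPCIM access rights
 * actually changes the hardware state (see vxge_hw_vpath_promisc_enable()
 * above).
 *
 *	enum vxge_hw_status status;
 *
 *	if (flags & IFF_PROMISC)
 *		status = vxge_hw_vpath_promisc_enable(vp);
 *	else
 *		status = vxge_hw_vpath_promisc_disable(vp);
 */
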
/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 *
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

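/*
 * Usage sketch (illustrative only): a typical "set multicast list" path built
 * on these helpers.  The handle @vp and the @all_multi decision (for example
 * derived from IFF_ALLMULTI) are assumed to come from the netdev layer.
 *
 *	enum vxge_hw_status status;
 *
 *	if (all_multi)
 *		status = vxge_hw_vpath_mcast_enable(vp);
 *	else
 *		status = vxge_hw_vpath_mcast_disable(vp);
 *
 *	// broadcast reception is normally left enabled
 *	if (status == VXGE_HW_OK)
 *		status = vxge_hw_vpath_bcast_enable(vp);
 */
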
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status __vxge_hw_vpath_alarm_process(
			struct __vxge_hw_virtualpath *vpath,
			u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			if (((val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			))) {
				sw_stats->error_stats.network_sustained_fault++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			if (((val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			))) {
				sw_stats->error_stats.network_sustained_ok++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
				~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
				~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
				~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
				~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
				~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
				~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
				~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
				& ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
				& ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}

/*
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vp: Virtual Path handle.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
			u32 skip_alarms)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
	return status;
}

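/*
 * Usage sketch (illustrative only): a driver's alarm interrupt path would
 * typically hand the vpath handle to vxge_hw_vpath_alarm_process() and react
 * to critical return codes.  @vp is assumed to be the handle of the vpath
 * whose alarm vector fired; passing skip_alarms == 0 also clears the alarms.
 * The reset_work item is hypothetical.
 *
 *	status = vxge_hw_vpath_alarm_process(vp, 0);
 *	if (status == VXGE_HW_ERR_CRITICAL ||
 *	    status == VXGE_HW_ERR_SLOT_FREEZE)
 *		schedule_work(&reset_work);
 */
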
/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *            alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *             interrupts (can be repeated). If fifo or ring are not enabled
 *             the MSIX vector for that should be set to 0
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API will associate the given MSIX vector numbers with the four TIM
 * interrupts and alarm interrupt.
 */
enum vxge_hw_status
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
			int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 first_vp_id = vpath->hldev->first_vp_id;

	val64 =  VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
			(first_vp_id * 4) + tim_msix_id[0]) |
		 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
			(first_vp_id * 4) + tim_msix_id[1]) |
		 VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
			(first_vp_id * 4) + tim_msix_id[2]);

	val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
			(first_vp_id * 4) + tim_msix_id[3]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
			(first_vp_id * 4) + alarm_msix_id),
			&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
					VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
				0, 32), &vp_reg->one_shot_vect3_en);
	}

	return VXGE_HW_OK;
}

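/*
 * Usage sketch (illustrative only): programming per-vpath MSI-X routing at
 * open time.  The vector layout chosen here (tx = 0, rx = 1, alarm = 2 within
 * the vpath's group of four) is an assumption of this example, not a
 * requirement of the API.
 *
 *	int tim_msix_id[4] = {0, 1, 0, 0};
 *	int alarm_msix_id = 2;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);
 *	if (status == VXGE_HW_OK)
 *		vxge_hw_vpath_msix_unmask(vp, alarm_msix_id);
 */
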
/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 *
 * Returns: None.
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
			(msix_id / 4)), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id
 *
 * Returns: None.
 */
void
vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
				(msix_id/4)), 0, 32),
			&hldev->common_reg->
				clr_msix_one_shot_vec[msix_id%4]);
	} else {
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
				(msix_id/4)), 0, 32),
			&hldev->common_reg->
				clear_msix_mask_vect[msix_id%4]);
	}
}

/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 *
 * Returns: None.
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
			(msix_id/4)), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
}

/**
 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
 * @vp: Virtual Path handle.
 *
 * The function masks all msix interrupts for the given vpath
 *
 */
void
vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
		&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
}

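/*
 * Usage sketch (illustrative only): the usual mask/handle/clear/unmask pattern
 * in a per-vpath MSI-X receive interrupt handler.  The context structure
 * vxge_example_ctx and its vp/ring/msix_id members are hypothetical names for
 * whatever the driver stores per vector.
 *
 *	static irqreturn_t vxge_example_rx_msix_handle(int irq, void *dev_id)
 *	{
 *		struct vxge_example_ctx *ctx = dev_id;
 *
 *		vxge_hw_vpath_msix_mask(ctx->vp, ctx->msix_id);
 *		vxge_hw_vpath_poll_rx(ctx->ring);
 *		vxge_hw_vpath_msix_clear(ctx->vp, ctx->msix_id);
 *		vxge_hw_vpath_msix_unmask(ctx->vp, ctx->msix_id);
 *
 *		return IRQ_HANDLED;
 *	}
 */
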
/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}

/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}

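/*
 * Usage sketch (illustrative only): bracketing deferred Tx/Rx processing when
 * the device runs in legacy INTA mode.  @vp, @ring and @fifo are assumed to
 * belong to the same vpath; skb_ptr/nr_skb/more follow the
 * vxge_hw_vpath_poll_tx() contract described below.
 *
 *	vxge_hw_vpath_inta_mask_tx_rx(vp);
 *	vxge_hw_vpath_poll_rx(ring);
 *	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, nr_skb, &more);
 *	vxge_hw_vpath_inta_unmask_tx_rx(vp);
 */
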
/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx().
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	u64 val64 = 0;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			val64 =
			  readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}

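/*
 * Usage sketch (illustrative only): a NAPI-style poll built on
 * vxge_hw_vpath_poll_rx().  The ring callback registered at open time is
 * expected to count completions into ring->cmpl_cnt; the budget handling,
 * napi structure and the choice of re-enabling INTA interrupts are
 * assumptions of this example.
 *
 *	int pkts_processed;
 *
 *	vxge_hw_vpath_poll_rx(ring);
 *	pkts_processed = ring->cmpl_cnt;
 *
 *	if (pkts_processed < budget) {
 *		napi_complete(napi);
 *		vxge_hw_vpath_inta_unmask_tx_rx(vp);
 *	}
 */
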
/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: pointer into an skb pointer array; filled by the completion
 *           callback with the skbs of completed descriptors, to be freed
 *           by the caller
 * @nr_skb: number of entries available in the skb pointer array
 * @more: set by the completion callback when further completed descriptors
 *        remain to be processed
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_rx().
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					struct sk_buff ***skb_ptr, int nr_skb,
					int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}
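
/*
 * Usage sketch (illustrative only): draining Tx completions and freeing the
 * returned skbs.  The array size NR_COMPLETED and the dev_kfree_skb_irq()
 * free path are assumptions of this example; the callback advances the
 * caller's skb pointer past every skb it stored.
 *
 *	struct sk_buff *completed[NR_COMPLETED];
 *	struct sk_buff **skb_ptr;
 *	struct sk_buff **p;
 *	int more;
 *
 *	do {
 *		more = 0;
 *		skb_ptr = completed;
 *		vxge_hw_vpath_poll_tx(fifo, &skb_ptr, NR_COMPLETED, &more);
 *		for (p = completed; p != skb_ptr; p++)
 *			dev_kfree_skb_irq(*p);
 *	} while (more);
 */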