/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/etherdevice.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed the last in
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_status);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_status);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_status);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_status);

	val64 = readq(&vp_reg->vpath_general_int_status);

	/* Mask unwanted interrupts */
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	/* Unmask the individual interrupts */
	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
		&vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
			&vp_reg->asic_ntwk_vp_err_mask);

	__vxge_hw_pio_mem_write32_upper(0,
		&vp_reg->vpath_general_int_mask);
exit:
	return status;
}
/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is to be executed before the
 * vpath is reset or closed.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	__vxge_hw_pio_mem_write32_upper(
		(u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_general_int_mask);

	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);

exit:
	return status;
}
/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
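
/*
 * Example (editor's illustrative sketch, not part of the driver): a typical
 * per-channel MSI-X handler masks its own vector, does or defers its work,
 * and unmasks when done. The dev_id wiring and the msix_id value below are
 * assumptions standing in for whatever the caller tracks per vector.
 *
 *	static irqreturn_t example_msix_handler(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_channel *channel = dev_id;
 *		int msix_id = 0;	// vector owned by this channel (assumed)
 *
 *		vxge_hw_channel_msix_mask(channel, msix_id);
 *		// ... process completions for this channel ...
 *		vxge_hw_channel_msix_unmask(channel, msix_id);
 *		return IRQ_HANDLED;
 *	}
 */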
/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *		with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	   (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	   (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	   (intr_mode != VXGE_HW_INTR_MODE_DEF))
		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

	hldev->config.intr_mode = intr_mode;
	return intr_mode;
}
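
/*
 * Example (editor's sketch): the interrupt mode is normally chosen before
 * interrupts are enabled, and anything the function does not recognize
 * falls back to INTA. The hldev below is assumed to be an already
 * initialized device handle.
 *
 *	u32 mode = vxge_hw_device_set_intr_type(hldev,
 *						VXGE_HW_INTR_MODE_MSIX);
 *	if (mode != VXGE_HW_INTR_MODE_MSIX)
 *		;	// fall back to requesting a legacy IRQ line
 */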
/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed the last in
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	vxge_hw_device_mask_all(hldev);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

		if (val64 != 0) {
			writeq(val64, &hldev->common_reg->tim_int_status0);
			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
					&hldev->common_reg->tim_int_status1);
			__vxge_hw_pio_mem_write32_upper(~val32,
					&hldev->common_reg->tim_int_mask1);
		}
	}

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);
}
/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	u32 i;

	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
		&hldev->common_reg->tim_int_mask1);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_disable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}
}
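
/*
 * Editor's note (illustrative, not taken from the original source): the two
 * calls above are intended to bracket the device's active life, e.g. in the
 * net_device open/stop paths of the upper-layer driver:
 *
 *	// open path, after the vpaths have been opened:
 *	vxge_hw_device_intr_enable(hldev);
 *
 *	// stop path, before the vpaths are closed:
 *	vxge_hw_device_intr_disable(hldev);
 */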
/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
	u64 val64 = 0;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
		val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
			&hldev->common_reg->titan_mask_all_int);
}
/**
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 *
 * Returns: void
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
	u32 val32;

	val32 = readl(&hldev->common_reg->titan_general_int_status);
}
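
/*
 * Example (editor's sketch): a read-back such as the one above is the usual
 * way to push posted PCIe writes out to the adapter, e.g. after programming
 * a batch of mask registers:
 *
 *	vxge_hw_device_unmask_all(hldev);
 *	vxge_hw_device_flush_io(hldev);	// make sure the unmask reached HW
 */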
/*
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
			      enum vxge_hw_event type)
{
	switch (type) {
	case VXGE_HW_EVENT_UNKNOWN:
		break;
	case VXGE_HW_EVENT_RESET_START:
	case VXGE_HW_EVENT_RESET_COMPLETE:
	case VXGE_HW_EVENT_LINK_DOWN:
	case VXGE_HW_EVENT_LINK_UP:
		goto out;
	case VXGE_HW_EVENT_ALARM_CLEARED:
		goto out;
	case VXGE_HW_EVENT_ECCERR:
	case VXGE_HW_EVENT_MRPCIM_ECCERR:
		goto out;
	case VXGE_HW_EVENT_FIFO_ERR:
	case VXGE_HW_EVENT_VPATH_ERR:
	case VXGE_HW_EVENT_CRITICAL_ERR:
	case VXGE_HW_EVENT_SERR:
		break;
	case VXGE_HW_EVENT_SRPCIM_SERR:
	case VXGE_HW_EVENT_MRPCIM_SERR:
		goto out;
	case VXGE_HW_EVENT_SLOT_FREEZE:
		break;
	default:
		vxge_assert(0);
		goto out;
	}

	/* notify driver */
	if (hldev->uld_callbacks.crit_err)
		hldev->uld_callbacks.crit_err(
			(struct __vxge_hw_device *)hldev,
			type, vp_id);
out:
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the link is already marked down, there is nothing to do.
	 */
	if (hldev->link_state == VXGE_HW_LINK_DOWN)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_DOWN;

	/* notify driver */
	if (hldev->uld_callbacks.link_down)
		hldev->uld_callbacks.link_down(hldev);
exit:
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for a programmable amount of time.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the link is already marked up, there is nothing to do.
	 */
	if (hldev->link_state == VXGE_HW_LINK_UP)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_UP;

	/* notify driver */
	if (hldev->uld_callbacks.link_up)
		hldev->uld_callbacks.link_up(hldev);
exit:
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
			      u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     && (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_fault++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     && (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_ok++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
				~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
				~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
				~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
				~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
				~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
				~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
				~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
				& ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
				& ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}
/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 * general_int_status register.
 *
 * The function performs two actions. It first checks whether the interrupt
 * (possibly a shared IRQ) was raised by the device. Next, it masks the
 * device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
 * status.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
					     u32 skip_alarms, u64 *reason)
{
	u32 i;
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (unlikely(!val64)) {
		/* not Titan interrupt */
		*reason = 0;
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
		adapter_status = readq(&hldev->common_reg->adapter_status);
		if (adapter_status == VXGE_HW_ALL_FOXES) {
			__vxge_hw_device_handle_error(hldev,
				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
			*reason = 0;
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	*reason = val64;

	vpath_mask = hldev->vpaths_deployed >>
				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);

	if (val64 &
	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
		return VXGE_HW_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	if (unlikely(val64 &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

		enum vxge_hw_status error_level = VXGE_HW_OK;

		hldev->stats.sw_dev_err_stats.vpath_alarms++;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;

			ret = __vxge_hw_vpath_alarm_process(
				&hldev->virtual_paths[i], skip_alarms);

			error_level = VXGE_HW_SET_LEVEL(ret, error_level);

			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
				break;
		}

		ret = error_level;
	}
exit:
	return ret;
}
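
/*
 * Example (editor's illustrative sketch): a legacy-IRQ top half built from
 * the helpers in this file. The napi_schedule() hand-off is an assumption
 * about how the caller defers traffic processing; only the vxge_hw_* calls
 * are taken from this file.
 *
 *	static irqreturn_t example_isr(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_device *hldev = dev_id;
 *		enum vxge_hw_status status;
 *		u64 reason;
 *
 *		status = vxge_hw_device_begin_irq(hldev, 0, &reason);
 *		if (status == VXGE_HW_ERR_WRONG_IRQ)
 *			return IRQ_NONE;	// shared line, not ours
 *
 *		if (status == VXGE_HW_OK) {
 *			vxge_hw_device_mask_all(hldev);
 *			// napi_schedule(...);	// defer rx/tx processing
 *			return IRQ_HANDLED;
 *		}
 *
 *		// alarm path: begin_irq already walked the vpath alarms
 *		return IRQ_HANDLED;
 *	}
 */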
/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{

	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
				 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
				&hldev->common_reg->tim_int_status0);
	}

	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
				(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
				 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
				&hldev->common_reg->tim_int_status1);
	}
}
/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 *
 */
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
	void **tmp_arr;

	if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
		*dtrh = channel->reserve_arr[--channel->reserve_ptr];

		return VXGE_HW_OK;
	}

	/* switch between empty and full arrays */

	/* the idea behind such a design is that by keeping the free and
	 * reserve arrays separate we basically separate the irq and non-irq
	 * parts, i.e. no additional locking is needed when a resource is
	 * freed */
	if (channel->length - channel->free_ptr > 0) {
		tmp_arr = channel->reserve_arr;
		channel->reserve_arr = channel->free_arr;
		channel->free_arr = tmp_arr;
		channel->reserve_ptr = channel->length;
		channel->reserve_top = channel->free_ptr;
		channel->free_ptr = channel->length;

		channel->stats->reserve_free_swaps_cnt++;

		goto _alloc_after_swap;
	}

	channel->stats->full_cnt++;

	*dtrh = NULL;
	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}
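
/*
 * Editor's note (worked example, assuming a channel of length 4): suppose
 * every DTR has been reserved and later freed, so the reserve array is
 * empty (reserve_ptr == reserve_top) and free_ptr has counted down to 0.
 * The next allocation finds the reserve array empty, swaps the two arrays,
 * sets reserve_ptr = 4, reserve_top = old free_ptr = 0, resets free_ptr to
 * 4, and then hands out entries from the freshly swapped reserve array.
 * The free side only ever writes the free array and the allocator only
 * reads the reserve array, so the swap is the single point where the two
 * sides meet.
 */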
/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channelh: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to the work array.
 *
 */
static void
vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
	vxge_assert(channel->work_arr[channel->post_index] == NULL);

	channel->work_arr[channel->post_index++] = dtrh;

	/* wrap-around */
	if (channel->post_index == channel->length)
		channel->post_index = 0;
}

/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtr: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from the work array
 *
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
	vxge_assert(channel->compl_index < channel->length);

	*dtrh = channel->work_arr[channel->compl_index];
	prefetch(*dtrh);
}

/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from the work array
 *
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
	channel->work_arr[channel->compl_index] = NULL;

	/* wrap-around */
	if (++channel->compl_index == channel->length)
		channel->compl_index = 0;

	channel->stats->total_compl_cnt++;
}
/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtr: DTR pointer
 *
 * Returns the dtr to the free array
 *
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
	channel->free_arr[--channel->free_ptr] = dtrh;
}

/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve the number of DTRs available. This function cannot be called
 * from the data path. ring_initial_replenish() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
	return (channel->reserve_ptr - channel->reserve_top) +
		(channel->length - channel->free_ptr);
}
/**
 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve Rx descriptor for subsequent filling-in by the driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
	void **rxdh)
{
	enum vxge_hw_status status;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	status = vxge_hw_channel_dtr_alloc(channel, rxdh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_ring_rxd_1 *rxdp =
			(struct vxge_hw_ring_rxd_1 *)*rxdh;

		rxdp->control_0 = rxdp->control_1 = 0;
	}

	return status;
}

/**
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_free(channel, rxdh);
}
/**
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares an rxd and posts it
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_post(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	wmb();
	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(channel, rxdh);

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
	wmb();
	vxge_hw_ring_rxd_post_post(ring, rxdh);
}
/**
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 * Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_ring_rxd_1 *rxdp;
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 control_0, own;

	channel = &ring->channel;

	vxge_hw_channel_dtr_try_complete(channel, rxdh);

	rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
	if (rxdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	control_0 = rxdp->control_0;
	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
	*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);

	/* check whether it is not the end */
	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {

		vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
				0);

		++ring->cmpl_cnt;
		vxge_hw_channel_dtr_complete(channel);

		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

		ring->stats->common_stats.usage_cnt++;
		if (ring->stats->common_stats.usage_max <
				ring->stats->common_stats.usage_cnt)
			ring->stats->common_stats.usage_max =
				ring->stats->common_stats.usage_cnt;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* reset it. since we don't want to return
	 * garbage to the driver */
	*rxdh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
/**
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 * "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	/* If the t_code is not supported and if the
	 * t_code is other than 0x5 (an unparseable packet,
	 * such as one with an unknown IPv6 header), drop it.
	 */
	if (t_code == VXGE_HW_RING_T_CODE_OK ||
	    t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
		status = VXGE_HW_OK;
		goto exit;
	}

	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
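
/*
 * Example (editor's illustrative sketch): the rx poll loop that the two
 * functions above are designed for. Handing the buffer to the stack and
 * attaching a replacement buffer to the descriptor are assumptions about
 * the caller and are elided here.
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
 *		    VXGE_HW_OK) {
 *			vxge_hw_ring_rxd_free(ring, rxdh);
 *			continue;
 *		}
 *		// ... pass the completed buffer up, attach a fresh buffer ...
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */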
/**
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to the doorbell FIFO
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
	u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
		&fifo->nofl_db->control_0);

	mmiowb();

	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

	mmiowb();
}
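
/*
 * Editor's note (interpretation, not from the original source): the
 * mmiowb() calls appear to be there to keep the control_0 write ordered
 * ahead of the txdl_ptr write as observed by the adapter, since writing
 * txdl_ptr is presumably what triggers the doorbell.
 */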
/**
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
	return vxge_hw_channel_dtr_count(&fifoh->channel);
}
/**
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifoh: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for subsequent filling-in by the driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of driver to reserve multiple descriptors
 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
	struct __vxge_hw_fifo *fifo,
	void **txdlh, void **txdl_priv)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status;
	int i;

	channel = &fifo->channel;

	status = vxge_hw_channel_dtr_alloc(channel, txdlh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_fifo_txd *txdp =
			(struct vxge_hw_fifo_txd *)*txdlh;
		struct __vxge_hw_fifo_txdl_priv *priv;

		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

		/* reset the TxDL's private */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->next_txdl_priv = NULL;

		*txdl_priv = (void *)(size_t)txdp->host_control;

		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}
	}

	return status;
}
/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 * (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
				  void *txdlh, u32 frag_idx,
				  dma_addr_t dma_pointer, u32 size)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp, *txdp_last;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

	if (frag_idx != 0)
		txdp->control_0 = txdp->control_1 = 0;
	else {
		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
		txdp->control_1 |= fifo->interrupt_type;
		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
			fifo->tx_intr_num);
		if (txdl_priv->frags) {
			txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
				(txdl_priv->frags - 1);
			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
		}
	}

	vxge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64)dma_pointer;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
	fifo->stats->total_buffers++;
	txdl_priv->frags++;
}
/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 * @frags: Number of contiguous buffers that are part of a single
 * transmit operation.
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp_last;
	struct vxge_hw_fifo_txd *txdp_first;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp_first = (struct vxge_hw_fifo_txd *)txdlh;

	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
	      VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

	__vxge_hw_non_offload_db_post(fifo,
		(u64)txdl_priv->dma_addr,
		txdl_priv->frags - 1,
		fifo->no_snoop_bits);

	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
		fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
			fifo->stats->common_stats.usage_cnt;
}
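
/*
 * Example (editor's illustrative sketch): the transmit fast path built from
 * the fifo helpers above. The skb and DMA-mapping details are assumptions
 * about the caller; only the vxge_hw_fifo_* calls are taken from this file.
 *
 *	void *txdlh, *txdl_priv;
 *	dma_addr_t dma;		// mapped address of the frame (assumed)
 *	u32 len;		// frame length (assumed)
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) !=
 *	    VXGE_HW_OK)
 *		return NETDEV_TX_BUSY;	// out of descriptors, stop the queue
 *
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma, len);
 *	// ... additional fragments go in with frag_idx 1, 2, ... ...
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);
 */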
/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *          Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_try_complete(channel, txdlh);

	txdp = (struct vxge_hw_fifo_txd *)*txdlh;
	if (txdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether host owns it */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

		vxge_assert(txdp->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* no more completions */
	*txdlh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when it encounters a critical error.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
					      void *txdlh,
					      enum vxge_hw_fifo_tcode t_code)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	u32 max_frags;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
			(struct vxge_hw_fifo_txd *)txdlh);

	max_frags = fifo->config->max_frags;

	vxge_hw_channel_dtr_free(channel, txdlh);
}
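
/*
 * Usage sketch (editorial illustration, not part of the driver): the
 * reserve/post/complete/free lifecycle described above, drained from a
 * completion context. "fifo" is an assumed handle; recovering the skb tied
 * to each descriptor (e.g. via its host_control field) is left out.
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	// walk every descriptor the adapter has handed back to the host
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
 *			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		// make the descriptor reservable again
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */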
/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN],
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	switch (duplicate_mode) {
	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
		i = 0;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
		i = 1;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
		i = 2;
		break;
	default:
		i = 0;
		break;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
	return status;
}
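
/*
 * Usage sketch (editorial illustration, not part of the driver): adding a
 * unicast address to the vpath's DA table. "vp" is an assumed vpath handle,
 * the address bytes are made up, and the all-zero mask is only a
 * placeholder - the exact mask semantics are defined by the adapter's DA
 * steering table format (see the Titan User Guide), not by this sketch.
 *
 *	u8 addr[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };
 *	u8 mask[ETH_ALEN] = { 0 };
 *	enum vxge_hw_status status;
 *
 *	// duplicates tolerated; see enum vxge_hw_vpath_mac_addr_add_mode
 *	status = vxge_hw_vpath_mac_addr_add(vp, addr, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 *	if (status != VXGE_HW_OK)
 *		pr_err("vxge: DA table add failed: %d\n", status);
 */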
/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 *               vpath from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}
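
/*
 * Usage sketch (editorial illustration, not part of the driver): walking the
 * whole DA table for a vpath. "vp" is an assumed handle; the walk stops when
 * the adapter reports no further entries.
 *
 *	u8 addr[ETH_ALEN], mask[ETH_ALEN];
 *
 *	// first entry, then iterate with _get_next until the list ends
 *	if (vxge_hw_vpath_mac_addr_get(vp, addr, mask) == VXGE_HW_OK) {
 *		do {
 *			pr_info("vxge: vpath MAC entry %pM\n", addr);
 *		} while (vxge_hw_vpath_mac_addr_get_next(vp, addr, mask) ==
 *			 VXGE_HW_OK);
 *	}
 */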
/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Deletes the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}
/**
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
/**
 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the first vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}
/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
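
/*
 * Usage sketch (editorial illustration, not part of the driver): mirroring
 * an 8021q filter change into the vpath's VLAN table. "vp" is an assumed
 * handle and "vid" an assumed VLAN id supplied by the stack.
 *
 *	// on ndo_vlan_rx_add_vid:
 *	if (vxge_hw_vpath_vid_add(vp, vid) != VXGE_HW_OK)
 *		pr_warn("vxge: could not add VLAN %llu\n",
 *			(unsigned long long)vid);
 *
 *	// on ndo_vlan_rx_kill_vid:
 *	if (vxge_hw_vpath_vid_delete(vp, vid) != VXGE_HW_OK)
 *		pr_warn("vxge: could not delete VLAN %llu\n",
 *			(unsigned long long)vid);
 */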
/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
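
/*
 * Usage sketch (editorial illustration, not part of the driver): reacting to
 * the stack's rx-mode flags. "dev" and "vp" are assumed to be the net_device
 * and the corresponding vpath handle held by the caller.
 *
 *	// typically called from an ndo_set_rx_mode handler
 *	if (dev->flags & IFF_PROMISC)
 *		vxge_hw_vpath_promisc_enable(vp);
 *	else
 *		vxge_hw_vpath_promisc_disable(vp);
 */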
/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
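
/*
 * Usage sketch (editorial illustration, not part of the driver): the
 * broadcast/multicast knobs above combined with the stack's IFF_ALLMULTI
 * flag. "dev" and "vp" are assumed to be provided by the caller.
 *
 *	// broadcasts are normally always accepted
 *	vxge_hw_vpath_bcast_enable(vp);
 *
 *	if (dev->flags & IFF_ALLMULTI)
 *		vxge_hw_vpath_mcast_enable(vp);
 *	else
 *		vxge_hw_vpath_mcast_disable(vp);
 */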
/*
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vp: Vpath handle.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
			u32 skip_alarms)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
	return status;
}
/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *                          alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *               interrupts (can be repeated). If fifo or ring are not enabled
 *               the MSIX vector for that should be set to 0
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API will associate the given MSIX vector numbers with the four TIM
 * interrupts and the alarm interrupt.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
		       int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 vp_id = vp->vpath->vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
			(vp_id * 4) + tim_msix_id[0]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
			(vp_id * 4) + tim_msix_id[1]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
			&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
	}

	if (vpath->hldev->config.intr_mode ==
			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
				0, 32), &vp_reg->one_shot_vect3_en);
	}
}
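
/*
 * Usage sketch (editorial illustration, not part of the driver): wiring a
 * vpath to its MSI-X vectors. The 0/1/2 layout (tx, rx, alarm) is an
 * assumption of this sketch, not something mandated by the API; "vp" is an
 * assumed handle.
 *
 *	int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = { 0, 1 };
 *
 *	// TIM traffic interrupts on vectors 0 (tx) and 1 (rx), alarm on 2
 *	vxge_hw_vpath_msix_set(vp, tim_msix_id, 2);
 */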
/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 *
 * See also: vxge_hw_vpath_msix_unmask().
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}
/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 *
 * See also: vxge_hw_vpath_msix_mask().
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
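
/*
 * Usage sketch (editorial illustration, not part of the driver): the usual
 * mask - poll - unmask pattern inside an MSI-X handler. "vp", "ring" and
 * "msix_id" are assumed to come from the cookie set up at irq request time.
 *
 *	vxge_hw_vpath_msix_mask(vp, msix_id);
 *	// drain the completions that raised this vector
 *	vxge_hw_vpath_poll_rx(ring);
 *	vxge_hw_vpath_msix_unmask(vp, msix_id);
 */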
/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}
/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			   tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}
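
/*
 * Usage sketch (editorial illustration, not part of the driver): in INTA
 * mode the per-vpath Tx/Rx sources are typically masked while a softirq
 * context drains the rings, then unmasked. "vp" is an assumed handle.
 *
 *	vxge_hw_vpath_inta_mask_tx_rx(vp);
 *	// ... process Tx and Rx completions here ...
 *	vxge_hw_vpath_inta_unmask_tx_rx(vp);
 */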
/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	u64 val64 = 0;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			val64 =
			  readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
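
/*
 * Usage sketch (editorial illustration, not part of the driver): draining an
 * Rx vpath according to the return values documented above. "ring" is an
 * assumed handle; the per-descriptor work happens inside the ring->callback
 * the driver registered when the vpath was opened.
 *
 *	// keep polling while completed RxDs are reported as remaining
 *	while (vxge_hw_vpath_poll_rx(ring) == VXGE_HW_COMPLETIONS_REMAIN)
 *		;
 */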
/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: Passed through to the driver's completion callback, which uses
 *           it to return the skbs of completed descriptors.
 * @nr_skb: Passed through to the completion callback; the capacity of
 *          @skb_ptr.
 * @more: Passed through to the completion callback; set when further
 *        completions remain to be processed.
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					  struct sk_buff ***skb_ptr, int nr_skb,
					  int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}
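
/*
 * Usage sketch (editorial illustration, not part of the driver): completing
 * transmits from the same poll context that handles Rx. How the skb array is
 * filled is up to the fifo completion callback the driver registered; only
 * the call itself and the "more" hint are shown here, with "fifo" assumed.
 *
 *	struct sk_buff *done[NAPI_POLL_WEIGHT];
 *	struct sk_buff **skb_ptr = done;
 *	int more = 0;
 *
 *	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, NAPI_POLL_WEIGHT, &more);
 *	if (more)
 *		;	// the callback reported further completions pending
 */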