  1. /******************************************************************************
  2. * This software may be used and distributed according to the terms of
  3. * the GNU General Public License (GPL), incorporated herein by reference.
  4. * Drivers based on or derived from this code fall under the GPL and must
  5. * retain the authorship, copyright and license notice. This file is not
  6. * a complete program and may only be used when the entire operating
  7. * system is licensed under the GPL.
  8. * See the file COPYING in this distribution for more information.
  9. *
  10. * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
  11. * Virtualized Server Adapter.
  12. * Copyright(c) 2002-2010 Exar Corp.
  13. ******************************************************************************/
  14. #include <linux/etherdevice.h>
  15. #include "vxge-traffic.h"
  16. #include "vxge-config.h"
  17. #include "vxge-main.h"
  18. /*
  19. * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
  20. * @vp: Virtual Path handle.
  21. *
* Enable vpath interrupts. The function is to be executed last in the
* vpath initialization sequence.
  24. *
  25. * See also: vxge_hw_vpath_intr_disable()
  26. */
  27. enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
  28. {
  29. u64 val64;
  30. struct __vxge_hw_virtualpath *vpath;
  31. struct vxge_hw_vpath_reg __iomem *vp_reg;
  32. enum vxge_hw_status status = VXGE_HW_OK;
  33. if (vp == NULL) {
  34. status = VXGE_HW_ERR_INVALID_HANDLE;
  35. goto exit;
  36. }
  37. vpath = vp->vpath;
  38. if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
  39. status = VXGE_HW_ERR_VPATH_NOT_OPEN;
  40. goto exit;
  41. }
  42. vp_reg = vpath->vp_reg;
  43. writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
  44. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  45. &vp_reg->general_errors_reg);
  46. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  47. &vp_reg->pci_config_errors_reg);
  48. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  49. &vp_reg->mrpcim_to_vpath_alarm_reg);
  50. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  51. &vp_reg->srpcim_to_vpath_alarm_reg);
  52. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  53. &vp_reg->vpath_ppif_int_status);
  54. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  55. &vp_reg->srpcim_msg_to_vpath_reg);
  56. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  57. &vp_reg->vpath_pcipif_int_status);
  58. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  59. &vp_reg->prc_alarm_reg);
  60. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  61. &vp_reg->wrdma_alarm_status);
  62. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  63. &vp_reg->asic_ntwk_vp_err_reg);
  64. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  65. &vp_reg->xgmac_vp_int_status);
  66. val64 = readq(&vp_reg->vpath_general_int_status);
  67. /* Mask unwanted interrupts */
  68. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  69. &vp_reg->vpath_pcipif_int_mask);
  70. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  71. &vp_reg->srpcim_msg_to_vpath_mask);
  72. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  73. &vp_reg->srpcim_to_vpath_alarm_mask);
  74. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  75. &vp_reg->mrpcim_to_vpath_alarm_mask);
  76. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  77. &vp_reg->pci_config_errors_mask);
  78. /* Unmask the individual interrupts */
  79. writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
  80. VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
  81. VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
  82. VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
  83. &vp_reg->general_errors_mask);
  84. __vxge_hw_pio_mem_write32_upper(
  85. (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
  86. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
  87. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
  88. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
  89. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
  90. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
  91. &vp_reg->kdfcctl_errors_mask);
  92. __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
  93. __vxge_hw_pio_mem_write32_upper(
  94. (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
  95. &vp_reg->prc_alarm_mask);
  96. __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
  97. __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
  98. if (vpath->hldev->first_vp_id != vpath->vp_id)
  99. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  100. &vp_reg->asic_ntwk_vp_err_mask);
  101. else
  102. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
  103. VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
  104. VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
  105. &vp_reg->asic_ntwk_vp_err_mask);
  106. __vxge_hw_pio_mem_write32_upper(0,
  107. &vp_reg->vpath_general_int_mask);
  108. exit:
  109. return status;
  110. }
/*
* vxge_hw_vpath_intr_disable - Disable vpath interrupts.
* @vp: Virtual Path handle.
*
* Disable vpath interrupts.
*
* See also: vxge_hw_vpath_intr_enable()
*/
  120. enum vxge_hw_status vxge_hw_vpath_intr_disable(
  121. struct __vxge_hw_vpath_handle *vp)
  122. {
  123. u64 val64;
  124. struct __vxge_hw_virtualpath *vpath;
  125. enum vxge_hw_status status = VXGE_HW_OK;
  126. struct vxge_hw_vpath_reg __iomem *vp_reg;
  127. if (vp == NULL) {
  128. status = VXGE_HW_ERR_INVALID_HANDLE;
  129. goto exit;
  130. }
  131. vpath = vp->vpath;
  132. if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
  133. status = VXGE_HW_ERR_VPATH_NOT_OPEN;
  134. goto exit;
  135. }
  136. vp_reg = vpath->vp_reg;
  137. __vxge_hw_pio_mem_write32_upper(
  138. (u32)VXGE_HW_INTR_MASK_ALL,
  139. &vp_reg->vpath_general_int_mask);
  140. val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
  141. writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
  142. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  143. &vp_reg->general_errors_mask);
  144. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  145. &vp_reg->pci_config_errors_mask);
  146. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  147. &vp_reg->mrpcim_to_vpath_alarm_mask);
  148. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  149. &vp_reg->srpcim_to_vpath_alarm_mask);
  150. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  151. &vp_reg->vpath_ppif_int_mask);
  152. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  153. &vp_reg->srpcim_msg_to_vpath_mask);
  154. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  155. &vp_reg->vpath_pcipif_int_mask);
  156. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  157. &vp_reg->wrdma_alarm_mask);
  158. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  159. &vp_reg->prc_alarm_mask);
  160. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  161. &vp_reg->xgmac_vp_int_mask);
  162. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  163. &vp_reg->asic_ntwk_vp_err_mask);
  164. exit:
  165. return status;
  166. }
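/*
* Usage sketch (illustrative only, not taken verbatim from the driver): the
* two helpers above are intended to bracket the life of an open vpath, e.g.
*
*	status = vxge_hw_vpath_intr_enable(vp);    (last step of vpath open)
*	...
*	vxge_hw_vpath_intr_disable(vp);            (before the vpath is closed)
*
* The real call sites and their error handling live in vxge-main.c and may
* differ from this sketch.
*/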
  167. void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
  168. {
  169. struct vxge_hw_vpath_reg __iomem *vp_reg;
  170. struct vxge_hw_vp_config *config;
  171. u64 val64;
  172. if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
  173. return;
  174. vp_reg = fifo->vp_reg;
  175. config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
  176. if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
  177. config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
  178. val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
  179. val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
  180. fifo->tim_tti_cfg1_saved = val64;
  181. writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
  182. }
  183. }
  184. void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
  185. {
  186. u64 val64 = ring->tim_rti_cfg1_saved;
  187. val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
  188. ring->tim_rti_cfg1_saved = val64;
  189. writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
  190. }
  191. void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
  192. {
  193. u64 val64 = fifo->tim_tti_cfg3_saved;
  194. u64 timer = (fifo->rtimer * 1000) / 272;
  195. val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
  196. if (timer)
  197. val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
  198. VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
  199. writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
  200. /* tti_cfg3_saved is not updated again because it is
  201. * initialized at one place only - init time.
  202. */
  203. }
  204. void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
  205. {
  206. u64 val64 = ring->tim_rti_cfg3_saved;
  207. u64 timer = (ring->rtimer * 1000) / 272;
  208. val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
  209. if (timer)
  210. val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
  211. VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
  212. writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
  213. /* rti_cfg3_saved is not updated again because it is
  214. * initialized at one place only - init time.
  215. */
  216. }
/**
* vxge_hw_channel_msix_mask - Mask MSIX Vector.
* @channel: Channel for rx or tx handle
* @msix_id: MSIX ID
*
* The function masks the msix interrupt for the given msix_id
*
* Returns: void
*/
  226. void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
  227. {
  228. __vxge_hw_pio_mem_write32_upper(
  229. (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
  230. &channel->common_reg->set_msix_mask_vect[msix_id%4]);
  231. }
/**
* vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
* @channel: Channel for rx or tx handle
* @msix_id: MSIX ID
*
* The function unmasks the msix interrupt for the given msix_id
*
* Returns: void
*/
  241. void
  242. vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
  243. {
  244. __vxge_hw_pio_mem_write32_upper(
  245. (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
  246. &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
  247. }
/**
* vxge_hw_channel_msix_clear - Clear the MSIX Vector.
* @channel: Channel for rx or tx handle
* @msix_id: MSIX ID
*
* The function clears the msix interrupt for the given msix_id
* when the device is configured in MSIX oneshot mode
*
* Returns: void
*/
  258. void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
  259. {
  260. __vxge_hw_pio_mem_write32_upper(
  261. (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
  262. &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
  263. }
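/*
* Illustrative MSI-X flow (a sketch, not the driver's actual handler): a
* per-channel interrupt handler would typically mask its vector on entry,
* process completions, and then either unmask or, in one-shot mode, clear
* the same vector on exit:
*
*	vxge_hw_channel_msix_mask(channel, msix_id);
*	... process completed descriptors ...
*	if (one_shot)                         (hypothetical mode flag)
*		vxge_hw_channel_msix_clear(channel, msix_id);
*	else
*		vxge_hw_channel_msix_unmask(channel, msix_id);
*
* The handlers in vxge-main.c may order these steps differently.
*/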
  264. /**
  265. * vxge_hw_device_set_intr_type - Updates the configuration
  266. * with new interrupt type.
  267. * @hldev: HW device handle.
  268. * @intr_mode: New interrupt type
  269. */
  270. u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
  271. {
  272. if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
  273. (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
  274. (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
  275. (intr_mode != VXGE_HW_INTR_MODE_DEF))
  276. intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
  277. hldev->config.intr_mode = intr_mode;
  278. return intr_mode;
  279. }
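/*
* Example (hedged sketch): a probe path could request MSI-X and fall back to
* legacy INTA; note that vxge_hw_device_set_intr_type() itself silently
* falls back to VXGE_HW_INTR_MODE_IRQLINE for unrecognized values:
*
*	if (msix_available)                   (hypothetical capability flag)
*		mode = vxge_hw_device_set_intr_type(hldev,
*					VXGE_HW_INTR_MODE_MSIX);
*	else
*		mode = vxge_hw_device_set_intr_type(hldev,
*					VXGE_HW_INTR_MODE_IRQLINE);
*/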
/**
* vxge_hw_device_intr_enable - Enable interrupts.
* @hldev: HW device handle.
*
* Enable Titan interrupts. The function is to be executed last in the
* Titan initialization sequence.
*
* See also: vxge_hw_device_intr_disable()
*/
  291. void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
  292. {
  293. u32 i;
  294. u64 val64;
  295. u32 val32;
  296. vxge_hw_device_mask_all(hldev);
  297. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  298. if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
  299. continue;
  300. vxge_hw_vpath_intr_enable(
  301. VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
  302. }
  303. if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
  304. val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
  305. hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
  306. if (val64 != 0) {
  307. writeq(val64, &hldev->common_reg->tim_int_status0);
  308. writeq(~val64, &hldev->common_reg->tim_int_mask0);
  309. }
  310. val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
  311. hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
  312. if (val32 != 0) {
  313. __vxge_hw_pio_mem_write32_upper(val32,
  314. &hldev->common_reg->tim_int_status1);
  315. __vxge_hw_pio_mem_write32_upper(~val32,
  316. &hldev->common_reg->tim_int_mask1);
  317. }
  318. }
  319. val64 = readq(&hldev->common_reg->titan_general_int_status);
  320. vxge_hw_device_unmask_all(hldev);
  321. }
/**
* vxge_hw_device_intr_disable - Disable Titan interrupts.
* @hldev: HW device handle.
*
* Disable Titan interrupts.
*
* See also: vxge_hw_device_intr_enable()
*/
  332. void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
  333. {
  334. u32 i;
  335. vxge_hw_device_mask_all(hldev);
  336. /* mask all the tim interrupts */
  337. writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
  338. __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
  339. &hldev->common_reg->tim_int_mask1);
  340. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  341. if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
  342. continue;
  343. vxge_hw_vpath_intr_disable(
  344. VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
  345. }
  346. }
  347. /**
  348. * vxge_hw_device_mask_all - Mask all device interrupts.
  349. * @hldev: HW device handle.
  350. *
  351. * Mask all device interrupts.
  352. *
  353. * See also: vxge_hw_device_unmask_all()
  354. */
  355. void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
  356. {
  357. u64 val64;
  358. val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
  359. VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
  360. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
  361. &hldev->common_reg->titan_mask_all_int);
  362. }
  363. /**
  364. * vxge_hw_device_unmask_all - Unmask all device interrupts.
  365. * @hldev: HW device handle.
  366. *
  367. * Unmask all device interrupts.
  368. *
  369. * See also: vxge_hw_device_mask_all()
  370. */
  371. void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
  372. {
  373. u64 val64 = 0;
  374. if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
  375. val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
  376. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
  377. &hldev->common_reg->titan_mask_all_int);
  378. }
  379. /**
  380. * vxge_hw_device_flush_io - Flush io writes.
  381. * @hldev: HW device handle.
  382. *
  383. * The function performs a read operation to flush io writes.
  384. *
  385. * Returns: void
  386. */
  387. void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
  388. {
  389. u32 val32;
  390. val32 = readl(&hldev->common_reg->titan_general_int_status);
  391. }
  392. /**
  393. * __vxge_hw_device_handle_error - Handle error
  394. * @hldev: HW device
  395. * @vp_id: Vpath Id
  396. * @type: Error type. Please see enum vxge_hw_event{}
  397. *
  398. * Handle error.
  399. */
  400. static enum vxge_hw_status
  401. __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
  402. enum vxge_hw_event type)
  403. {
  404. switch (type) {
  405. case VXGE_HW_EVENT_UNKNOWN:
  406. break;
  407. case VXGE_HW_EVENT_RESET_START:
  408. case VXGE_HW_EVENT_RESET_COMPLETE:
  409. case VXGE_HW_EVENT_LINK_DOWN:
  410. case VXGE_HW_EVENT_LINK_UP:
  411. goto out;
  412. case VXGE_HW_EVENT_ALARM_CLEARED:
  413. goto out;
  414. case VXGE_HW_EVENT_ECCERR:
  415. case VXGE_HW_EVENT_MRPCIM_ECCERR:
  416. goto out;
  417. case VXGE_HW_EVENT_FIFO_ERR:
  418. case VXGE_HW_EVENT_VPATH_ERR:
  419. case VXGE_HW_EVENT_CRITICAL_ERR:
  420. case VXGE_HW_EVENT_SERR:
  421. break;
  422. case VXGE_HW_EVENT_SRPCIM_SERR:
  423. case VXGE_HW_EVENT_MRPCIM_SERR:
  424. goto out;
  425. case VXGE_HW_EVENT_SLOT_FREEZE:
  426. break;
  427. default:
  428. vxge_assert(0);
  429. goto out;
  430. }
  431. /* notify driver */
  432. if (hldev->uld_callbacks.crit_err)
  433. hldev->uld_callbacks.crit_err(
  434. (struct __vxge_hw_device *)hldev,
  435. type, vp_id);
  436. out:
  437. return VXGE_HW_OK;
  438. }
  439. /*
  440. * __vxge_hw_device_handle_link_down_ind
  441. * @hldev: HW device handle.
  442. *
  443. * Link down indication handler. The function is invoked by HW when
  444. * Titan indicates that the link is down.
  445. */
  446. static enum vxge_hw_status
  447. __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
  448. {
/*
* If the link state is already down, there is nothing to do.
*/
  452. if (hldev->link_state == VXGE_HW_LINK_DOWN)
  453. goto exit;
  454. hldev->link_state = VXGE_HW_LINK_DOWN;
  455. /* notify driver */
  456. if (hldev->uld_callbacks.link_down)
  457. hldev->uld_callbacks.link_down(hldev);
  458. exit:
  459. return VXGE_HW_OK;
  460. }
  461. /*
  462. * __vxge_hw_device_handle_link_up_ind
  463. * @hldev: HW device handle.
  464. *
  465. * Link up indication handler. The function is invoked by HW when
* Titan indicates that the link is up for a programmable amount of time.
  467. */
  468. static enum vxge_hw_status
  469. __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
  470. {
/*
* If the link state is already up, there is nothing to do.
*/
  474. if (hldev->link_state == VXGE_HW_LINK_UP)
  475. goto exit;
  476. hldev->link_state = VXGE_HW_LINK_UP;
  477. /* notify driver */
  478. if (hldev->uld_callbacks.link_up)
  479. hldev->uld_callbacks.link_up(hldev);
  480. exit:
  481. return VXGE_HW_OK;
  482. }
  483. /*
  484. * __vxge_hw_vpath_alarm_process - Process Alarms.
  485. * @vpath: Virtual Path.
  486. * @skip_alarms: Do not clear the alarms
  487. *
  488. * Process vpath alarms.
  489. *
  490. */
  491. static enum vxge_hw_status
  492. __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
  493. u32 skip_alarms)
  494. {
  495. u64 val64;
  496. u64 alarm_status;
  497. u64 pic_status;
  498. struct __vxge_hw_device *hldev = NULL;
  499. enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
  500. u64 mask64;
  501. struct vxge_hw_vpath_stats_sw_info *sw_stats;
  502. struct vxge_hw_vpath_reg __iomem *vp_reg;
  503. if (vpath == NULL) {
  504. alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
  505. alarm_event);
  506. goto out2;
  507. }
  508. hldev = vpath->hldev;
  509. vp_reg = vpath->vp_reg;
  510. alarm_status = readq(&vp_reg->vpath_general_int_status);
  511. if (alarm_status == VXGE_HW_ALL_FOXES) {
  512. alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
  513. alarm_event);
  514. goto out;
  515. }
  516. sw_stats = vpath->sw_stats;
  517. if (alarm_status & ~(
  518. VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
  519. VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
  520. VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
  521. VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
  522. sw_stats->error_stats.unknown_alarms++;
  523. alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
  524. alarm_event);
  525. goto out;
  526. }
  527. if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
  528. val64 = readq(&vp_reg->xgmac_vp_int_status);
  529. if (val64 &
  530. VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
  531. val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
  532. if (((val64 &
  533. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
  534. (!(val64 &
  535. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
  536. ((val64 &
  537. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
  538. (!(val64 &
  539. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
  540. ))) {
  541. sw_stats->error_stats.network_sustained_fault++;
  542. writeq(
  543. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
  544. &vp_reg->asic_ntwk_vp_err_mask);
  545. __vxge_hw_device_handle_link_down_ind(hldev);
  546. alarm_event = VXGE_HW_SET_LEVEL(
  547. VXGE_HW_EVENT_LINK_DOWN, alarm_event);
  548. }
  549. if (((val64 &
  550. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
  551. (!(val64 &
  552. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
  553. ((val64 &
  554. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
  555. (!(val64 &
  556. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
  557. ))) {
  558. sw_stats->error_stats.network_sustained_ok++;
  559. writeq(
  560. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
  561. &vp_reg->asic_ntwk_vp_err_mask);
  562. __vxge_hw_device_handle_link_up_ind(hldev);
  563. alarm_event = VXGE_HW_SET_LEVEL(
  564. VXGE_HW_EVENT_LINK_UP, alarm_event);
  565. }
  566. writeq(VXGE_HW_INTR_MASK_ALL,
  567. &vp_reg->asic_ntwk_vp_err_reg);
  568. alarm_event = VXGE_HW_SET_LEVEL(
  569. VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
  570. if (skip_alarms)
  571. return VXGE_HW_OK;
  572. }
  573. }
  574. if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
  575. pic_status = readq(&vp_reg->vpath_ppif_int_status);
  576. if (pic_status &
  577. VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
  578. val64 = readq(&vp_reg->general_errors_reg);
  579. mask64 = readq(&vp_reg->general_errors_mask);
  580. if ((val64 &
  581. VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
  582. ~mask64) {
  583. sw_stats->error_stats.ini_serr_det++;
  584. alarm_event = VXGE_HW_SET_LEVEL(
  585. VXGE_HW_EVENT_SERR, alarm_event);
  586. }
  587. if ((val64 &
  588. VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
  589. ~mask64) {
  590. sw_stats->error_stats.dblgen_fifo0_overflow++;
  591. alarm_event = VXGE_HW_SET_LEVEL(
  592. VXGE_HW_EVENT_FIFO_ERR, alarm_event);
  593. }
  594. if ((val64 &
  595. VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
  596. ~mask64)
  597. sw_stats->error_stats.statsb_pif_chain_error++;
  598. if ((val64 &
  599. VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
  600. ~mask64)
  601. sw_stats->error_stats.statsb_drop_timeout++;
  602. if ((val64 &
  603. VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
  604. ~mask64)
  605. sw_stats->error_stats.target_illegal_access++;
  606. if (!skip_alarms) {
  607. writeq(VXGE_HW_INTR_MASK_ALL,
  608. &vp_reg->general_errors_reg);
  609. alarm_event = VXGE_HW_SET_LEVEL(
  610. VXGE_HW_EVENT_ALARM_CLEARED,
  611. alarm_event);
  612. }
  613. }
  614. if (pic_status &
  615. VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
  616. val64 = readq(&vp_reg->kdfcctl_errors_reg);
  617. mask64 = readq(&vp_reg->kdfcctl_errors_mask);
  618. if ((val64 &
  619. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
  620. ~mask64) {
  621. sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
  622. alarm_event = VXGE_HW_SET_LEVEL(
  623. VXGE_HW_EVENT_FIFO_ERR,
  624. alarm_event);
  625. }
  626. if ((val64 &
  627. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
  628. ~mask64) {
  629. sw_stats->error_stats.kdfcctl_fifo0_poison++;
  630. alarm_event = VXGE_HW_SET_LEVEL(
  631. VXGE_HW_EVENT_FIFO_ERR,
  632. alarm_event);
  633. }
  634. if ((val64 &
  635. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
  636. ~mask64) {
  637. sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
  638. alarm_event = VXGE_HW_SET_LEVEL(
  639. VXGE_HW_EVENT_FIFO_ERR,
  640. alarm_event);
  641. }
  642. if (!skip_alarms) {
  643. writeq(VXGE_HW_INTR_MASK_ALL,
  644. &vp_reg->kdfcctl_errors_reg);
  645. alarm_event = VXGE_HW_SET_LEVEL(
  646. VXGE_HW_EVENT_ALARM_CLEARED,
  647. alarm_event);
  648. }
  649. }
  650. }
  651. if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
  652. val64 = readq(&vp_reg->wrdma_alarm_status);
  653. if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
  654. val64 = readq(&vp_reg->prc_alarm_reg);
  655. mask64 = readq(&vp_reg->prc_alarm_mask);
  656. if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
  657. ~mask64)
  658. sw_stats->error_stats.prc_ring_bumps++;
  659. if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
  660. ~mask64) {
  661. sw_stats->error_stats.prc_rxdcm_sc_err++;
  662. alarm_event = VXGE_HW_SET_LEVEL(
  663. VXGE_HW_EVENT_VPATH_ERR,
  664. alarm_event);
  665. }
  666. if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
  667. & ~mask64) {
  668. sw_stats->error_stats.prc_rxdcm_sc_abort++;
  669. alarm_event = VXGE_HW_SET_LEVEL(
  670. VXGE_HW_EVENT_VPATH_ERR,
  671. alarm_event);
  672. }
  673. if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
  674. & ~mask64) {
  675. sw_stats->error_stats.prc_quanta_size_err++;
  676. alarm_event = VXGE_HW_SET_LEVEL(
  677. VXGE_HW_EVENT_VPATH_ERR,
  678. alarm_event);
  679. }
  680. if (!skip_alarms) {
  681. writeq(VXGE_HW_INTR_MASK_ALL,
  682. &vp_reg->prc_alarm_reg);
  683. alarm_event = VXGE_HW_SET_LEVEL(
  684. VXGE_HW_EVENT_ALARM_CLEARED,
  685. alarm_event);
  686. }
  687. }
  688. }
  689. out:
  690. hldev->stats.sw_dev_err_stats.vpath_alarms++;
  691. out2:
  692. if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
  693. (alarm_event == VXGE_HW_EVENT_UNKNOWN))
  694. return VXGE_HW_OK;
  695. __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
  696. if (alarm_event == VXGE_HW_EVENT_SERR)
  697. return VXGE_HW_ERR_CRITICAL;
  698. return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
  699. VXGE_HW_ERR_SLOT_FREEZE :
  700. (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
  701. VXGE_HW_ERR_VPATH;
  702. }
  703. /**
  704. * vxge_hw_device_begin_irq - Begin IRQ processing.
  705. * @hldev: HW device handle.
  706. * @skip_alarms: Do not clear the alarms
  707. * @reason: "Reason" for the interrupt, the value of Titan's
  708. * general_int_status register.
  709. *
* The function performs two actions. It first checks whether the interrupt
* was raised by the device (relevant when the IRQ line is shared). It then
* processes any pending vpath alarms.
*
* Note:
* vxge_hw_device_begin_irq() does not flush MMIO writes through the
* bridge. Therefore, two back-to-back interrupts are potentially possible.
*
* Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (in this case
* @reason is set to 0 and the device interrupts remain enabled).
* Otherwise, the 64-bit general interrupt status is stored in @reason and
* VXGE_HW_OK or an alarm-specific error code is returned.
*/
  722. enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
  723. u32 skip_alarms, u64 *reason)
  724. {
  725. u32 i;
  726. u64 val64;
  727. u64 adapter_status;
  728. u64 vpath_mask;
  729. enum vxge_hw_status ret = VXGE_HW_OK;
  730. val64 = readq(&hldev->common_reg->titan_general_int_status);
  731. if (unlikely(!val64)) {
  732. /* not Titan interrupt */
  733. *reason = 0;
  734. ret = VXGE_HW_ERR_WRONG_IRQ;
  735. goto exit;
  736. }
  737. if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
  738. adapter_status = readq(&hldev->common_reg->adapter_status);
  739. if (adapter_status == VXGE_HW_ALL_FOXES) {
  740. __vxge_hw_device_handle_error(hldev,
  741. NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
  742. *reason = 0;
  743. ret = VXGE_HW_ERR_SLOT_FREEZE;
  744. goto exit;
  745. }
  746. }
  747. hldev->stats.sw_dev_info_stats.total_intr_cnt++;
  748. *reason = val64;
  749. vpath_mask = hldev->vpaths_deployed >>
  750. (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
  751. if (val64 &
  752. VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
  753. hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
  754. return VXGE_HW_OK;
  755. }
  756. hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
  757. if (unlikely(val64 &
  758. VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
  759. enum vxge_hw_status error_level = VXGE_HW_OK;
  760. hldev->stats.sw_dev_err_stats.vpath_alarms++;
  761. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  762. if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
  763. continue;
  764. ret = __vxge_hw_vpath_alarm_process(
  765. &hldev->virtual_paths[i], skip_alarms);
  766. error_level = VXGE_HW_SET_LEVEL(ret, error_level);
  767. if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
  768. (ret == VXGE_HW_ERR_SLOT_FREEZE)))
  769. break;
  770. }
  771. ret = error_level;
  772. }
  773. exit:
  774. return ret;
  775. }
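/*
* INTA interrupt-service sketch (illustrative only; names in parentheses are
* assumptions, not driver symbols):
*
*	status = vxge_hw_device_begin_irq(hldev, 0, &reason);
*	if (status == VXGE_HW_ERR_WRONG_IRQ)
*		return IRQ_NONE;              (shared line, not our interrupt)
*	if (reason & traffic_int_bits)        (driver-defined traffic mask)
*		... schedule NAPI / fifo completion processing ...
*	vxge_hw_device_clear_tx_rx(hldev);
*	return IRQ_HANDLED;
*
* A production ISR also has to act on alarm return codes such as
* VXGE_HW_ERR_SLOT_FREEZE; this sketch omits that handling.
*/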
  776. /**
  777. * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
* condition that has caused the Tx and Rx interrupt.
  779. * @hldev: HW device.
  780. *
  781. * Acknowledge (that is, clear) the condition that has caused
  782. * the Tx and Rx interrupt.
  783. * See also: vxge_hw_device_begin_irq(),
  784. * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
  785. */
  786. void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
  787. {
  788. if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
  789. (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
  790. writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
  791. hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
  792. &hldev->common_reg->tim_int_status0);
  793. }
  794. if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
  795. (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
  796. __vxge_hw_pio_mem_write32_upper(
  797. (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
  798. hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
  799. &hldev->common_reg->tim_int_status1);
  800. }
  801. }
  802. /*
  803. * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
  804. * @channel: Channel
  805. * @dtrh: Buffer to return the DTR pointer
  806. *
  807. * Allocates a dtr from the reserve array. If the reserve array is empty,
  808. * it swaps the reserve and free arrays.
  809. *
  810. */
  811. static enum vxge_hw_status
  812. vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
  813. {
  814. void **tmp_arr;
  815. if (channel->reserve_ptr - channel->reserve_top > 0) {
  816. _alloc_after_swap:
  817. *dtrh = channel->reserve_arr[--channel->reserve_ptr];
  818. return VXGE_HW_OK;
  819. }
  820. /* switch between empty and full arrays */
/* The idea behind this design is that by keeping the free and reserve
* arrays separate we effectively separate the irq and non-irq paths,
* i.e. no additional locking is needed when a resource is freed. */
  824. if (channel->length - channel->free_ptr > 0) {
  825. tmp_arr = channel->reserve_arr;
  826. channel->reserve_arr = channel->free_arr;
  827. channel->free_arr = tmp_arr;
  828. channel->reserve_ptr = channel->length;
  829. channel->reserve_top = channel->free_ptr;
  830. channel->free_ptr = channel->length;
  831. channel->stats->reserve_free_swaps_cnt++;
  832. goto _alloc_after_swap;
  833. }
  834. channel->stats->full_cnt++;
  835. *dtrh = NULL;
  836. return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
  837. }
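/*
* Worked example of the reserve/free swap above (numbers are illustrative):
* with length == 4, reserve_ptr == reserve_top == 0 and free_ptr == 1 (the
* descriptors freed so far sit in free_arr[1..3]), the swap turns free_arr
* into the new reserve_arr with reserve_ptr = 4 and reserve_top = 1, so the
* next allocations pop indices 3, 2 and 1 before the channel reports
* VXGE_HW_INF_OUT_OF_DESCRIPTORS again.
*/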
  838. /*
  839. * vxge_hw_channel_dtr_post - Post a dtr to the channel
  840. * @channelh: Channel
  841. * @dtrh: DTR pointer
  842. *
  843. * Posts a dtr to work array.
  844. *
  845. */
  846. static void
  847. vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
  848. {
  849. vxge_assert(channel->work_arr[channel->post_index] == NULL);
  850. channel->work_arr[channel->post_index++] = dtrh;
  851. /* wrap-around */
  852. if (channel->post_index == channel->length)
  853. channel->post_index = 0;
  854. }
  855. /*
  856. * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
  857. * @channel: Channel
  858. * @dtr: Buffer to return the next completed DTR pointer
  859. *
  860. * Returns the next completed dtr with out removing it from work array
  861. *
  862. */
  863. void
  864. vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
  865. {
  866. vxge_assert(channel->compl_index < channel->length);
  867. *dtrh = channel->work_arr[channel->compl_index];
  868. prefetch(*dtrh);
  869. }
  870. /*
  871. * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
  872. * @channel: Channel handle
  873. *
  874. * Removes the next completed dtr from work array
  875. *
  876. */
  877. void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
  878. {
  879. channel->work_arr[channel->compl_index] = NULL;
  880. /* wrap-around */
  881. if (++channel->compl_index == channel->length)
  882. channel->compl_index = 0;
  883. channel->stats->total_compl_cnt++;
  884. }
  885. /*
  886. * vxge_hw_channel_dtr_free - Frees a dtr
  887. * @channel: Channel handle
  888. * @dtr: DTR pointer
  889. *
  890. * Returns the dtr to free array
  891. *
  892. */
  893. void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
  894. {
  895. channel->free_arr[--channel->free_ptr] = dtrh;
  896. }
  897. /*
  898. * vxge_hw_channel_dtr_count
  899. * @channel: Channel handle. Obtained via vxge_hw_channel_open().
  900. *
* Retrieve the number of DTRs available. This function cannot be called
* from the data path. ring_initial_replenishi() is the only user.
  903. */
  904. int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
  905. {
  906. return (channel->reserve_ptr - channel->reserve_top) +
  907. (channel->length - channel->free_ptr);
  908. }
  909. /**
  910. * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
  911. * @ring: Handle to the ring object used for receive
  912. * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
  913. * with a valid handle.
  914. *
* Reserve Rx descriptor for the subsequent filling-in by the driver
  916. * and posting on the corresponding channel (@channelh)
  917. * via vxge_hw_ring_rxd_post().
  918. *
  919. * Returns: VXGE_HW_OK - success.
  920. * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
  921. *
  922. */
  923. enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
  924. void **rxdh)
  925. {
  926. enum vxge_hw_status status;
  927. struct __vxge_hw_channel *channel;
  928. channel = &ring->channel;
  929. status = vxge_hw_channel_dtr_alloc(channel, rxdh);
  930. if (status == VXGE_HW_OK) {
  931. struct vxge_hw_ring_rxd_1 *rxdp =
  932. (struct vxge_hw_ring_rxd_1 *)*rxdh;
  933. rxdp->control_0 = rxdp->control_1 = 0;
  934. }
  935. return status;
  936. }
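/*
* Replenish sketch (illustrative only; buffer attachment helpers live in
* vxge-traffic.h and the real loop belongs to the LL driver):
*
*	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
*		... attach a DMA-mapped receive buffer to rxdh ...
*		vxge_hw_ring_rxd_post(ring, rxdh);
*	}
*
* If buffer allocation fails, the reserved descriptor would instead be
* returned with vxge_hw_ring_rxd_free().
*/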
  937. /**
  938. * vxge_hw_ring_rxd_free - Free descriptor.
  939. * @ring: Handle to the ring object used for receive
  940. * @rxdh: Descriptor handle.
  941. *
  942. * Free the reserved descriptor. This operation is "symmetrical" to
  943. * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
  944. * lifecycle.
  945. *
  946. * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
  947. * be:
  948. *
  949. * - reserved (vxge_hw_ring_rxd_reserve);
  950. *
  951. * - posted (vxge_hw_ring_rxd_post);
  952. *
  953. * - completed (vxge_hw_ring_rxd_next_completed);
  954. *
  955. * - and recycled again (vxge_hw_ring_rxd_free).
  956. *
  957. * For alternative state transitions and more details please refer to
  958. * the design doc.
  959. *
  960. */
  961. void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
  962. {
  963. struct __vxge_hw_channel *channel;
  964. channel = &ring->channel;
  965. vxge_hw_channel_dtr_free(channel, rxdh);
  966. }
  967. /**
  968. * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
  969. * @ring: Handle to the ring object used for receive
  970. * @rxdh: Descriptor handle.
  971. *
  972. * This routine prepares a rxd and posts
  973. */
  974. void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
  975. {
  976. struct __vxge_hw_channel *channel;
  977. channel = &ring->channel;
  978. vxge_hw_channel_dtr_post(channel, rxdh);
  979. }
  980. /**
  981. * vxge_hw_ring_rxd_post_post - Process rxd after post.
  982. * @ring: Handle to the ring object used for receive
  983. * @rxdh: Descriptor handle.
  984. *
  985. * Processes rxd after post
  986. */
  987. void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
  988. {
  989. struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
  990. struct __vxge_hw_channel *channel;
  991. channel = &ring->channel;
  992. rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
  993. if (ring->stats->common_stats.usage_cnt > 0)
  994. ring->stats->common_stats.usage_cnt--;
  995. }
  996. /**
  997. * vxge_hw_ring_rxd_post - Post descriptor on the ring.
  998. * @ring: Handle to the ring object used for receive
  999. * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
  1000. *
  1001. * Post descriptor on the ring.
  1002. * Prior to posting the descriptor should be filled in accordance with
  1003. * Host/Titan interface specification for a given service (LL, etc.).
  1004. *
  1005. */
  1006. void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
  1007. {
  1008. struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
  1009. struct __vxge_hw_channel *channel;
  1010. channel = &ring->channel;
  1011. wmb();
  1012. rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
  1013. vxge_hw_channel_dtr_post(channel, rxdh);
  1014. if (ring->stats->common_stats.usage_cnt > 0)
  1015. ring->stats->common_stats.usage_cnt--;
  1016. }
  1017. /**
  1018. * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
  1019. * @ring: Handle to the ring object used for receive
  1020. * @rxdh: Descriptor handle.
  1021. *
  1022. * Processes rxd after post with memory barrier.
  1023. */
  1024. void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
  1025. {
  1026. wmb();
  1027. vxge_hw_ring_rxd_post_post(ring, rxdh);
  1028. }
  1029. /**
  1030. * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
  1031. * @ring: Handle to the ring object used for receive
  1032. * @rxdh: Descriptor handle. Returned by HW.
  1033. * @t_code: Transfer code, as per Titan User Guide,
  1034. * Receive Descriptor Format. Returned by HW.
  1035. *
  1036. * Retrieve the _next_ completed descriptor.
* HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
* driver of new completed descriptors. After that
* the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
* of the completions (the very first completion is passed by HW via
* vxge_hw_ring_callback_f).
  1042. *
  1043. * Implementation-wise, the driver is free to call
  1044. * vxge_hw_ring_rxd_next_completed either immediately from inside the
  1045. * ring callback, or in a deferred fashion and separate (from HW)
  1046. * context.
  1047. *
  1048. * Non-zero @t_code means failure to fill-in receive buffer(s)
  1049. * of the descriptor.
  1050. * For instance, parity error detected during the data transfer.
  1051. * In this case Titan will complete the descriptor and indicate
* to the host that the received data is not to be used.
  1053. * For details please refer to Titan User Guide.
  1054. *
  1055. * Returns: VXGE_HW_OK - success.
  1056. * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
  1057. * are currently available for processing.
  1058. *
  1059. * See also: vxge_hw_ring_callback_f{},
* vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
  1061. */
  1062. enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
  1063. struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
  1064. {
  1065. struct __vxge_hw_channel *channel;
  1066. struct vxge_hw_ring_rxd_1 *rxdp;
  1067. enum vxge_hw_status status = VXGE_HW_OK;
  1068. u64 control_0, own;
  1069. channel = &ring->channel;
  1070. vxge_hw_channel_dtr_try_complete(channel, rxdh);
  1071. rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
  1072. if (rxdp == NULL) {
  1073. status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
  1074. goto exit;
  1075. }
  1076. control_0 = rxdp->control_0;
  1077. own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
  1078. *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
  1079. /* check whether it is not the end */
  1080. if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
  1081. vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
  1082. 0);
  1083. ++ring->cmpl_cnt;
  1084. vxge_hw_channel_dtr_complete(channel);
  1085. vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
  1086. ring->stats->common_stats.usage_cnt++;
  1087. if (ring->stats->common_stats.usage_max <
  1088. ring->stats->common_stats.usage_cnt)
  1089. ring->stats->common_stats.usage_max =
  1090. ring->stats->common_stats.usage_cnt;
  1091. status = VXGE_HW_OK;
  1092. goto exit;
  1093. }
  1094. /* reset it. since we don't want to return
  1095. * garbage to the driver */
  1096. *rxdh = NULL;
  1097. status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
  1098. exit:
  1099. return status;
  1100. }
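/*
* Completion-poll sketch (illustrative only):
*
*	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
*							VXGE_HW_OK) {
*		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) != VXGE_HW_OK)
*			... drop the frame ...
*		else
*			... hand the buffer up the stack ...
*		... recycle or re-post rxdh ...
*	}
*
* A real receive path would add NAPI budgeting and buffer management on top
* of this skeleton.
*/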
  1101. /**
  1102. * vxge_hw_ring_handle_tcode - Handle transfer code.
  1103. * @ring: Handle to the ring object used for receive
  1104. * @rxdh: Descriptor handle.
  1105. * @t_code: One of the enumerated (and documented in the Titan user guide)
  1106. * "transfer codes".
  1107. *
  1108. * Handle descriptor's transfer code. The latter comes with each completed
  1109. * descriptor.
  1110. *
  1111. * Returns: one of the enum vxge_hw_status{} enumerated types.
  1112. * VXGE_HW_OK - for success.
  1113. * VXGE_HW_ERR_CRITICAL - when encounters critical error.
  1114. */
  1115. enum vxge_hw_status vxge_hw_ring_handle_tcode(
  1116. struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
  1117. {
  1118. struct __vxge_hw_channel *channel;
  1119. enum vxge_hw_status status = VXGE_HW_OK;
  1120. channel = &ring->channel;
/* If the t_code is not supported and is other than 0x5 (an
* unparseable packet, such as one with an unknown IPv6 header),
* drop it.
*/
  1125. if (t_code == VXGE_HW_RING_T_CODE_OK ||
  1126. t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
  1127. status = VXGE_HW_OK;
  1128. goto exit;
  1129. }
  1130. if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
  1131. status = VXGE_HW_ERR_INVALID_TCODE;
  1132. goto exit;
  1133. }
  1134. ring->stats->rxd_t_code_err_cnt[t_code]++;
  1135. exit:
  1136. return status;
  1137. }
  1138. /**
  1139. * __vxge_hw_non_offload_db_post - Post non offload doorbell
  1140. *
* @fifo: Fifo handle
  1142. * @txdl_ptr: The starting location of the TxDL in host memory
  1143. * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
  1144. * @no_snoop: No snoop flags
  1145. *
* This function posts a non-offload doorbell to the doorbell FIFO
  1147. *
  1148. */
  1149. static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
  1150. u64 txdl_ptr, u32 num_txds, u32 no_snoop)
  1151. {
  1152. struct __vxge_hw_channel *channel;
  1153. channel = &fifo->channel;
  1154. writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
  1155. VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
  1156. VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
  1157. &fifo->nofl_db->control_0);
  1158. mmiowb();
  1159. writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
  1160. mmiowb();
  1161. }
  1162. /**
  1163. * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
  1164. * the fifo
  1165. * @fifoh: Handle to the fifo object used for non offload send
  1166. */
  1167. u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
  1168. {
  1169. return vxge_hw_channel_dtr_count(&fifoh->channel);
  1170. }
  1171. /**
  1172. * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
  1173. * @fifoh: Handle to the fifo object used for non offload send
  1174. * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
  1175. * with a valid handle.
  1176. * @txdl_priv: Buffer to return the pointer to per txdl space
  1177. *
  1178. * Reserve a single TxDL (that is, fifo descriptor)
* for the subsequent filling-in by the driver
  1180. * and posting on the corresponding channel (@channelh)
  1181. * via vxge_hw_fifo_txdl_post().
  1182. *
* Note: it is the responsibility of the driver to reserve multiple descriptors
* for a lengthy (e.g., LSO) transmit operation. A single fifo descriptor
  1185. * carries up to configured number (fifo.max_frags) of contiguous buffers.
  1186. *
  1187. * Returns: VXGE_HW_OK - success;
  1188. * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
  1189. *
  1190. */
  1191. enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
  1192. struct __vxge_hw_fifo *fifo,
  1193. void **txdlh, void **txdl_priv)
  1194. {
  1195. struct __vxge_hw_channel *channel;
  1196. enum vxge_hw_status status;
  1197. int i;
  1198. channel = &fifo->channel;
  1199. status = vxge_hw_channel_dtr_alloc(channel, txdlh);
  1200. if (status == VXGE_HW_OK) {
  1201. struct vxge_hw_fifo_txd *txdp =
  1202. (struct vxge_hw_fifo_txd *)*txdlh;
  1203. struct __vxge_hw_fifo_txdl_priv *priv;
  1204. priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
  1205. /* reset the TxDL's private */
  1206. priv->align_dma_offset = 0;
  1207. priv->align_vaddr_start = priv->align_vaddr;
  1208. priv->align_used_frags = 0;
  1209. priv->frags = 0;
  1210. priv->alloc_frags = fifo->config->max_frags;
  1211. priv->next_txdl_priv = NULL;
  1212. *txdl_priv = (void *)(size_t)txdp->host_control;
  1213. for (i = 0; i < fifo->config->max_frags; i++) {
  1214. txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
  1215. txdp->control_0 = txdp->control_1 = 0;
  1216. }
  1217. }
  1218. return status;
  1219. }
  1220. /**
  1221. * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
  1222. * descriptor.
  1223. * @fifo: Handle to the fifo object used for non offload send
  1224. * @txdlh: Descriptor handle.
  1225. * @frag_idx: Index of the data buffer in the caller's scatter-gather list
  1226. * (of buffers).
  1227. * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
  1228. * @size: Size of the data buffer (in bytes).
  1229. *
  1230. * This API is part of the preparation of the transmit descriptor for posting
  1231. * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
  1232. * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
  1233. * All three APIs fill in the fields of the fifo descriptor,
  1234. * in accordance with the Titan specification.
  1235. *
  1236. */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
				  void *txdlh, u32 frag_idx,
				  dma_addr_t dma_pointer, u32 size)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp, *txdp_last;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

	if (frag_idx != 0)
		txdp->control_0 = txdp->control_1 = 0;
	else {
		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
		txdp->control_1 |= fifo->interrupt_type;
		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
			fifo->tx_intr_num);
		if (txdl_priv->frags) {
			txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
				(txdl_priv->frags - 1);
			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
		}
	}

	vxge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64)dma_pointer;

	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);

	fifo->stats->total_buffers++;
	txdl_priv->frags++;
}

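/*
 * Usage sketch (illustrative only): attaching the DMA-mapped fragments
 * of a transmit packet to a reserved descriptor. "fifo_hw", "txdlh",
 * "dma_addr" and "frag_len" are hypothetical names; the real driver
 * obtains them from its own mapping code.
 *
 *	u32 frag_idx = 0;
 *
 *	vxge_hw_fifo_txdl_buffer_set(fifo_hw, txdlh, frag_idx++,
 *				     dma_addr, frag_len);
 *	... repeat for each remaining fragment, incrementing frag_idx ...
 */
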
/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting, the descriptor should be filled in accordance with
 * the Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp_last;
	struct vxge_hw_fifo_txd *txdp_first;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp_first = (struct vxge_hw_fifo_txd *)txdlh;

	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
		VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

	__vxge_hw_non_offload_db_post(fifo,
		(u64)txdl_priv->dma_addr,
		txdl_priv->frags - 1,
		fifo->no_snoop_bits);

	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
		fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
			fifo->stats->common_stats.usage_cnt;
}

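/*
 * Usage sketch (illustrative only): once every fragment has been set,
 * hand the descriptor over to the adapter. "fifo_hw" and "txdlh" are
 * the hypothetical names carried over from the sketches above.
 *
 *	vxge_hw_fifo_txdl_post(fifo_hw, txdlh);
 *	... the descriptor is now owned by the adapter until completion ...
 */
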
/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *          Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
 * the driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_try_complete(channel, txdlh);

	txdp = (struct vxge_hw_fifo_txd *)*txdlh;
	if (txdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether host owns it */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

		vxge_assert(txdp->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* no more completions */
	*txdlh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}

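/*
 * Usage sketch (illustrative only): draining completed descriptors in a
 * loop, checking the transfer code and recycling each descriptor. The
 * handle name "fifo_hw" is hypothetical, and buffer unmapping is reduced
 * to a placeholder comment.
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo_hw, &txdlh,
 *						&t_code) == VXGE_HW_OK) {
 *		if (t_code)
 *			vxge_hw_fifo_handle_tcode(fifo_hw, txdlh, t_code);
 *		... unmap and free the buffers tied to txdlh ...
 *		vxge_hw_fifo_txdl_free(fifo_hw, txdlh);
 *	}
 */
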
/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_INVALID_TCODE - when the transfer code is outside the
 * valid range.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
					      void *txdlh,
					      enum vxge_hw_fifo_tcode t_code)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
	return status;
}

/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	u32 max_frags;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
			(struct vxge_hw_fifo_txd *)txdlh);

	max_frags = fifo->config->max_frags;

	vxge_hw_channel_dtr_free(channel, txdlh);
}

/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN],
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	switch (duplicate_mode) {
	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
		i = 0;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
		i = 1;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
		i = 2;
		break;
	default:
		i = 0;
		break;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
	return status;
}

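/*
 * Usage sketch (illustrative only): adding one address entry. The names
 * "vpath_handle", "dev_addr" and "addr_mask" are hypothetical, and the
 * choice of duplicate mode is only an example.
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_add(vpath_handle, dev_addr,
 *			addr_mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 *	if (status != VXGE_HW_OK)
 *		... the entry was not programmed ...
 */
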
/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 *               vpath from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}

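/*
 * Usage sketch (illustrative only): walking the vpath's MAC address
 * table by fetching the first entry and then iterating with the
 * "get_next" variant until no more entries are returned. "vpath_handle"
 * is a hypothetical name.
 *
 *	u8 addr[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_get(vpath_handle, addr, mask);
 *	while (status == VXGE_HW_OK) {
 *		... consume addr/mask ...
 *		status = vxge_hw_vpath_mac_addr_get_next(vpath_handle,
 *							 addr, mask);
 *	}
 */
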
/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Delete the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the first vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}

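/*
 * Usage sketch (illustrative only): mirroring a VLAN filter change into
 * the vpath's vlan id table. "vpath_handle", "vlan_id" and the "add"
 * flag are hypothetical names used only for this example.
 *
 *	if (add)
 *		vxge_hw_vpath_vid_add(vpath_handle, vlan_id);
 *	else
 *		vxge_hw_vpath_vid_delete(vpath_handle, vlan_id);
 */
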
/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 *
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/*
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vp: Virtual Path handle.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
			u32 skip_alarms)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
	return status;
}

/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *                          alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *               interrupts (can be repeated). If fifo or ring are not enabled
 *               the MSIX vector for that should be set to 0
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API will associate the given MSIX vector numbers with the four TIM
 * interrupts and the alarm interrupt.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
		       int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 vp_id = vp->vpath->vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
		  (vp_id * 4) + tim_msix_id[0]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
		  (vp_id * 4) + tim_msix_id[1]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
			&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
					VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
				0, 32), &vp_reg->one_shot_vect0_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);
	}
}

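/*
 * Usage sketch (illustrative only): binding a vpath's Tx and Rx TIM
 * interrupts to two MSIX vectors and its alarm to a third. The vector
 * numbering and the name "vpath_handle" are assumptions made for the
 * example.
 *
 *	int tim_msix_id[4] = {0, 1, 0, 0};
 *
 *	vxge_hw_vpath_msix_set(vpath_handle, tim_msix_id, 2);
 */
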
/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 *
 * Returns: 0,
 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
 * status.
 * See also:
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSI ID
 *
 * The function clears the msix interrupt for the given msix_id
 *
 * Returns: 0,
 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
 * status.
 * See also:
 */
void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
	else
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSI ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 *
 * Returns: 0,
 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
 * status.
 * See also:
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ... 3] = 0};
	u32 tim_int_mask1[4] = {[0 ... 3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}

/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ... 3] = 0};
	u32 tim_int_mask1[4] = {[0 ... 3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}

/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	u64 val64 = 0;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			val64 =
			  readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}

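/*
 * Usage sketch (illustrative only): a receive poll, e.g. from a NAPI
 * handler, draining completions for one ring. "ring_hw" is a
 * hypothetical name; budget handling is left to the caller.
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_poll_rx(ring_hw);
 *	... status is VXGE_HW_OK when the poll completed, or one of the
 *	    informational codes described above ...
 */
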
/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					struct sk_buff ***skb_ptr, int nr_skb,
					int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}
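
/*
 * Usage sketch (illustrative only): reclaiming transmit completions from
 * a NAPI or interrupt context. The caller supplies an array in which the
 * completion callback can park skbs to be freed afterwards; "fifo_hw",
 * "skbs" and the array size are hypothetical.
 *
 *	struct sk_buff *skbs[16];
 *	struct sk_buff **skb_ptr = skbs;
 *	int more = 0;
 *
 *	vxge_hw_vpath_poll_tx(fifo_hw, &skb_ptr, 16, &more);
 *	... free the skbs collected by the callback, and poll again
 *	    while "more" is set ...
 */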