/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "bnx2x.h"

/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
extern int num_queues;
extern int int_mode;

/************************ Macros ********************************/

#define BNX2X_PCI_FREE(x, y, size) \
        do { \
                if (x) { \
                        dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
                        x = NULL; \
                        y = 0; \
                } \
        } while (0)

#define BNX2X_FREE(x) \
        do { \
                if (x) { \
                        kfree((void *)x); \
                        x = NULL; \
                } \
        } while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
        do { \
                x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
                memset((void *)x, 0, size); \
        } while (0)

#define BNX2X_ALLOC(x, size) \
        do { \
                x = kzalloc(size, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
        } while (0)
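
/* Illustrative sketch, not part of the driver: the allocation macros above
 * assume a local "struct bnx2x *bp" and an "alloc_mem_err:" label in the
 * enclosing function, roughly as in the hypothetical helper below (the
 * some_dma_va/some_dma_pa/some_table fields are made-up placeholders).
 *
 *        static int example_alloc(struct bnx2x *bp)
 *        {
 *                BNX2X_PCI_ALLOC(bp->some_dma_va, &bp->some_dma_pa, PAGE_SIZE);
 *                BNX2X_ALLOC(bp->some_table, sizeof(*bp->some_table) * 8);
 *                return 0;
 *
 *        alloc_mem_err:
 *                BNX2X_PCI_FREE(bp->some_dma_va, bp->some_dma_pa, PAGE_SIZE);
 *                BNX2X_FREE(bp->some_table);
 *                return -ENOMEM;
 *        }
 */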

/*********************** Interfaces ****************************
 * Functions that need to be implemented by each driver version
 */

/* Init */

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp: driver handle
 * @unload_mode: requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp: driver handle
 */
void bnx2x_send_unload_done(struct bnx2x *bp);

/**
 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
 *
 * @bp: driver handle
 * @rss_obj: RSS object to use
 * @ind_table: indirection table to configure
 * @config_hash: re-configure RSS hash keys configuration
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
                        bool config_hash);

/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp: driver handle
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp: driver handle
 * @fp: pointer to the fastpath structure
 * @leading: boolean
 *
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                      bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp: driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp: driver handle
 * @command: request
 * @param: request's parameter
 *
 * block until there is a reply
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp: driver handle
 * @load_mode: current mode
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp: driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp: driver handle
 * @is_serdes: bool
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp: driver handle
 *
 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp: driver handle
 * @igu_sb_id: SB id
 * @segment: SB segment
 * @index: SB index
 * @op: SB operation
 * @update: is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
                      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp: driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp: driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp: driver handle
 *
 * Takes into account current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq: irq number
 * @dev_instance: private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq: irq number
 * @dev_instance: private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

#ifdef BCM_CNIC
/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp: driver handle
 * @cmd: command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp: driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

/**
 * bnx2x_setup_cnic_info - provides cnic with updated info
 *
 * @bp: driver handle
 */
void bnx2x_setup_cnic_info(struct bnx2x *bp);
#endif

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp: driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp: driver handle
 * @disable_hw: true, disable HW interrupts.
 *
 * This function ensures that no ISRs or SP DPCs (sp_task) are
 * running when it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init - init driver internals.
 *
 * @bp: driver handle
 * @load_code: COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp: driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp: driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp: driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp: driver handle
 * @unload_mode: COMMON, PORT, FUNCTION
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp: driver handle
 * @resource: resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp: driver handle
 * @resource: resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_leader_lock - release recovery leader lock
 *
 * @bp: driver handle
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp: driver handle
 * @set: set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * @dev: netdevice
 *
 * called with netif_tx_lock from dev_mcast.c
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh()
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
 *
 * @bp: driver handle
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @bp: driver handle
 * @cl_id: client id
 * @rx_mode_flags: rx mode configuration
 * @rx_accept_flags: rx accept configuration
 * @tx_accept_flags: tx accept configuration (tx switch)
 * @ramrod_flags: ramrod configuration
 */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
                         unsigned long rx_mode_flags,
                         unsigned long rx_accept_flags,
                         unsigned long tx_accept_flags,
                         unsigned long ramrod_flags);
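
/* Illustrative sketch, not part of the driver: callers are expected to build
 * the accept/ramrod bit masks and then issue the rx_mode command, roughly as
 * below. The exact flag names are an assumption based on this driver
 * generation and may differ.
 *
 *        unsigned long rx_accept = 0, tx_accept = 0, ramrod = 0;
 *
 *        __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept);
 *        __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept);
 *        __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept);
 *        __set_bit(RAMROD_RX, &ramrod);
 *        __set_bit(RAMROD_TX, &ramrod);
 *        bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, 0, rx_accept, tx_accept,
 *                            ramrod);
 */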

/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp: fastpath handle for the event
 * @rr_cqe: eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp: driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp: driver handle
 */
void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp: driver handle
 * @state: required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp: driver handle
 * @value: new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

/* Error handling */
void bnx2x_panic_dump(struct bnx2x *bp);
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* validate that the correct FW is loaded */
bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                          u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp: driver handle
 *
 * fills msix_table, requests vectors, updates num_queues
 * according to number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request msi mode from OS, update internals accordingly
 *
 * @bp: driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_poll - NAPI callback
 *
 * @napi: napi structure
 * @budget:
 *
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure
 *
 * @bp: driver handle
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memory outside the main driver structure
 *
 * @bp: driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev: net device
 * @new_mtu: requested mtu
 *
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @dev: net_device
 * @wwn: output buffer
 * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 *
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif

netdev_features_t bnx2x_fix_features(struct net_device *dev,
                                     netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev: net device
 */
void bnx2x_tx_timeout(struct net_device *dev);

/*********************** Inlines **********************************/
/*********************** Fast path ********************************/

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        barrier(); /* status block is written to by the chip */
        fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
                        struct bnx2x_fastpath *fp, u16 bd_prod,
                        u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        u32 i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since the FW
         * assumes BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(rx_prods)/4; i++)
                REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
           fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
                                        u8 segment, u16 index, u8 op,
                                        u8 update, u32 igu_addr)
{
        struct igu_regular cmd_data = {0};

        cmd_data.sb_id_and_flags =
                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));

        DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
           cmd_data.sb_id_and_flags, igu_addr);
        REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
                                   u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
                                u16 index, u8 op, u8 update)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
        else {
                u8 segment;

                if (CHIP_INT_MODE_IS_BC(bp))
                        segment = storm;
                else if (igu_sb_id != bp->igu_dsb_id)
                        segment = IGU_SEG_ACCESS_DEF;
                else if (storm == ATTENTION_ID)
                        segment = IGU_SEG_ACCESS_ATTN;
                else
                        segment = IGU_SEG_ACCESS_DEF;
                bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
        }
}

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
        u32 result = REG_RD(bp, igu_addr);

        DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
           result, igu_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
        barrier();
        if (bp->common.int_block == INT_BLOCK_HC)
                return bnx2x_hc_ack_int(bp);
        else
                return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
                                 struct bnx2x_fp_txdata *txdata)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = txdata->tx_bd_prod;
        cons = txdata->tx_bd_cons;

        used = SUB_S16(prod, cons);

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > txdata->tx_ring_size);
        WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(txdata->tx_ring_size) - used;
}
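
/* Illustrative sketch, not part of the driver: a transmit path would normally
 * use bnx2x_tx_avail() to stop the netdev queue before the BD ring runs dry,
 * e.g. (the threshold name below is hypothetical):
 *
 *        if (unlikely(bnx2x_tx_avail(bp, txdata) < EXAMPLE_MAX_BDS_PER_PKT))
 *                netif_tx_stop_queue(netdev_get_tx_queue(bp->dev,
 *                                                        txdata->txq_index));
 */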

static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u8 cos;

        for_each_cos_in_tx_queue(fp, cos)
                if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
                        return true;
        return false;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;
        return (fp->rx_comp_cons != rx_cons_sb);
}

/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp: driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
        netif_tx_disable(bp->dev);
        netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
        int i;

        bp->num_napi_queues = bp->num_queues;

        /* Add NAPI objects */
        for_each_rx_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
        int i;

        for_each_rx_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
}

void bnx2x_set_int_mode(struct bnx2x *bp);

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
        if (bp->flags & USING_MSIX_FLAG) {
                pci_disable_msix(bp->pdev);
                bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
        } else if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
        return num_queues ?
                 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
                 min_t(int, netif_get_num_default_rss_queues(),
                       BNX2X_MAX_QUEUES(bp));
}
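
/* Illustrative note, not part of the driver: with the num_queues module
 * parameter left at 0, the queue count falls back to the smaller of the
 * kernel's default RSS queue count and BNX2X_MAX_QUEUES(bp); e.g. if
 * netif_get_num_default_rss_queues() returned 8 and the device allowed 16,
 * 8 queues would be used (the numbers are hypothetical).
 */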

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
                        idx--;
                }
        }
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

/* note that we are not allocating a new buffer,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
                                       u16 cons, u16 prod)
{
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        prod_rx_buf->data = cons_rx_buf->data;
        *prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/* returns func by VN for current port */
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
        return 2 * vn + BP_PORT(bp);
}

static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
        return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
}

/**
 * bnx2x_func_start - init function
 *
 * @bp: driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
        struct bnx2x_func_state_params func_params = {NULL};
        struct bnx2x_func_start_params *start_params =
                &func_params.params.start;

        /* Prepare parameters for function state transitions */
        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_START;

        /* Function parameters */
        start_params->mf_mode = bp->mf_mode;
        start_params->sd_vlan_tag = bp->mf_ov;

        if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
                start_params->network_cos_mode = STATIC_COS;
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;

        return bnx2x_func_state_change(bp, &func_params);
}

/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi: pointer to upper part
 * @fw_mid: pointer to middle part
 * @fw_lo: pointer to lower part
 * @mac: pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
                                         u8 *mac)
{
        ((u8 *)fw_hi)[0] = mac[1];
        ((u8 *)fw_hi)[1] = mac[0];
        ((u8 *)fw_mid)[0] = mac[3];
        ((u8 *)fw_mid)[1] = mac[2];
        ((u8 *)fw_lo)[0] = mac[5];
        ((u8 *)fw_lo)[1] = mac[4];
}
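
/* Illustrative note, not part of the driver: each 16-bit part holds two MAC
 * bytes with the pair swapped, so for a hypothetical address 00:10:18:aa:bb:cc
 * the bytes land as fw_hi = {0x10, 0x00}, fw_mid = {0xaa, 0x18} and
 * fw_lo = {0xcc, 0xbb} when viewed as byte arrays in memory.
 */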

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        if (fp->disable_tpa)
                return;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_RINGS; i++) {
                struct eth_rx_bd *rx_bd;

                rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                rx_bd->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                rx_bd->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
        }
}

/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
 * port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;

        if (!CHIP_IS_E1x(bp)) {
#ifdef BCM_CNIC
                /* there are special statistics counters for FCoE 136..140 */
                if (IS_FCOE_FP(fp))
                        return bp->cnic_base_cl_id + (bp->pf_num >> 1);
#endif
                return fp->cl_id;
        }
        return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
                                               bnx2x_obj_type obj_type)
{
        struct bnx2x *bp = fp->bp;

        /* Configure classification DBs */
        bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
                           fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
                           bnx2x_sp_mapping(bp, mac_rdata),
                           BNX2X_FILTER_MAC_PENDING,
                           &bp->sp_state, obj_type,
                           &bp->macs_pool);
}

/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp: driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
        u8 func_num = 0, i;

        /* 57710 has only one function per-port */
        if (CHIP_IS_E1(bp))
                return 1;

        /* Calculate a number of functions enabled on the current
         * PATH/PORT.
         */
        if (CHIP_REV_IS_SLOW(bp)) {
                if (IS_MF(bp))
                        func_num = 4;
                else
                        func_num = 2;
        } else {
                for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
                        u32 func_config =
                                MF_CFG_RD(bp,
                                          func_mf_config[BP_PORT(bp) + 2 * i].
                                          config);
                        func_num +=
                                ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
                }
        }

        WARN_ON(!func_num);

        return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
        /* RX_MODE controlling object */
        bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

        /* multicast configuration controlling object */
        bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
                             BP_FUNC(bp), BP_FUNC(bp),
                             bnx2x_sp(bp, mcast_rdata),
                             bnx2x_sp_mapping(bp, mcast_rdata),
                             BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
                             BNX2X_OBJ_TYPE_RX);

        /* Setup CAM credit pools */
        bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
                                   bnx2x_get_path_func_num(bp));

        /* RSS configuration object */
        bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
                                  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
                                  bnx2x_sp(bp, rss_rdata),
                                  bnx2x_sp_mapping(bp, rss_rdata),
                                  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
                                  BNX2X_OBJ_TYPE_RX);
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
        if (CHIP_IS_E1x(fp->bp))
                return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
        else
                return fp->cl_id;
}

static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;

        if (!CHIP_IS_E1x(bp))
                return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
        else
                return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
}

static inline void bnx2x_init_txdata(struct bnx2x *bp,
                                     struct bnx2x_fp_txdata *txdata, u32 cid,
                                     int txq_index, __le16 *tx_cons_sb,
                                     struct bnx2x_fastpath *fp)
{
        txdata->cid = cid;
        txdata->txq_index = txq_index;
        txdata->tx_cons_sb = tx_cons_sb;
        txdata->parent_fp = fp;
        txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;

        DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
           txdata->cid, txdata->txq_index);
}

#ifdef BCM_CNIC
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
        return bp->cnic_base_cl_id + cl_idx +
                (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
        /* the 'first' id is allocated for the cnic */
        return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
        return bp->igu_base_sb;
}

static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
        struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
        unsigned long q_type = 0;

        bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
        bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
                                                     BNX2X_FCOE_ETH_CL_ID_IDX);
        bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
        bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
        bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
        bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
        bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
                          fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
                          fp);

        DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

        /* qZone id equals to FW (per path) client id */
        bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
        /* init shortcut */
        bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
                bnx2x_rx_ustorm_prods_offset(fp);

        /* Configure Queue State object */
        __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
        __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

        /* No multi-CoS for FCoE L2 client */
        BUG_ON(fp->max_cos != 1);

        bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
                             &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
                             bnx2x_sp_mapping(bp, q_rdata), q_type);

        DP(NETIF_MSG_IFUP,
           "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
           fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
           fp->igu_sb_id);
}
#endif

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
                                       struct bnx2x_fp_txdata *txdata)
{
        int cnt = 1000;

        while (bnx2x_has_tx_work_unload(txdata)) {
                if (!cnt) {
                        BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
                                  txdata->txq_index, txdata->tx_pkt_prod,
                                  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
                        bnx2x_panic();
                        return -EBUSY;
#else
                        break;
#endif
                }
                cnt--;
                usleep_range(1000, 1000);
        }

        return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
                                         u32 addr, size_t size, u32 *data)
{
        int i;

        for (i = 0; i < size/4; i++)
                REG_WR(bp, addr + (i * 4), data[i]);
}

/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp: driver handle
 * @mask: bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
        int tout = 5000; /* Wait for 5 secs tops */

        while (tout--) {
                smp_mb();
                netif_addr_lock_bh(bp->dev);
                if (!(bp->sp_state & mask)) {
                        netif_addr_unlock_bh(bp->dev);
                        return true;
                }
                netif_addr_unlock_bh(bp->dev);

                usleep_range(1000, 1000);
        }

        smp_mb();

        netif_addr_lock_bh(bp->dev);
        if (bp->sp_state & mask) {
                BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
                          bp->sp_state, mask);
                netif_addr_unlock_bh(bp->dev);
                return false;
        }
        netif_addr_unlock_bh(bp->dev);

        return true;
}

/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp: driver handle
 * @cxt: context of the connection on the host memory
 * @cid: SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
                              u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
                                    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp: driver handle
 * @mf_cfg: MF configuration
 *
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
        u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                      FUNC_MF_CFG_MAX_BW_SHIFT;

        if (!max_cfg) {
                DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
                   "Max BW configured to 0 - using 100 instead\n");
                max_cfg = 100;
        }
        return max_cfg;
}
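
/* Illustrative note, not part of the driver: assuming, for the sake of the
 * example, FUNC_MF_CFG_MAX_BW_MASK == 0x00ff0000 and
 * FUNC_MF_CFG_MAX_BW_SHIFT == 16, an mf_cfg word of 0x00320000 would yield
 * max_cfg = 0x32 = 50, i.e. 50% of the link bandwidth, while a field of 0 is
 * treated as 100.
 */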

/* checks if HW supports GRO for given MTU */
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
        /* gro frags per page */
        int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

        /*
         * 1. number of frags should not grow above MAX_SKB_FRAGS
         * 2. frag must fit the page
         */
        return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}
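
/* Illustrative note, not part of the driver: both conditions above must hold,
 * so an MTU larger than SGE_PAGE_SIZE can never use HW GRO, and for smaller
 * MTUs the aggregated frame may span at most U_ETH_SGL_SIZE * fpp frags,
 * which has to fit within MAX_SKB_FRAGS of a single skb.
 */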

#ifdef BCM_CNIC
/**
 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
 *
 * @bp: driver handle
 *
 */
void bnx2x_get_iscsi_info(struct bnx2x *bp);
#endif

/**
 * bnx2x_link_sync_notify - send notification to other functions.
 *
 * @bp: driver handle
 *
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
        int func;
        int vn;

        /* Set the attention towards other drivers on the same port */
        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                if (vn == BP_VN(bp))
                        continue;

                func = func_by_vn(bp, vn);
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
                       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
        }
}

/**
 * bnx2x_update_drv_flags - update flags in shmem
 *
 * @bp: driver handle
 * @flags: flags to update
 * @set: set or clear
 *
 */
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
        if (SHMEM2_HAS(bp, drv_flags)) {
                u32 drv_flags;

                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
                drv_flags = SHMEM2_RD(bp, drv_flags);

                if (set)
                        SET_FLAGS(drv_flags, flags);
                else
                        RESET_FLAGS(drv_flags, flags);

                SHMEM2_WR(bp, drv_flags, drv_flags);
                DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
        }
}

static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
        if (is_valid_ether_addr(addr))
                return true;
#ifdef BCM_CNIC
        if (is_zero_ether_addr(addr) &&
            (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
                return true;
#endif
        return false;
}

#endif /* BNX2X_CMN_H */