@@ -30,63 +30,6 @@
 #include "ixgbe_dcb.h"
 #include "ixgbe_dcb_82599.h"
 
-/**
- * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
- * @hw: pointer to hardware structure
- * @rx_pba: method to distribute packet buffer
- *
- * Configure packet buffers for DCB mode.
- */
-static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
-{
-	int num_tcs = IXGBE_MAX_PACKET_BUFFERS;
-	u32 rx_pb_size = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
-	u32 rxpktsize;
-	u32 txpktsize;
-	u32 txpbthresh;
-	u8  i = 0;
-
-	/*
-	 * This really means configure the first half of the TCs
-	 * (Traffic Classes) to use 5/8 of the Rx packet buffer
-	 * space. To determine the size of the buffer for each TC,
-	 * we are multiplying the average size by 5/4 and applying
-	 * it to half of the traffic classes.
-	 */
-	if (rx_pba == pba_80_48) {
-		rxpktsize = (rx_pb_size * 5) / (num_tcs * 4);
-		rx_pb_size -= rxpktsize * (num_tcs / 2);
-		for (; i < (num_tcs / 2); i++)
-			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
-	}
-
-	/* Divide the remaining Rx packet buffer evenly among the TCs */
-	rxpktsize = rx_pb_size / (num_tcs - i);
-	for (; i < num_tcs; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
-
-	/*
-	 * Setup Tx packet buffer and threshold equally for all TCs
-	 * TXPBTHRESH register is set in K so divide by 1024 and subtract
-	 * 10 since the largest packet we support is just over 9K.
-	 */
-	txpktsize = IXGBE_TXPBSIZE_MAX / num_tcs;
-	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
-	for (i = 0; i < num_tcs; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
-		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
-	}
-
-	/* Clear unused TCs, if any, to zero buffer size*/
-	for (; i < MAX_TRAFFIC_CLASS; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
-	}
-
-	return 0;
-}
-
 /**
  * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
  * @hw: pointer to hardware structure
@@ -434,7 +377,6 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
 /**
  * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
  * @hw: pointer to hardware structure
- * @rx_pba: method to distribute packet buffer
  * @refill: refill credits index by traffic class
  * @max: max credits index by traffic class
  * @bwg_id: bandwidth grouping indexed by traffic class
@@ -443,11 +385,9 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
  *
  * Configure dcb settings and enable dcb mode.
  */
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
-			      u8 rx_pba, u8 pfc_en, u16 *refill,
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
 			      u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
 {
-	ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
 	ixgbe_dcb_config_82599(hw);
 	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
 					  prio_type, prio_tc);