RSS reta update = Y
VMDq = Y
SR-IOV = Y
+DCB = Y
VLAN filter = Y
Rate limitation = Y
CRC offload = P
- Link state information
- Interrupt mode for RX
- Scattered and gather for TX and RX
+- DCB
- LRO
Prerequisites
# Copyright(c) 2015-2020
sources = [
+ 'txgbe_dcb_hw.c',
+ 'txgbe_dcb.c',
'txgbe_eeprom.c',
'txgbe_hw.c',
'txgbe_mbx.c',
#include "txgbe_eeprom.h"
#include "txgbe_phy.h"
#include "txgbe_hw.h"
+#include "txgbe_dcb.h"
#endif /* _TXGBE_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_type.h"
+#include "txgbe_hw.h"
+#include "txgbe_dcb.h"
+#include "txgbe_dcb_hw.h"
+
+/**
+ * txgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings
+ * @max_frame_size: Maximum frame size
+ * @direction: Configuring either Tx or Rx
+ *
+ * This function calculates the credits allocated to each traffic class.
+ * It should be called only after the rules are checked by
+ * txgbe_dcb_check_config_cee().
+ */
+s32 txgbe_dcb_calculate_tc_credits_cee(struct txgbe_hw *hw,
+ struct txgbe_dcb_config *dcb_config,
+ u32 max_frame_size, u8 direction)
+{
+ struct txgbe_dcb_tc_path *p;
+ u32 min_multiplier = 0;
+ u16 min_percent = 100;
+ s32 ret_val = 0;
+	/* Default initialization values for Tx settings */
+ u32 min_credit = 0;
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u16 link_percentage = 0;
+ u8 bw_percent = 0;
+ u8 i;
+
+ UNREFERENCED_PARAMETER(hw);
+
+ if (dcb_config == NULL) {
+ ret_val = TXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ min_credit = ((max_frame_size / 2) + TXGBE_DCB_CREDIT_QUANTUM - 1) /
+ TXGBE_DCB_CREDIT_QUANTUM;
+
+ /* Find smallest link percentage */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[p->bwg_id][direction];
+ link_percentage = p->bwg_percent;
+
+ link_percentage = (link_percentage * bw_percent) / 100;
+
+ if (link_percentage && link_percentage < min_percent)
+ min_percent = link_percentage;
+ }
+
+ /*
+ * The ratio between traffic classes will control the bandwidth
+ * percentages seen on the wire. To calculate this ratio we use
+	 * a multiplier. The refill credits must be larger than the max
+	 * frame size, so here we find the smallest multiplier that keeps
+	 * the credits for every non-zero bandwidth percentage above the
+	 * max frame size.
+ */
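+	/*
+	 * Worked example (illustrative values only): with a 1518 byte max
+	 * frame, min_credit = ((1518 / 2) + 63) / 64 = 12 credits; if the
+	 * smallest non-zero link percentage is 12%, then
+	 * min_multiplier = 12 / 12 + 1 = 2.
+	 */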
+ min_multiplier = (min_credit / min_percent) + 1;
+
+ /* Find out the link percentage for each TC first */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[p->bwg_id][direction];
+
+ link_percentage = p->bwg_percent;
+ /* Must be careful of integer division for very small nums */
+ link_percentage = (link_percentage * bw_percent) / 100;
+ if (p->bwg_percent > 0 && link_percentage == 0)
+ link_percentage = 1;
+
+ /* Save link_percentage for reference */
+ p->link_percent = (u8)link_percentage;
+
+ /* Calculate credit refill ratio using multiplier */
+ credit_refill = min(link_percentage * min_multiplier,
+ (u32)TXGBE_DCB_MAX_CREDIT_REFILL);
+
+ /* Refill at least minimum credit */
+ if (credit_refill < min_credit)
+ credit_refill = min_credit;
+
+ p->data_credits_refill = (u16)credit_refill;
+
+ /* Calculate maximum credit for the TC */
+ credit_max = (link_percentage * TXGBE_DCB_MAX_CREDIT) / 100;
+
+ /*
+ * Adjustment based on rule checking, if the percentage
+ * of a TC is too small, the maximum credit may not be
+ * enough to send out a jumbo frame in data plane arbitration.
+ */
+ if (credit_max < min_credit)
+ credit_max = min_credit;
+
+ if (direction == TXGBE_DCB_TX_CONFIG) {
+ dcb_config->tc_config[i].desc_credits_max =
+ (u16)credit_max;
+ }
+
+ p->data_credits_max = (u16)credit_max;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * txgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info
+ * @cfg: dcb configuration to unpack into hardware consumable fields
+ * @map: user priority to traffic class map
+ * @pfc_up: u8 to store user priority PFC bitmask
+ *
+ * This unpacks the dcb configuration PFC info, which is stored per
+ * traffic class, into an 8-bit user priority bitmask that can be
+ * consumed by hardware routines. The priority to tc map must be
+ * updated before calling this routine so that the current up-to-tc
+ * mapping is used.
+ */
+void txgbe_dcb_unpack_pfc_cee(struct txgbe_dcb_config *cfg, u8 *map, u8 *pfc_up)
+{
+ struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int up;
+
+ /*
+ * If the TC for this user priority has PFC enabled then set the
+ * matching bit in 'pfc_up' to reflect that PFC is enabled.
+ */
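+	/* e.g. if only UPs 0 and 3 map to PFC-enabled TCs, *pfc_up becomes 0x09 */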
+ for (*pfc_up = 0, up = 0; up < TXGBE_DCB_UP_MAX; up++) {
+ if (tc_config[map[up]].pfc != txgbe_dcb_pfc_disabled)
+ *pfc_up |= 1 << up;
+ }
+}
+
+void txgbe_dcb_unpack_refill_cee(struct txgbe_dcb_config *cfg, int direction,
+ u16 *refill)
+{
+ struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < TXGBE_DCB_TC_MAX; tc++)
+ refill[tc] = tc_config[tc].path[direction].data_credits_refill;
+}
+
+void txgbe_dcb_unpack_max_cee(struct txgbe_dcb_config *cfg, u16 *max)
+{
+ struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < TXGBE_DCB_TC_MAX; tc++)
+ max[tc] = tc_config[tc].desc_credits_max;
+}
+
+void txgbe_dcb_unpack_bwgid_cee(struct txgbe_dcb_config *cfg, int direction,
+ u8 *bwgid)
+{
+ struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < TXGBE_DCB_TC_MAX; tc++)
+ bwgid[tc] = tc_config[tc].path[direction].bwg_id;
+}
+
+void txgbe_dcb_unpack_tsa_cee(struct txgbe_dcb_config *cfg, int direction,
+ u8 *tsa)
+{
+ struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < TXGBE_DCB_TC_MAX; tc++)
+ tsa[tc] = tc_config[tc].path[direction].tsa;
+}
+
+u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *cfg, int direction, u8 up)
+{
+ struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ u8 prio_mask = 1 << up;
+ u8 tc = cfg->num_tcs.pg_tcs;
+
+ /* If tc is 0 then DCB is likely not enabled or supported */
+ if (!tc)
+ goto out;
+
+ /*
+ * Test from maximum TC to 1 and report the first match we find. If
+ * we find no match we can assume that the TC is 0 since the TC must
+	 * be set for all user priorities.
+ */
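+	/*
+	 * e.g. if TC 3 is the highest TC whose up_to_tc_bitmap (for this
+	 * direction) has bit 6 set, user priority 6 resolves to TC 3.
+	 */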
+ for (tc--; tc; tc--) {
+ if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
+ break;
+ }
+out:
+ return tc;
+}
+
+void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *cfg, int direction,
+ u8 *map)
+{
+ u8 up;
+
+ for (up = 0; up < TXGBE_DCB_UP_MAX; up++)
+ map[up] = txgbe_dcb_get_tc_from_up(cfg, direction, up);
+}
+
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_DCB_H_
+#define _TXGBE_DCB_H_
+
+#include "txgbe_type.h"
+
+/* DCB defines */
+/* DCB credit calculation defines */
+#define TXGBE_DCB_CREDIT_QUANTUM 64
+#define TXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */
+#define TXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB */
+#define TXGBE_DCB_MAX_CREDIT (2 * TXGBE_DCB_MAX_CREDIT_REFILL)
+
+/* 513 for 32KB TSO packet */
+#define TXGBE_DCB_MIN_TSO_CREDIT \
+ ((TXGBE_DCB_MAX_TSO_SIZE / TXGBE_DCB_CREDIT_QUANTUM) + 1)
+
+#define TXGBE_DCB_TX_CONFIG 0
+#define TXGBE_DCB_RX_CONFIG 1
+
+struct txgbe_dcb_support {
+ u32 capabilities; /* DCB capabilities */
+
+ /* Each bit represents a number of TCs configurable in the hw.
+ * If 8 traffic classes can be configured, the value is 0x80.
+ */
+ u8 traffic_classes;
+ u8 pfc_traffic_classes;
+};
+
+enum txgbe_dcb_tsa {
+ txgbe_dcb_tsa_ets = 0,
+ txgbe_dcb_tsa_group_strict_cee,
+ txgbe_dcb_tsa_strict
+};
+
+/* Traffic class bandwidth allocation per direction */
+struct txgbe_dcb_tc_path {
+ u8 bwg_id; /* Bandwidth Group (BWG) ID */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 link_percent; /* % of link bandwidth */
+ u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
+ u16 data_credits_refill; /* Credit refill amount in 64B granularity */
+ u16 data_credits_max; /* Max credits for a configured packet buffer
+ * in 64B granularity.
+ */
+ enum txgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
+};
+
+enum txgbe_dcb_pfc {
+ txgbe_dcb_pfc_disabled = 0,
+ txgbe_dcb_pfc_enabled,
+ txgbe_dcb_pfc_enabled_txonly,
+ txgbe_dcb_pfc_enabled_rxonly
+};
+
+/* Traffic class configuration */
+struct txgbe_dcb_tc_config {
+ struct txgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
+ enum txgbe_dcb_pfc pfc; /* Class based flow control setting */
+
+ u16 desc_credits_max; /* For Tx Descriptor arbitration */
+ u8 tc; /* Traffic class (TC) */
+};
+
+enum txgbe_dcb_pba {
+ /* PBA[0-7] each use 64KB FIFO */
+ txgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
+ /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
+ txgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
+};
+
+struct txgbe_dcb_num_tcs {
+ u8 pg_tcs;
+ u8 pfc_tcs;
+};
+
+struct txgbe_dcb_config {
+ struct txgbe_dcb_tc_config tc_config[TXGBE_DCB_TC_MAX];
+ struct txgbe_dcb_support support;
+ struct txgbe_dcb_num_tcs num_tcs;
+ u8 bw_percentage[TXGBE_DCB_BWG_MAX][2]; /* One each for Tx/Rx */
+ bool pfc_mode_enable;
+ bool round_robin_enable;
+
+ enum txgbe_dcb_pba rx_pba_cfg;
+
+ u32 link_speed; /* For bandwidth allocation validation purpose */
+ bool vt_mode;
+};
+
+/* DCB credits calculation */
+s32 txgbe_dcb_calculate_tc_credits_cee(struct txgbe_hw *hw,
+ struct txgbe_dcb_config *dcb_config,
+ u32 max_frame_size, u8 direction);
+
+/* DCB PFC */
+s32 txgbe_dcb_config_pfc(struct txgbe_hw *hw, u8 pfc_en, u8 *map);
+
+/* DCB unpack routines */
+void txgbe_dcb_unpack_pfc_cee(struct txgbe_dcb_config *cfg,
+ u8 *map, u8 *pfc_up);
+void txgbe_dcb_unpack_refill_cee(struct txgbe_dcb_config *cfg, int direction,
+ u16 *refill);
+void txgbe_dcb_unpack_max_cee(struct txgbe_dcb_config *cfg, u16 *max);
+void txgbe_dcb_unpack_bwgid_cee(struct txgbe_dcb_config *cfg, int direction,
+ u8 *bwgid);
+void txgbe_dcb_unpack_tsa_cee(struct txgbe_dcb_config *cfg, int direction,
+ u8 *tsa);
+void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *cfg, int direction,
+ u8 *map);
+u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *cfg, int direction, u8 up);
+
+#include "txgbe_dcb_hw.h"
+
+#endif /* _TXGBE_DCB_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_type.h"
+
+#include "txgbe_dcb.h"
+
+/**
+ * txgbe_dcb_config_rx_arbiter_raptor - Config Rx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Rx Packet Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_rx_arbiter_raptor(struct txgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa,
+ u8 *map)
+{
+ u32 reg = 0;
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u8 i = 0;
+
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; WSP)
+ */
+ reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP |
+ TXGBE_ARBRXCTL_DIA;
+ wr32(hw, TXGBE_ARBRXCTL, reg);
+
+ /*
+	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the corresponding
+	 * bits set for the UPs that need to be mapped to that TC.
+	 * e.g. if priorities 6 and 7 are to be mapped to a TC then the
+ * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+ */
+ reg = 0;
+ for (i = 0; i < TXGBE_DCB_UP_MAX; i++)
+ reg |= (map[i] << (i * TXGBE_RPUP2TC_UP_SHIFT));
+
+ wr32(hw, TXGBE_RPUP2TC, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ credit_refill = refill[i];
+ credit_max = max[i];
+ reg = TXGBE_QARBRXCFG_CRQ(credit_refill) |
+ TXGBE_QARBRXCFG_MCL(credit_max) |
+ TXGBE_QARBRXCFG_BWG(bwg_id[i]);
+
+ if (tsa[i] == txgbe_dcb_tsa_strict)
+ reg |= TXGBE_QARBRXCFG_LSP;
+
+ wr32(hw, TXGBE_QARBRXCFG(i), reg);
+ }
+
+ /*
+ * Configure Rx packet plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP;
+ wr32(hw, TXGBE_ARBRXCTL, reg);
+
+ return 0;
+}
+
+/**
+ * txgbe_dcb_config_tx_desc_arbiter_raptor - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_tx_desc_arbiter_raptor(struct txgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa)
+{
+ u32 reg, max_credits;
+ u8 i;
+
+ /* Clear the per-Tx queue credits; we use per-TC instead */
+ for (i = 0; i < 128; i++)
+ wr32(hw, TXGBE_QARBTXCRED(i), 0);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ max_credits = max[i];
+ reg = TXGBE_QARBTXCFG_MCL(max_credits) |
+ TXGBE_QARBTXCFG_CRQ(refill[i]) |
+ TXGBE_QARBTXCFG_BWG(bwg_id[i]);
+
+ if (tsa[i] == txgbe_dcb_tsa_group_strict_cee)
+ reg |= TXGBE_QARBTXCFG_GSP;
+
+ if (tsa[i] == txgbe_dcb_tsa_strict)
+ reg |= TXGBE_QARBTXCFG_LSP;
+
+ wr32(hw, TXGBE_QARBTXCFG(i), reg);
+ }
+
+ /*
+ * Configure Tx descriptor plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = TXGBE_ARBTXCTL_WSP | TXGBE_ARBTXCTL_RRM;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+
+ return 0;
+}
+
+/**
+ * txgbe_dcb_config_tx_data_arbiter_raptor - Config Tx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Tx Packet Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_tx_data_arbiter_raptor(struct txgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa,
+ u8 *map)
+{
+ u32 reg;
+ u8 i;
+
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; SP; arb delay)
+ */
+ reg = TXGBE_PARBTXCTL_SP |
+ TXGBE_PARBTXCTL_RECYC |
+ TXGBE_PARBTXCTL_DA;
+ wr32(hw, TXGBE_PARBTXCTL, reg);
+
+ /*
+	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the corresponding
+	 * bits set for the UPs that need to be mapped to that TC.
+	 * e.g. if priorities 6 and 7 are to be mapped to a TC then the
+ * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+ */
+ reg = 0;
+ for (i = 0; i < TXGBE_DCB_UP_MAX; i++)
+ reg |= TXGBE_DCBUP2TC_MAP(i, map[i]);
+
+ wr32(hw, TXGBE_PBRXUP2TC, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ reg = TXGBE_PARBTXCFG_CRQ(refill[i]) |
+ TXGBE_PARBTXCFG_MCL(max[i]) |
+ TXGBE_PARBTXCFG_BWG(bwg_id[i]);
+
+ if (tsa[i] == txgbe_dcb_tsa_group_strict_cee)
+ reg |= TXGBE_PARBTXCFG_GSP;
+
+ if (tsa[i] == txgbe_dcb_tsa_strict)
+ reg |= TXGBE_PARBTXCFG_LSP;
+
+ wr32(hw, TXGBE_PARBTXCFG(i), reg);
+ }
+
+ /*
+ * Configure Tx packet plane (recycle mode; SP; arb delay) and
+ * enable arbiter
+ */
+ reg = TXGBE_PARBTXCTL_SP | TXGBE_PARBTXCTL_RECYC;
+ wr32(hw, TXGBE_PARBTXCTL, reg);
+
+ return 0;
+}
+
+/**
+ * txgbe_dcb_config_tc_stats_raptor - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 txgbe_dcb_config_tc_stats_raptor(struct txgbe_hw *hw,
+ struct txgbe_dcb_config *dcb_config)
+{
+ u8 tc_count = 8;
+ bool vt_mode = false;
+
+ UNREFERENCED_PARAMETER(hw);
+
+ if (dcb_config != NULL) {
+ tc_count = dcb_config->num_tcs.pg_tcs;
+ vt_mode = dcb_config->vt_mode;
+ }
+
+ if (!((tc_count == 8 && !vt_mode) || tc_count == 4))
+ return TXGBE_ERR_PARAM;
+
+ return 0;
+}
+
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_DCB_HW_H_
+#define _TXGBE_DCB_HW_H_
+
+/* DCB PFC */
+s32 txgbe_dcb_config_pfc_raptor(struct txgbe_hw *hw, u8 pfc_en, u8 *map);
+
+/* DCB stats */
+s32 txgbe_dcb_config_tc_stats_raptor(struct txgbe_hw *hw,
+ struct txgbe_dcb_config *dcb_config);
+
+/* DCB config arbiters */
+s32 txgbe_dcb_config_tx_desc_arbiter_raptor(struct txgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa);
+s32 txgbe_dcb_config_tx_data_arbiter_raptor(struct txgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa,
+ u8 *map);
+s32 txgbe_dcb_config_rx_arbiter_raptor(struct txgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa, u8 *map);
+
+#endif /* _TXGBE_DCB_HW_H_ */
#include "txgbe_type.h"
#include "txgbe_mbx.h"
#include "txgbe_phy.h"
+#include "txgbe_dcb.h"
#include "txgbe_eeprom.h"
#include "txgbe_mng.h"
#include "txgbe_hw.h"
#define TXGBE_RAPTOR_RAR_ENTRIES 128
#define TXGBE_RAPTOR_MC_TBL_SIZE 128
#define TXGBE_RAPTOR_VFT_TBL_SIZE 128
+#define TXGBE_RAPTOR_RX_PB_SIZE 512 /*KB*/
static s32 txgbe_setup_copper_link_raptor(struct txgbe_hw *hw,
u32 speed,
return 0;
}
+/**
+ * txgbe_set_pba - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void txgbe_set_pba(struct txgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy)
+{
+ u32 pbsize = hw->mac.rx_pb_size;
+ int i = 0;
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ UNREFERENCED_PARAMETER(hw);
+
+ /* Reserve headroom */
+ pbsize -= headroom;
+
+ if (!num_pb)
+ num_pb = 1;
+
+ /* Divide remaining packet buffer space amongst the number of packet
+ * buffers requested using supplied strategy.
+ */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+		/* The txgbe_dcb_pba_80_48 strategy weights the first half of
+		 * the packet buffers with 5/8 of the packet buffer space.
+		 */
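+		/* Illustration (assuming a 512 KB buffer, 8 PBs and no
+		 * headroom): the first 4 PBs get (512 * 5) / (8 * 4) = 80 KB
+		 * each, leaving 192 KB, i.e. 48 KB per PB for the remaining
+		 * 4 PBs via the equal-strategy fall-through below.
+		 */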
+ rxpktsize = (pbsize * 5) / (num_pb * 4);
+ pbsize -= rxpktsize * (num_pb / 2);
+ rxpktsize <<= 10;
+ for (; i < (num_pb / 2); i++)
+ wr32(hw, TXGBE_PBRXSIZE(i), rxpktsize);
+ /* fall through - configure remaining packet buffers */
+ case PBA_STRATEGY_EQUAL:
+ rxpktsize = (pbsize / (num_pb - i));
+ rxpktsize <<= 10;
+ for (; i < num_pb; i++)
+ wr32(hw, TXGBE_PBRXSIZE(i), rxpktsize);
+ break;
+ default:
+ break;
+ }
+
+ /* Only support an equally distributed Tx packet buffer strategy. */
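+	/*
+	 * The Tx DMA threshold below is the per-buffer size in KB minus
+	 * TXGBE_TXPKT_SIZE_MAX, e.g. 160 KB / 8 buffers = 20 KB each and a
+	 * threshold of 20 - 10 = 10.
+	 */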
+ txpktsize = TXGBE_PBTXSIZE_MAX / num_pb;
+ txpbthresh = (txpktsize / 1024) - TXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < num_pb; i++) {
+ wr32(hw, TXGBE_PBTXSIZE(i), txpktsize);
+ wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
+ }
+
+	/* Clear unused TCs, if any, to zero buffer size */
+ for (; i < TXGBE_MAX_UP; i++) {
+ wr32(hw, TXGBE_PBRXSIZE(i), 0);
+ wr32(hw, TXGBE_PBTXSIZE(i), 0);
+ wr32(hw, TXGBE_PBTXDMATH(i), 0);
+ }
+}
+
/**
* txgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
* @hw: pointer to the hardware structure
/* Link */
mac->get_link_capabilities = txgbe_get_link_capabilities_raptor;
mac->check_link = txgbe_check_mac_link;
+ mac->setup_pba = txgbe_set_pba;
/* Manageability interface */
mac->set_fw_drv_ver = txgbe_hic_set_drv_ver;
mac->mcft_size = TXGBE_RAPTOR_MC_TBL_SIZE;
mac->vft_size = TXGBE_RAPTOR_VFT_TBL_SIZE;
mac->num_rar_entries = TXGBE_RAPTOR_RAR_ENTRIES;
+ mac->rx_pb_size = TXGBE_RAPTOR_RX_PB_SIZE;
mac->max_rx_queues = TXGBE_RAPTOR_MAX_RX_QUEUES;
mac->max_tx_queues = TXGBE_RAPTOR_MAX_TX_QUEUES;
void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw,
bool enable, int vf);
s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps);
+void txgbe_set_pba(struct txgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy);
void txgbe_clear_tx_pending(struct txgbe_hw *hw);
s32 txgbe_reset_pipeline_raptor(struct txgbe_hw *hw);
#define _TXGBE_TYPE_H_
#define TXGBE_DCB_TC_MAX TXGBE_MAX_UP
+#define TXGBE_DCB_UP_MAX TXGBE_MAX_UP
+#define TXGBE_DCB_BWG_MAX TXGBE_MAX_UP
#define TXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
#define TXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
#define TXGBE_FRAME_SIZE_MAX (9728) /* Maximum frame size, +FCS */
#define TXGBE_FRAME_SIZE_DFT (1518) /* Default frame size, +FCS */
#define TXGBE_NUM_POOL (64)
+#define TXGBE_PBTXSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
+#define TXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
#define TXGBE_MAX_UP 8
#define TXGBE_MAX_QP (128)
#define TXGBE_MAX_UTA 128
struct txgbe_thermal_diode_data sensor[1];
};
+/* Packet buffer allocation strategies */
+enum {
+ PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL
+ PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED
+};
+
/* Physical layer type */
#define TXGBE_PHYSICAL_LAYER_UNKNOWN 0
#define TXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
u32 mcft_size;
u32 vft_size;
u32 num_rar_entries;
+ u32 rx_pb_size;
u32 max_tx_queues;
u32 max_rx_queues;
-
u8 san_mac_rar_index;
bool get_link_status;
u64 orig_autoc; /* cached value of AUTOC */
return 0;
}
+static void
+txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
+{
+ int i;
+ u8 bwgp;
+ struct txgbe_dcb_tc_config *tc;
+
+ UNREFERENCED_PARAMETER(hw);
+
+ dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
+ dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
+ bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
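+	/*
+	 * With 8 TCs, 100 / 8 = 12; adding (i & 1) below alternates 12/13 so
+	 * the per-TC bandwidth group percentages sum to 100.
+	 */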
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
+ tc->pfc = txgbe_dcb_pfc_disabled;
+ }
+
+ /* Initialize default user to priority mapping, UPx->TC0 */
+ tc = &dcb_config->tc_config[0];
+ tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+ tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+ for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
+ dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
+ dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
+ }
+ dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
+ dcb_config->pfc_mode_enable = false;
+ dcb_config->vt_mode = true;
+ dcb_config->round_robin_enable = false;
+ /* support all DCB capabilities */
+ dcb_config->support.capabilities = 0xFF;
+}
+
/*
* Ensure that all locks are released before first NVM or PHY access
*/
struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
+ struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
const struct rte_memzone *mz;
uint32_t ctrl_ext;
/* Unlock any pending hardware semaphore */
txgbe_swfw_lock_reset(hw);
+	/* Initialize DCB configuration */
+ memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
+ txgbe_dcb_init(hw, dcb_config);
+
err = hw->rom.init_params(hw);
if (err != 0) {
PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
txgbe_vmdq_vlan_hw_filter_enable(dev);
}
+ /* Configure DCB hw */
+ txgbe_configure_pb(dev);
+ txgbe_configure_port(dev);
+ txgbe_configure_dcb(dev);
+
/* Restore vf rate limit */
if (vfinfo != NULL) {
for (vf = 0; vf < pci_dev->max_vfs; vf++)
struct txgbe_ethertype_filter ethertype_filters[TXGBE_ETF_ID_MAX];
};
+/* The configuration of bandwidth */
+struct txgbe_bw_conf {
+ uint8_t tc_num; /* Number of TCs. */
+};
+
/*
* Structure to store private data for each driver instance (for each port).
*/
struct txgbe_stat_mappings stat_mappings;
struct txgbe_vfta shadow_vfta;
struct txgbe_hwstrip hwstrip;
+ struct txgbe_dcb_config dcb_config;
struct txgbe_mirror_info mr_data;
struct txgbe_vf_info *vfdata;
struct txgbe_uta_info uta_info;
struct txgbe_filter_info filter;
+ struct txgbe_bw_conf bw_conf;
bool rx_bulk_alloc_allowed;
/* For RSS reta table update */
uint8_t rss_reta_updated;
#define TXGBE_DEV_HWSTRIP(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->hwstrip)
+#define TXGBE_DEV_DCB_CONFIG(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->dcb_config)
+
#define TXGBE_DEV_VFDATA(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->vfdata)
#define TXGBE_DEV_FILTER(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->filter)
+#define TXGBE_DEV_BW_CONF(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->bw_conf)
+
/*
* RX/TX function prototypes
void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
+void txgbe_configure_pb(struct rte_eth_dev *dev);
+void txgbe_configure_port(struct rte_eth_dev *dev);
+void txgbe_configure_dcb(struct rte_eth_dev *dev);
+
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
int wait_to_complete);
}
#define NUM_VFTA_REGISTERS 128
+#define NIC_RX_BUFFER_SIZE 0x200 /* 512 KB Rx packet buffer */
+
+static void
+txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_vmdq_dcb_conf *cfg;
+ struct txgbe_hw *hw;
+ enum rte_eth_nb_pools num_pools;
+ uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
+ uint16_t pbsize;
+ uint8_t nb_tcs; /* number of traffic classes */
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+ cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ num_pools = cfg->nb_queue_pools;
+ /* Check we have a valid number of pools */
+ if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+ txgbe_rss_disable(dev);
+ return;
+ }
+ /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
+ nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+
+ /*
+ * split rx buffer up into sections, each for 1 traffic class
+ */
+ pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ for (i = 0; i < nb_tcs; i++) {
+ uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
+
+		rxpbsize &= (~(0x3FF << 10)); /* clear 10 bits. */
+		rxpbsize |= (pbsize << 10); /* set value */
+ wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
+ }
+ /* zero alloc all unused TCs */
+ for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
+
+		rxpbsize &= (~(0x3FF << 10)); /* clear 10 bits. */
+ wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
+ }
+
+ if (num_pools == ETH_16_POOLS) {
+ mrqc = TXGBE_PORTCTL_NUMTC_8;
+ mrqc |= TXGBE_PORTCTL_NUMVT_16;
+ } else {
+ mrqc = TXGBE_PORTCTL_NUMTC_4;
+ mrqc |= TXGBE_PORTCTL_NUMVT_32;
+ }
+ wr32m(hw, TXGBE_PORTCTL,
+ TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK, mrqc);
+
+ vt_ctl = TXGBE_POOLCTL_RPLEN;
+ if (cfg->enable_default_pool)
+ vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
+ else
+ vt_ctl |= TXGBE_POOLCTL_DEFDSA;
+
+ wr32(hw, TXGBE_POOLCTL, vt_ctl);
+
+ queue_mapping = 0;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ /*
+ * mapping is done with 3 bits per priority,
+ * so shift by i*3 each time
+ */
+ queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
+
+ wr32(hw, TXGBE_RPUP2TC, queue_mapping);
+
+ wr32(hw, TXGBE_ARBRXCTL, TXGBE_ARBRXCTL_RRM);
+
+ /* enable vlan filtering and allow all vlan tags through */
+ vlanctrl = rd32(hw, TXGBE_VLANCTL);
+ vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+ wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+ /* enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+ wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
+
+ wr32(hw, TXGBE_POOLRXENA(0),
+ num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+ wr32(hw, TXGBE_ETHADDRIDX, 0);
+ wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
+ wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
+
+ /* set up filters for vlan tags as configured */
+ for (i = 0; i < cfg->nb_pool_maps; i++) {
+ /* set vlan id in VF register and set the valid bit */
+ wr32(hw, TXGBE_PSRVLANIDX, i);
+ wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
+ (cfg->pool_map[i].vlan_id & 0xFFF)));
+
+ wr32(hw, TXGBE_PSRVLANPLM(0), cfg->pool_map[i].pools);
+ }
+}
+
+/**
+ * txgbe_dcb_tx_hw_config - Configure general DCB TX parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static void
+txgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ uint32_t reg;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Disable the Tx desc arbiter */
+ reg = rd32(hw, TXGBE_ARBTXCTL);
+ reg |= TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+
+	/* Enable DCB for Tx with 8 or 4 TCs */
+	reg = rd32(hw, TXGBE_PORTCTL);
+	reg &= ~TXGBE_PORTCTL_NUMTC_MASK;
+ reg |= TXGBE_PORTCTL_DCB;
+ if (dcb_config->num_tcs.pg_tcs == 8)
+ reg |= TXGBE_PORTCTL_NUMTC_8;
+ else
+ reg |= TXGBE_PORTCTL_NUMTC_4;
+
+ wr32(hw, TXGBE_PORTCTL, reg);
+
+ /* Enable the Tx desc arbiter */
+ reg = rd32(hw, TXGBE_ARBTXCTL);
+ reg &= ~TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+}
+
+/**
+ * txgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static void
+txgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_INIT_FUNC_TRACE();
+	/* PF VF Transmit Enable */
+ wr32(hw, TXGBE_POOLTXENA(0),
+ vmdq_tx_conf->nb_queue_pools ==
+ ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+	/* Configure general DCB TX parameters */
+ txgbe_dcb_tx_hw_config(dev, dcb_config);
+}
+
+static void
+txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ struct txgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ /* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
+ if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
+ dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ } else {
+ dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ }
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = vmdq_rx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+static void
+txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+ struct txgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ /* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
+ if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
+ dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ } else {
+ dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ }
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = vmdq_tx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+static void
+txgbe_dcb_rx_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_dcb_rx_conf *rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ struct txgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = rx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+static void
+txgbe_dcb_tx_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_dcb_tx_conf *tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+ struct txgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = tx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+/**
+ * txgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static void
+txgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ uint32_t reg;
+ uint32_t vlanctrl;
+ uint8_t i;
+ uint32_t q;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; WSP)
+ */
+ reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP | TXGBE_ARBRXCTL_DIA;
+ wr32(hw, TXGBE_ARBRXCTL, reg);
+
+ reg = rd32(hw, TXGBE_PORTCTL);
+ reg &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+ if (dcb_config->num_tcs.pg_tcs == 4) {
+ reg |= TXGBE_PORTCTL_NUMTC_4;
+ if (dcb_config->vt_mode)
+ reg |= TXGBE_PORTCTL_NUMVT_32;
+ else
+ wr32(hw, TXGBE_POOLCTL, 0);
+ }
+
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ reg |= TXGBE_PORTCTL_NUMTC_8;
+ if (dcb_config->vt_mode)
+ reg |= TXGBE_PORTCTL_NUMVT_16;
+ else
+ wr32(hw, TXGBE_POOLCTL, 0);
+ }
+
+ wr32(hw, TXGBE_PORTCTL, reg);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		/* Disable drop for all queues in VMDQ mode */
+		for (q = 0; q < TXGBE_MAX_RX_QUEUE_NUM; q++) {
+			u32 val = 1 << (q % 32);
+			wr32m(hw, TXGBE_QPRXDROP(q / 32), val, 0);
+ }
+ } else {
+ /* Enable drop for all queues in SRIOV mode */
+ for (q = 0; q < TXGBE_MAX_RX_QUEUE_NUM; q++) {
+ u32 val = 1 << (q % 32);
+ wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
+ }
+ }
+
+ /* VLNCTL: enable vlan filtering and allow all vlan tags through */
+ vlanctrl = rd32(hw, TXGBE_VLANCTL);
+ vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+ wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+ /* VLANTBL - enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+ wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
+
+ /*
+ * Configure Rx packet plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP;
+ wr32(hw, TXGBE_ARBRXCTL, reg);
+}
+
+static void
+txgbe_dcb_hw_arbite_rx_config(struct txgbe_hw *hw, uint16_t *refill,
+ uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+ txgbe_dcb_config_rx_arbiter_raptor(hw, refill, max, bwg_id,
+ tsa, map);
+}
+
+static void
+txgbe_dcb_hw_arbite_tx_config(struct txgbe_hw *hw, uint16_t *refill,
+ uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+ switch (hw->mac.type) {
+ case txgbe_mac_raptor:
+ txgbe_dcb_config_tx_desc_arbiter_raptor(hw, refill,
+ max, bwg_id, tsa);
+ txgbe_dcb_config_tx_data_arbiter_raptor(hw, refill,
+ max, bwg_id, tsa, map);
+ break;
+ default:
+ break;
+ }
+}
+
+#define DCB_RX_CONFIG 1
+#define DCB_TX_CONFIG 1
+#define DCB_TX_PB 1024
+/**
+ * txgbe_dcb_hw_configure - Enable DCB and configure
+ * general DCB in VT mode and non-VT mode parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static int
+txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ int ret = 0;
+ uint8_t i, nb_tcs;
+ uint16_t pbsize, rx_buffer_size;
+ uint8_t config_dcb_rx = 0;
+ uint8_t config_dcb_tx = 0;
+ uint8_t tsa[TXGBE_DCB_TC_MAX] = {0};
+ uint8_t bwgid[TXGBE_DCB_TC_MAX] = {0};
+ uint16_t refill[TXGBE_DCB_TC_MAX] = {0};
+ uint16_t max[TXGBE_DCB_TC_MAX] = {0};
+ uint8_t map[TXGBE_DCB_TC_MAX] = {0};
+ struct txgbe_dcb_tc_config *tc;
+ uint32_t max_frame = dev->data->mtu +
+ RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
+
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_DCB:
+ dcb_config->vt_mode = true;
+ config_dcb_rx = DCB_RX_CONFIG;
+ /*
+ * get dcb and VT rx configuration parameters
+ * from rte_eth_conf
+ */
+ txgbe_vmdq_dcb_rx_config(dev, dcb_config);
+		/* Configure general VMDQ and DCB RX parameters */
+ txgbe_vmdq_dcb_configure(dev);
+ break;
+ case ETH_MQ_RX_DCB:
+ case ETH_MQ_RX_DCB_RSS:
+ dcb_config->vt_mode = false;
+ config_dcb_rx = DCB_RX_CONFIG;
+		/* Get dcb RX configuration parameters from rte_eth_conf */
+		txgbe_dcb_rx_config(dev, dcb_config);
+		/* Configure general DCB RX parameters */
+ txgbe_dcb_rx_hw_config(dev, dcb_config);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
+ break;
+ }
+ switch (dev->data->dev_conf.txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB:
+ dcb_config->vt_mode = true;
+ config_dcb_tx = DCB_TX_CONFIG;
+ /* get DCB and VT TX configuration parameters
+ * from rte_eth_conf
+ */
+ txgbe_dcb_vt_tx_config(dev, dcb_config);
+ /* Configure general VMDQ and DCB TX parameters */
+ txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
+ break;
+
+ case ETH_MQ_TX_DCB:
+ dcb_config->vt_mode = false;
+ config_dcb_tx = DCB_TX_CONFIG;
+ /* get DCB TX configuration parameters from rte_eth_conf */
+ txgbe_dcb_tx_config(dev, dcb_config);
+ /* Configure general DCB TX parameters */
+ txgbe_dcb_tx_hw_config(dev, dcb_config);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
+ break;
+ }
+
+ nb_tcs = dcb_config->num_tcs.pfc_tcs;
+ /* Unpack map */
+ txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
+ if (nb_tcs == ETH_4_TCS) {
+ /* Avoid un-configured priority mapping to TC0 */
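+		/*
+		 * Typically only priorities 0-3 are explicitly mapped when
+		 * 4 TCs are used; the rest would otherwise all land on TC0,
+		 * so spread priorities 4-7 over the TCs not referenced by
+		 * priorities 0-3.
+		 */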
+ uint8_t j = 4;
+ uint8_t mask = 0xFF;
+
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+ mask = (uint8_t)(mask & (~(1 << map[i])));
+ for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
+ if ((mask & 0x1) && j < ETH_DCB_NUM_USER_PRIORITIES)
+ map[j++] = i;
+ mask >>= 1;
+ }
+ /* Re-configure 4 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
+ }
+ for (; i < TXGBE_DCB_TC_MAX; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = 0;
+ }
+ } else {
+ /* Re-configure 8 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ }
+ }
+
+ rx_buffer_size = NIC_RX_BUFFER_SIZE;
+
+ if (config_dcb_rx) {
+ /* Set RX buffer size */
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
+ uint32_t rxpbsize = pbsize << 10;
+
+ for (i = 0; i < nb_tcs; i++)
+ wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
+
+ /* zero alloc all unused TCs */
+ for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ wr32(hw, TXGBE_PBRXSIZE(i), 0);
+ }
+ if (config_dcb_tx) {
+ /* Only support an equally distributed
+ * Tx packet buffer strategy.
+ */
+ uint32_t txpktsize = TXGBE_PBTXSIZE_MAX / nb_tcs;
+ uint32_t txpbthresh = (txpktsize / DCB_TX_PB) -
+ TXGBE_TXPKT_SIZE_MAX;
+
+ for (i = 0; i < nb_tcs; i++) {
+ wr32(hw, TXGBE_PBTXSIZE(i), txpktsize);
+ wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
+ }
+		/* Clear unused TCs, if any, to zero buffer size */
+ for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ wr32(hw, TXGBE_PBTXSIZE(i), 0);
+ wr32(hw, TXGBE_PBTXDMATH(i), 0);
+ }
+ }
+
+	/* Calculate traffic class credits */
+ txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
+ TXGBE_DCB_TX_CONFIG);
+ txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
+ TXGBE_DCB_RX_CONFIG);
+
+ if (config_dcb_rx) {
+ /* Unpack CEE standard containers */
+ txgbe_dcb_unpack_refill_cee(dcb_config,
+ TXGBE_DCB_RX_CONFIG, refill);
+ txgbe_dcb_unpack_max_cee(dcb_config, max);
+ txgbe_dcb_unpack_bwgid_cee(dcb_config,
+ TXGBE_DCB_RX_CONFIG, bwgid);
+ txgbe_dcb_unpack_tsa_cee(dcb_config,
+ TXGBE_DCB_RX_CONFIG, tsa);
+ /* Configure PG(ETS) RX */
+ txgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
+ }
+
+ if (config_dcb_tx) {
+ /* Unpack CEE standard containers */
+ txgbe_dcb_unpack_refill_cee(dcb_config,
+ TXGBE_DCB_TX_CONFIG, refill);
+ txgbe_dcb_unpack_max_cee(dcb_config, max);
+ txgbe_dcb_unpack_bwgid_cee(dcb_config,
+ TXGBE_DCB_TX_CONFIG, bwgid);
+ txgbe_dcb_unpack_tsa_cee(dcb_config,
+ TXGBE_DCB_TX_CONFIG, tsa);
+ /* Configure PG(ETS) TX */
+ txgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
+ }
+
+ /* Configure queue statistics registers */
+ txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
+
+ return ret;
+}
+
+void txgbe_configure_pb(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ int hdrm;
+ int tc = dev_conf->rx_adv_conf.dcb_rx_conf.nb_tcs;
+
+	/* Reserve 256 KB of the 512 KB Rx packet buffer for flow director */
+	hdrm = 256; /* KB */
+
+ hw->mac.setup_pba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
+}
+
+void txgbe_configure_port(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int i = 0;
+ uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
+ 0x9100, 0x9200,
+ 0x0000, 0x0000,
+ 0x0000, 0x0000};
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* default outer vlan tpid */
+ wr32(hw, TXGBE_EXTAG,
+ TXGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
+ TXGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));
+
+ /* default inner vlan tpid */
+ wr32m(hw, TXGBE_VLANCTL,
+ TXGBE_VLANCTL_TPID_MASK,
+ TXGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
+ wr32m(hw, TXGBE_DMATXCTRL,
+ TXGBE_DMATXCTRL_TPID_MASK,
+ TXGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));
+
+ /* default vlan tpid filters */
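+	/* two TPIDs are packed per TAGTPID register: even entries of tpids[]
+	 * fill the LSB half, odd entries the MSB half
+	 */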
+ for (i = 0; i < 8; i++) {
+ wr32m(hw, TXGBE_TAGTPID(i / 2),
+ (i % 2 ? TXGBE_TAGTPID_MSB_MASK
+ : TXGBE_TAGTPID_LSB_MASK),
+ (i % 2 ? TXGBE_TAGTPID_MSB(tpids[i])
+ : TXGBE_TAGTPID_LSB(tpids[i])));
+ }
+
+ /* default vxlan port */
+ wr32(hw, TXGBE_VXLANPORT, 4789);
+}
+
+/**
+ * txgbe_configure_dcb - Configure DCB Hardware
+ * @dev: pointer to rte_eth_dev
+ */
+void txgbe_configure_dcb(struct rte_eth_dev *dev)
+{
+ struct txgbe_dcb_config *dcb_cfg = TXGBE_DEV_DCB_CONFIG(dev);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* check support mq_mode for DCB */
+ if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB &&
+ dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB &&
+ dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)
+ return;
+
+ if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+ return;
+
+ /** Configure DCB hardware **/
+ txgbe_dcb_hw_configure(dev, dcb_cfg);
+}
/*
* VMDq only support for 10 GbE NIC.
if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
/*
* SRIOV inactive scheme
- * any RSS w/o VMDq multi-queue setting
+ * any DCB/RSS w/o VMDq multi-queue setting
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_DCB_RSS:
case ETH_MQ_RX_VMDQ_RSS:
txgbe_rss_configure(dev);
break;
+ case ETH_MQ_RX_VMDQ_DCB:
+ txgbe_vmdq_dcb_configure(dev);
+ break;
+
case ETH_MQ_RX_VMDQ_ONLY:
txgbe_vmdq_rx_hw_configure(dev);
break;
case ETH_MQ_RX_VMDQ_RSS:
txgbe_config_vf_rss(dev);
break;
+ case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_DCB:
+ /* In SRIOV, the configuration is the same as VMDq case */
+ txgbe_vmdq_dcb_configure(dev);
+ break;
+ /* DCB/RSS together with SRIOV is not supported */
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case ETH_MQ_RX_DCB_RSS:
+ PMD_INIT_LOG(ERR,
+ "Could not support DCB/RSS with VMDq & SRIOV");
+ return -1;
default:
txgbe_config_vf_default(dev);
break;