+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = rx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+static void
+txgbe_dcb_tx_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_dcb_tx_conf *tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+ struct txgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
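+ /* the same TC count is used for both priority groups and PFC */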
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
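+ /*
+ * Illustrative example (hypothetical configuration): with 4 TCs and
+ * dcb_tc = {0, 1, 2, 3, 0, 1, 2, 3}, TC0's bitmap becomes 0x11
+ * (priorities 0 and 4), TC1's 0x22, and so on.
+ */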
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = tx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+/**
+ * txgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static void
+txgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ uint32_t reg;
+ uint32_t vlanctrl;
+ uint8_t i;
+ uint32_t q;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ /*
+ * Disable the arbiter (DIA) before changing parameters,
+ * keeping recycle mode (RRM) and WSP enabled
+ */
+ reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP | TXGBE_ARBRXCTL_DIA;
+ wr32(hw, TXGBE_ARBRXCTL, reg);
+
+ reg = rd32(hw, TXGBE_PORTCTL);
+ reg &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
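+ /*
+ * With 128 hardware queues, 4 TCs pair with up to 32 pools and
+ * 8 TCs with up to 16 pools (pools * TCs = queues), presumably
+ * why NUMVT is fixed per TC count below.
+ */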
+ if (dcb_config->num_tcs.pg_tcs == 4) {
+ reg |= TXGBE_PORTCTL_NUMTC_4;
+ if (dcb_config->vt_mode)
+ reg |= TXGBE_PORTCTL_NUMVT_32;
+ else
+ wr32(hw, TXGBE_POOLCTL, 0);
+ }
+
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ reg |= TXGBE_PORTCTL_NUMTC_8;
+ if (dcb_config->vt_mode)
+ reg |= TXGBE_PORTCTL_NUMVT_16;
+ else
+ wr32(hw, TXGBE_POOLCTL, 0);
+ }
+
+ wr32(hw, TXGBE_PORTCTL, reg);
+
+ /*
+ * Enable drop for all queues. The same setting applies whether
+ * or not SR-IOV is active.
+ */
+ for (q = 0; q < TXGBE_MAX_RX_QUEUE_NUM; q++) {
+ u32 val = 1 << (q % 32);
+ wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
+ }
+
+ /* VLANCTL: enable vlan filtering and allow all vlan tags through */
+ vlanctrl = rd32(hw, TXGBE_VLANCTL);
+ vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+ wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+ /* VLANTBL - enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+ wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
+
+ /*
+ * Configure Rx packet plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP;
+ wr32(hw, TXGBE_ARBRXCTL, reg);
+}
+
+static void
+txgbe_dcb_hw_arbite_rx_config(struct txgbe_hw *hw, uint16_t *refill,
+ uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+ txgbe_dcb_config_rx_arbiter_raptor(hw, refill, max, bwg_id,
+ tsa, map);
+}
+
+static void
+txgbe_dcb_hw_arbite_tx_config(struct txgbe_hw *hw, uint16_t *refill,
+ uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+ switch (hw->mac.type) {
+ case txgbe_mac_raptor:
+ txgbe_dcb_config_tx_desc_arbiter_raptor(hw, refill,
+ max, bwg_id, tsa);
+ txgbe_dcb_config_tx_data_arbiter_raptor(hw, refill,
+ max, bwg_id, tsa, map);
+ break;
+ default:
+ break;
+ }
+}
+
+#define DCB_RX_CONFIG 1
+#define DCB_TX_CONFIG 1
+#define DCB_TX_PB 1024
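+/*
+ * DCB_TX_PB (1024) converts the per-TC buffer size from bytes to KB
+ * in the Tx DMA threshold calculation below, leaving headroom for one
+ * maximum-size packet (an inference from usage, not documented).
+ */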
+/**
+ * txgbe_dcb_hw_configure - Enable DCB and configure
+ * general DCB in VT mode and non-VT mode parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static int
+txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ int ret = 0;
+ uint8_t i, pfc_en, nb_tcs;
+ uint16_t pbsize, rx_buffer_size;
+ uint8_t config_dcb_rx = 0;
+ uint8_t config_dcb_tx = 0;
+ uint8_t tsa[TXGBE_DCB_TC_MAX] = {0};
+ uint8_t bwgid[TXGBE_DCB_TC_MAX] = {0};
+ uint16_t refill[TXGBE_DCB_TC_MAX] = {0};
+ uint16_t max[TXGBE_DCB_TC_MAX] = {0};
+ uint8_t map[TXGBE_DCB_TC_MAX] = {0};
+ struct txgbe_dcb_tc_config *tc;
+ uint32_t max_frame = dev->data->mtu +
+ RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
+
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_DCB:
+ dcb_config->vt_mode = true;
+ config_dcb_rx = DCB_RX_CONFIG;
+ /*
+ * get dcb and VT rx configuration parameters
+ * from rte_eth_conf
+ */
+ txgbe_vmdq_dcb_rx_config(dev, dcb_config);
+ /* Configure general VMDQ and DCB RX parameters */
+ txgbe_vmdq_dcb_configure(dev);
+ break;
+ case ETH_MQ_RX_DCB:
+ case ETH_MQ_RX_DCB_RSS:
+ dcb_config->vt_mode = false;
+ config_dcb_rx = DCB_RX_CONFIG;
+ /* Get dcb RX configuration parameters from rte_eth_conf */
+ txgbe_dcb_rx_config(dev, dcb_config);
+ /* Configure general DCB RX parameters */
+ txgbe_dcb_rx_hw_config(dev, dcb_config);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
+ break;
+ }
+ switch (dev->data->dev_conf.txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB:
+ dcb_config->vt_mode = true;
+ config_dcb_tx = DCB_TX_CONFIG;
+ /* get DCB and VT TX configuration parameters
+ * from rte_eth_conf
+ */
+ txgbe_dcb_vt_tx_config(dev, dcb_config);
+ /* Configure general VMDQ and DCB TX parameters */
+ txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
+ break;
+
+ case ETH_MQ_TX_DCB:
+ dcb_config->vt_mode = false;
+ config_dcb_tx = DCB_TX_CONFIG;
+ /* get DCB TX configuration parameters from rte_eth_conf */
+ txgbe_dcb_tx_config(dev, dcb_config);
+ /* Configure general DCB TX parameters */
+ txgbe_dcb_tx_hw_config(dev, dcb_config);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
+ break;
+ }
+
+ nb_tcs = dcb_config->num_tcs.pfc_tcs;
+ /* Unpack map */
+ txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
+ if (nb_tcs == ETH_4_TCS) {
+ /* Avoid mapping unconfigured priorities to TC0: clear the
+ * TCs already claimed by priorities 0-3 from the mask, then
+ * spread priorities 4-7 across the remaining TCs.
+ */
+ uint8_t j = 4;
+ uint8_t mask = 0xFF;
+
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+ mask = (uint8_t)(mask & (~(1 << map[i])));
+ for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
+ if ((mask & 0x1) && j < ETH_DCB_NUM_USER_PRIORITIES)
+ map[j++] = i;
+ mask >>= 1;
+ }
+ /* Re-configure 4 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
+ }
+ for (; i < TXGBE_DCB_TC_MAX; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = 0;
+ }
+ } else {
+ /* Re-configure 8 TCs BW */
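+ /* 100 / 8 is 12; the (i & 1) term adds one percent to the
+ * four odd TCs so the eight shares sum to exactly 100.
+ */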
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ }
+ }
+
+ rx_buffer_size = NIC_RX_BUFFER_SIZE;
+
+ if (config_dcb_rx) {
+ /* Set RX buffer size */
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
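+ /* pbsize is presumably in KB; the shift below converts it to bytes */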
+ uint32_t rxpbsize = pbsize << 10;
+
+ for (i = 0; i < nb_tcs; i++)
+ wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
+
+ /* allocate zero bytes to all unused TCs */
+ for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ wr32(hw, TXGBE_PBRXSIZE(i), 0);
+ }
+ if (config_dcb_tx) {
+ /* Only support an equally distributed
+ * Tx packet buffer strategy.
+ */
+ uint32_t txpktsize = TXGBE_PBTXSIZE_MAX / nb_tcs;
+ uint32_t txpbthresh = (txpktsize / DCB_TX_PB) -
+ TXGBE_TXPKT_SIZE_MAX;
+
+ for (i = 0; i < nb_tcs; i++) {
+ wr32(hw, TXGBE_PBTXSIZE(i), txpktsize);
+ wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
+ }
+ /* Clear unused TCs, if any, to zero buffer size */
+ for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ wr32(hw, TXGBE_PBTXSIZE(i), 0);
+ wr32(hw, TXGBE_PBTXDMATH(i), 0);
+ }
+ }
+
+ /* Calculate traffic class credits */
+ txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
+ TXGBE_DCB_TX_CONFIG);
+ txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
+ TXGBE_DCB_RX_CONFIG);
+
+ if (config_dcb_rx) {
+ /* Unpack CEE standard containers */
+ txgbe_dcb_unpack_refill_cee(dcb_config,
+ TXGBE_DCB_RX_CONFIG, refill);
+ txgbe_dcb_unpack_max_cee(dcb_config, max);
+ txgbe_dcb_unpack_bwgid_cee(dcb_config,
+ TXGBE_DCB_RX_CONFIG, bwgid);
+ txgbe_dcb_unpack_tsa_cee(dcb_config,
+ TXGBE_DCB_RX_CONFIG, tsa);
+ /* Configure PG(ETS) RX */
+ txgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
+ }
+
+ if (config_dcb_tx) {
+ /* Unpack CEE standard containers */
+ txgbe_dcb_unpack_refill_cee(dcb_config,
+ TXGBE_DCB_TX_CONFIG, refill);
+ txgbe_dcb_unpack_max_cee(dcb_config, max);
+ txgbe_dcb_unpack_bwgid_cee(dcb_config,
+ TXGBE_DCB_TX_CONFIG, bwgid);
+ txgbe_dcb_unpack_tsa_cee(dcb_config,
+ TXGBE_DCB_TX_CONFIG, tsa);
+ /* Configure PG(ETS) TX */
+ txgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
+ }
+
+ /* Configure queue statistics registers */
+ txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
+
+ /* Check if the PFC is supported */
+ if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
+ for (i = 0; i < nb_tcs; i++) {
+ /* Set high_water to 3/4 and low_water to 1/4 of the per-TC
+ * buffer; e.g. with 8 TCs on a 512KB buffer, high_water is
+ * 48KB and low_water is 16KB by default.
+ */
+ hw->fc.high_water[i] = (pbsize * 3) / 4;
+ hw->fc.low_water[i] = pbsize / 4;
+ /* Enable pfc for this TC */
+ tc = &dcb_config->tc_config[i];
+ tc->pfc = txgbe_dcb_pfc_enabled;
+ }
+ txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+ if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+ pfc_en &= 0x0F;
+ ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
+ }
+
+ return ret;
+}
+
+void txgbe_configure_pb(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ int hdrm;
+ int tc = dev_conf->rx_adv_conf.dcb_rx_conf.nb_tcs;
+
+ /* Reserve 256KB of the RX packet buffer as headroom for the
+ * flow director
+ */
+ hdrm = 256; /* KB */
+
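+ /* PBA_STRATEGY_EQUAL presumably splits the remaining buffer evenly across the TCs */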
+ hw->mac.setup_pba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
+}
+
+void txgbe_configure_port(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int i = 0;
+ uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
+ 0x9100, 0x9200,
+ 0x0000, 0x0000,
+ 0x0000, 0x0000};
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* default outer vlan tpid */
+ wr32(hw, TXGBE_EXTAG,
+ TXGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
+ TXGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));
+
+ /* default inner vlan tpid */
+ wr32m(hw, TXGBE_VLANCTL,
+ TXGBE_VLANCTL_TPID_MASK,
+ TXGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
+ wr32m(hw, TXGBE_DMATXCTRL,
+ TXGBE_DMATXCTRL_TPID_MASK,
+ TXGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));
+
+ /* default vlan tpid filters */
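+ /* Each TAGTPID register packs two TPIDs: even indexes in the
+ * low 16 bits, odd indexes in the high 16 bits.
+ */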
+ for (i = 0; i < 8; i++) {
+ wr32m(hw, TXGBE_TAGTPID(i / 2),
+ (i % 2 ? TXGBE_TAGTPID_MSB_MASK
+ : TXGBE_TAGTPID_LSB_MASK),
+ (i % 2 ? TXGBE_TAGTPID_MSB(tpids[i])
+ : TXGBE_TAGTPID_LSB(tpids[i])));
+ }
+
+ /* default vxlan port (IANA-assigned UDP port 4789) */
+ wr32(hw, TXGBE_VXLANPORT, 4789);
+}
+
+/**
+ * txgbe_configure_dcb - Configure DCB Hardware
+ * @dev: pointer to rte_eth_dev
+ */
+void txgbe_configure_dcb(struct rte_eth_dev *dev)
+{
+ struct txgbe_dcb_config *dcb_cfg = TXGBE_DEV_DCB_CONFIG(dev);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* check support mq_mode for DCB */
+ if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB &&
+ dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB &&
+ dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)
+ return;
+
+ if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+ return;
+
+ /* Configure DCB hardware */
+ txgbe_dcb_hw_configure(dev, dcb_cfg);
+}
+
+/*
+ * VMDq is only supported on 10 GbE NICs.
+ */
+static void
+txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_vmdq_rx_conf *cfg;
+ struct txgbe_hw *hw;
+ enum rte_eth_nb_pools num_pools;
+ uint32_t mrqc, vt_ctl, vlanctrl;
+ uint32_t vmolr = 0;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+ cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+ num_pools = cfg->nb_queue_pools;
+
+ txgbe_rss_disable(dev);
+
+ /* enable vmdq */
+ mrqc = TXGBE_PORTCTL_NUMVT_64;
+ wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mrqc);
+
+ /* turn on virtualisation and set the default pool */
+ vt_ctl = TXGBE_POOLCTL_RPLEN;
+ if (cfg->enable_default_pool)
+ vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
+ else
+ vt_ctl |= TXGBE_POOLCTL_DEFDSA;
+
+ wr32(hw, TXGBE_POOLCTL, vt_ctl);
+
+ for (i = 0; i < (int)num_pools; i++) {
+ vmolr = txgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
+ wr32(hw, TXGBE_POOLETHCTL(i), vmolr);
+ }
+
+ /* enable vlan filtering and allow all vlan tags through */
+ vlanctrl = rd32(hw, TXGBE_VLANCTL);
+ vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+ wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+ /* enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+ wr32(hw, TXGBE_VLANTBL(i), UINT32_MAX);
+
+ /* enable receive: always the first 32 pools, plus the upper 32 when 64 pools are configured */
+ wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
+ if (num_pools == ETH_64_POOLS)
+ wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
+
+ /*
+ * Allow pools to read specific MAC addresses.
+ * In this case, all pools should be able to read from MAC address 0.
+ */
+ wr32(hw, TXGBE_ETHADDRIDX, 0);
+ wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
+ wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
+
+ /* set up filters for vlan tags as configured */
+ for (i = 0; i < cfg->nb_pool_maps; i++) {
+ /* set vlan id in VF register and set the valid bit */
+ wr32(hw, TXGBE_PSRVLANIDX, i);
+ wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
+ TXGBE_PSRVLAN_VID(cfg->pool_map[i].vlan_id)));
+ /*
+ * Put the allowed pools into the pool-map registers. The 64-bit
+ * pool bitmap spans two 32-bit registers, so write whichever
+ * half holds the configured pools.
+ */
+ if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
+ wr32(hw, TXGBE_PSRVLANPLM(0),
+ (cfg->pool_map[i].pools & UINT32_MAX));
+ else
+ wr32(hw, TXGBE_PSRVLANPLM(1),
+ ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
+ }
+
+ /* Tx general switch control: enable VMDq loopback when requested */
+ if (cfg->enable_loop_back) {
+ wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);
+ for (i = 0; i < 64; i++)
+ wr32m(hw, TXGBE_POOLETHCTL(i),
+ TXGBE_POOLETHCTL_LLB, TXGBE_POOLETHCTL_LLB);
+ }
+
+ txgbe_flush(hw);
+}
+
+/**
+ * txgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
+ * @hw: pointer to hardware structure
+ */
+static void
+txgbe_vmdq_tx_hw_configure(struct txgbe_hw *hw)
+{
+ uint32_t reg;
+ uint32_t q;
+
+ PMD_INIT_FUNC_TRACE();
+ /* PF/VF transmit enable */
+ wr32(hw, TXGBE_POOLTXENA(0), UINT32_MAX);
+ wr32(hw, TXGBE_POOLTXENA(1), UINT32_MAX);
+
+ /* Disable the Tx desc arbiter */
+ reg = rd32(hw, TXGBE_ARBTXCTL);
+ reg |= TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+
+ wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK,
+ TXGBE_PORTCTL_NUMVT_64);
+
+ /* Enable drop for all queues */
+ for (q = 0; q < TXGBE_MAX_RX_QUEUE_NUM; q++) {
+ u32 val = 1 << (q % 32);
+ wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
+ }
+
+ /* Enable the Tx desc arbiter */
+ reg = rd32(hw, TXGBE_ARBTXCTL);
+ reg &= ~TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+
+ txgbe_flush(hw);
+}
+
+static int __rte_cold
+txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
+{
+ struct txgbe_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ unsigned int i;
+
+ /* Initialize software ring entries */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile struct txgbe_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+ (unsigned int)rxq->queue_id);
+ return -ENOMEM;
+ }
+
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ rxd = &rxq->rx_ring[i];
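+ /* no header split: header address stays 0, packet address gets the mbuf DMA address */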
+ TXGBE_RXD_HDRADDR(rxd, 0);
+ TXGBE_RXD_PKTADDR(rxd, dma_addr);
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
+static int
+txgbe_config_vf_rss(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+ uint32_t mrqc;
+
+ txgbe_rss_configure(dev);
+
+ hw = TXGBE_DEV_HW(dev);
+
+ /* enable VF RSS */
+ mrqc = rd32(hw, TXGBE_PORTCTL);
+ mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ case ETH_64_POOLS:
+ mrqc |= TXGBE_PORTCTL_NUMVT_64;
+ break;
+
+ case ETH_32_POOLS: