#include <unistd.h>
#include <inttypes.h>
+#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
+#ifdef RTE_LIBRTE_IEEE1588
+#define TXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define TXGBE_TX_IEEE1588_TMST 0
+#endif
+
/* Bit Mask to indicate what bits required for building TX context */
static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
PKT_TX_OUTER_IPV6 |
PKT_TX_L4_MASK |
PKT_TX_TCP_SEG |
PKT_TX_TUNNEL_MASK |
- PKT_TX_OUTER_IP_CKSUM);
+ PKT_TX_OUTER_IP_CKSUM |
+ TXGBE_TX_IEEE1588_TMST);
#define TXGBE_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ TXGBE_TX_OFFLOAD_MASK)
*/
cmd_type_len = TXGBE_TXD_FCS;
+#ifdef RTE_LIBRTE_IEEE1588
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cmd_type_len |= TXGBE_TXD_1588;
+#endif
+
olinfo_status = 0;
if (tx_ol_req) {
if (ol_flags & PKT_TX_TCP_SEG) {
PKT_RX_RSS_HASH, 0, 0, 0,
0, 0, 0, PKT_RX_FDIR,
};
-
+#ifdef RTE_LIBRTE_IEEE1588
+ static uint64_t ip_pkt_etqf_map[8] = {
+ 0, 0, 0, PKT_RX_IEEE1588_PTP,
+ 0, 0, 0, 0,
+ };
+ int etfid = txgbe_etflt_id(TXGBE_RXD_PTID(pkt_info));
+ if (likely(-1 != etfid))
+ return ip_pkt_etqf_map[etfid] |
+ ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+ else
+ return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+#else
return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+#endif
}
static inline uint64_t
vlan_flags & PKT_RX_VLAN_STRIPPED)
? vlan_flags : 0;
+#ifdef RTE_LIBRTE_IEEE1588
+ if (rx_status & TXGBE_RXD_STAT_1588)
+ pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+#endif
return pkt_flags;
}
txgbe_tx_queue_release(txq);
}
+/* (Re)set dynamic txgbe_tx_queue fields to defaults */
+static void __rte_cold
+txgbe_reset_tx_queue(struct txgbe_tx_queue *txq)
+{
+	static const struct txgbe_tx_desc zeroed_desc = {0};
+	struct txgbe_tx_entry *txe = txq->sw_ring;
+	uint16_t prev, i;
+
+	/* Zero out HW ring memory */
+	for (i = 0; i < txq->nb_tx_desc; i++)
+		txq->tx_ring[i] = zeroed_desc;
+
+	/* Initialize SW ring entries */
+	prev = (uint16_t)(txq->nb_tx_desc - 1);
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		/* descriptors are DMA-visible, hence the volatile access */
+		volatile struct txgbe_tx_desc *txd = &txq->tx_ring[i];
+
+		/* mark every descriptor "done" so it is free for reuse */
+		txd->dw3 = rte_cpu_to_le_32(TXGBE_TXD_DD);
+		txe[i].mbuf = NULL;
+		txe[i].last_id = i;
+		/* link SW entries into a circular list: prev -> i */
+		txe[prev].next_id = i;
+		prev = i;
+	}
+
+	txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
+	txq->tx_tail = 0;
+
+	/*
+	 * Always allow 1 descriptor to be un-allocated to avoid
+	 * a H/W race condition
+	 */
+	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->ctx_curr = 0;
+	/* invalidate any cached TX context descriptor state */
+	memset((void *)&txq->ctx_cache, 0,
+	       TXGBE_CTX_NUM * sizeof(struct txgbe_ctx_info));
+}
+
static const struct txgbe_txq_ops def_txq_ops = {
.release_mbufs = txgbe_tx_queue_release_mbufs,
.free_swring = txgbe_tx_free_swring,
+ .reset = txgbe_reset_tx_queue,
};
/* Takes an ethdev and a queue and sets up the tx function to be used based on
dev->data->nb_tx_queues = 0;
}
+/**
+ * Receive Side Scaling (RSS)
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source
+ * and destination ports of TCP/UDP headers, if any, of received packets are
+ * hashed against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
+ * RSS output index which is used as the RX queue index where to store the
+ * received packets.
+ * The following output is supplied in the RX write-back descriptor:
+ * - 32-bit result of the Microsoft RSS hash function,
+ * - 4-bit RSS type field.
+ */
+
+/*
+ * Used as the default RSS hash key when the application does not supply
+ * one: 40 bytes, i.e. the ten 32-bit words programmed into the
+ * TXGBE_REG_RSSKEY registers by txgbe_dev_rss_hash_update().
+ */
+static uint8_t rss_intel_key[40] = {
+	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+/* Turn RSS off by clearing the enable bit in the RACTL register. */
+static void
+txgbe_rss_disable(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	wr32m(hw, TXGBE_RACTL, TXGBE_RACTL_RSSENA, 0);
+}
+
+/*
+ * Program the RSS hash key and hash-type selection into hardware.
+ * Part of the ethdev rss_hash_update op.
+ *
+ * @rss_conf->rss_key, when non-NULL, must hold 40 bytes (ten 32-bit
+ * RSSKEY registers). Returns 0 on success, -ENOTSUP when the MAC does
+ * not support runtime RSS reconfiguration.
+ */
+int
+txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+			  struct rte_eth_rss_conf *rss_conf)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint8_t *hash_key;
+	uint32_t mrqc;
+	uint32_t rss_key;
+	uint64_t rss_hf;
+	uint16_t i;
+
+	if (!txgbe_rss_update_sp(hw->mac.type)) {
+		PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
+			"NIC.");
+		return -ENOTSUP;
+	}
+
+	hash_key = rss_conf->rss_key;
+	if (hash_key) {
+		/* Fill in RSS hash key: pack 4 key bytes per register,
+		 * byte 0 in the least-significant position.
+		 */
+		for (i = 0; i < 10; i++) {
+			rss_key = LS32(hash_key[(i * 4) + 0], 0, 0xFF);
+			rss_key |= LS32(hash_key[(i * 4) + 1], 8, 0xFF);
+			rss_key |= LS32(hash_key[(i * 4) + 2], 16, 0xFF);
+			rss_key |= LS32(hash_key[(i * 4) + 3], 24, 0xFF);
+			wr32a(hw, TXGBE_REG_RSSKEY, i, rss_key);
+		}
+	}
+
+	/* Set configured hashing protocols (read-modify-write of RACTL) */
+	rss_hf = rss_conf->rss_hf & TXGBE_RSS_OFFLOAD_ALL;
+	mrqc = rd32(hw, TXGBE_RACTL);
+	mrqc &= ~TXGBE_RACTL_RSSMASK;
+	if (rss_hf & ETH_RSS_IPV4)
+		mrqc |= TXGBE_RACTL_RSSIPV4;
+	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		mrqc |= TXGBE_RACTL_RSSIPV4TCP;
+	if (rss_hf & ETH_RSS_IPV6 ||
+	    rss_hf & ETH_RSS_IPV6_EX)
+		mrqc |= TXGBE_RACTL_RSSIPV6;
+	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
+	    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		mrqc |= TXGBE_RACTL_RSSIPV6TCP;
+	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		mrqc |= TXGBE_RACTL_RSSIPV4UDP;
+	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
+	    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		mrqc |= TXGBE_RACTL_RSSIPV6UDP;
+
+	/* Enable RSS only when at least one supported hash type remains */
+	if (rss_hf)
+		mrqc |= TXGBE_RACTL_RSSENA;
+	else
+		mrqc &= ~TXGBE_RACTL_RSSENA;
+
+	wr32(hw, TXGBE_RACTL, mrqc);
+
+	return 0;
+}
+
+/*
+ * Read the current RSS key and enabled hash types back from hardware.
+ * Part of the ethdev rss_hash_conf_get op; the inverse of
+ * txgbe_dev_rss_hash_update(). Always returns 0.
+ */
+int
+txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+			    struct rte_eth_rss_conf *rss_conf)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint8_t *hash_key;
+	uint32_t mrqc;
+	uint32_t rss_key;
+	uint64_t rss_hf;
+	uint16_t i;
+
+	hash_key = rss_conf->rss_key;
+	if (hash_key) {
+		/* Return RSS hash key: unpack 4 bytes per register,
+		 * mirroring the packing done on update.
+		 */
+		for (i = 0; i < 10; i++) {
+			rss_key = rd32a(hw, TXGBE_REG_RSSKEY, i);
+			hash_key[(i * 4) + 0] = RS32(rss_key, 0, 0xFF);
+			hash_key[(i * 4) + 1] = RS32(rss_key, 8, 0xFF);
+			hash_key[(i * 4) + 2] = RS32(rss_key, 16, 0xFF);
+			hash_key[(i * 4) + 3] = RS32(rss_key, 24, 0xFF);
+		}
+	}
+
+	/* Translate the RACTL hash-type bits back to ETH_RSS_* flags */
+	rss_hf = 0;
+	mrqc = rd32(hw, TXGBE_RACTL);
+	if (mrqc & TXGBE_RACTL_RSSIPV4)
+		rss_hf |= ETH_RSS_IPV4;
+	if (mrqc & TXGBE_RACTL_RSSIPV4TCP)
+		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+	if (mrqc & TXGBE_RACTL_RSSIPV6)
+		rss_hf |= ETH_RSS_IPV6 |
+			  ETH_RSS_IPV6_EX;
+	if (mrqc & TXGBE_RACTL_RSSIPV6TCP)
+		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
+			  ETH_RSS_IPV6_TCP_EX;
+	if (mrqc & TXGBE_RACTL_RSSIPV4UDP)
+		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+	if (mrqc & TXGBE_RACTL_RSSIPV6UDP)
+		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
+			  ETH_RSS_IPV6_UDP_EX;
+	/* report no hash types when RSS is globally disabled */
+	if (!(mrqc & TXGBE_RACTL_RSSENA))
+		rss_hf = 0;
+
+	rss_hf &= TXGBE_RSS_OFFLOAD_ALL;
+
+	rss_conf->rss_hf = rss_hf;
+	return 0;
+}
+
+/*
+ * Apply the device's configured RSS settings: fill the redirection
+ * table (unless the application already updated it via the RETA API)
+ * and program the hash key and hash types.
+ */
+static void
+txgbe_rss_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_rss_conf rss_conf;
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t reta;
+	uint16_t i;
+	uint16_t j;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/*
+	 * Fill in redirection table
+	 * The byte-swap is needed because NIC registers are in
+	 * little-endian order.
+	 */
+	if (adapter->rss_reta_updated == 0) {
+		reta = 0;
+		/* distribute entries round-robin over the RX queues;
+		 * each RETA entry is one byte, so accumulate 4 entries
+		 * (shifting older ones down) and flush one 32-bit
+		 * register per 4 entries.
+		 */
+		for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+			if (j == dev->data->nb_rx_queues)
+				j = 0;
+			reta = (reta >> 8) | LS32(j, 24, 0xFF);
+			if ((i & 3) == 3)
+				wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+		}
+	}
+	/*
+	 * Configure the RSS key and the RSS protocols used to compute
+	 * the RSS hash of input packets.
+	 */
+	rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+	if (rss_conf.rss_key == NULL)
+		rss_conf.rss_key = rss_intel_key; /* Default hash key */
+	txgbe_dev_rss_hash_update(dev, &rss_conf);
+}
+
+#define NUM_VFTA_REGISTERS 128
+#define NIC_RX_BUFFER_SIZE 0x200
+
+/*
+ * Configure combined VMDq + DCB receive mode: split the RX packet
+ * buffer per traffic class, program pool/TC counts in PORTCTL, set up
+ * the default pool, priority-to-TC mapping, VLAN filtering and the
+ * per-pool VLAN membership filters from rx_adv_conf.vmdq_dcb_conf.
+ */
+static void
+txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_vmdq_dcb_conf *cfg;
+	struct txgbe_hw *hw;
+	enum rte_eth_nb_pools num_pools;
+	uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
+	uint16_t pbsize;
+	uint8_t nb_tcs; /* number of traffic classes */
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = TXGBE_DEV_HW(dev);
+	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+	num_pools = cfg->nb_queue_pools;
+	/* Check we have a valid number of pools */
+	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+		txgbe_rss_disable(dev);
+		return;
+	}
+	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
+	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+
+	/*
+	 * split rx buffer up into sections, each for 1 traffic class
+	 */
+	pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+	for (i = 0; i < nb_tcs; i++) {
+		uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
+
+		rxpbsize &= (~(0x3FF << 10));
+		/* clear 10 bits. */
+		rxpbsize |= (pbsize << 10); /* set value */
+		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
+	}
+	/* zero alloc all unused TCs */
+	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
+
+		rxpbsize &= (~(0x3FF << 10));
+		/* clear 10 bits. */
+		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
+	}
+
+	/* select the TC/pool split matching the pool count */
+	if (num_pools == ETH_16_POOLS) {
+		mrqc = TXGBE_PORTCTL_NUMTC_8;
+		mrqc |= TXGBE_PORTCTL_NUMVT_16;
+	} else {
+		mrqc = TXGBE_PORTCTL_NUMTC_4;
+		mrqc |= TXGBE_PORTCTL_NUMVT_32;
+	}
+	wr32m(hw, TXGBE_PORTCTL,
+	      TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK, mrqc);
+
+	/* route unmatched packets to the default pool, or drop them
+	 * (DEFDSA) when no default pool is configured
+	 */
+	vt_ctl = TXGBE_POOLCTL_RPLEN;
+	if (cfg->enable_default_pool)
+		vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
+	else
+		vt_ctl |= TXGBE_POOLCTL_DEFDSA;
+
+	wr32(hw, TXGBE_POOLCTL, vt_ctl);
+
+	queue_mapping = 0;
+	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		/*
+		 * mapping is done with 3 bits per priority,
+		 * so shift by i*3 each time
+		 */
+		queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
+
+	wr32(hw, TXGBE_RPUP2TC, queue_mapping);
+
+	wr32(hw, TXGBE_ARBRXCTL, TXGBE_ARBRXCTL_RRM);
+
+	/* enable vlan filtering and allow all vlan tags through */
+	vlanctrl = rd32(hw, TXGBE_VLANCTL);
+	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+	wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+	/* enable all vlan filters */
+	for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+		wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
+
+	/* enable RX for the pools in use (one bit per pool) */
+	wr32(hw, TXGBE_POOLRXENA(0),
+	     num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+	/* allow every pool to receive on MAC address entry 0 */
+	wr32(hw, TXGBE_ETHADDRIDX, 0);
+	wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
+	wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
+
+	/* set up filters for vlan tags as configured */
+	for (i = 0; i < cfg->nb_pool_maps; i++) {
+		/* set vlan id in VF register and set the valid bit */
+		wr32(hw, TXGBE_PSRVLANIDX, i);
+		wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
+			(cfg->pool_map[i].vlan_id & 0xFFF)));
+
+		wr32(hw, TXGBE_PSRVLANPLM(0), cfg->pool_map[i].pools);
+	}
+}
+
+/**
+ * txgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+/**
+ * txgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Disables the TX descriptor arbiter, programs the DCB enable bit and
+ * the traffic-class count into PORTCTL, then re-enables the arbiter.
+ */
+static void
+txgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
+		       struct txgbe_dcb_config *dcb_config)
+{
+	uint32_t reg;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Disable the Tx desc arbiter before changing the TC count */
+	reg = rd32(hw, TXGBE_ARBTXCTL);
+	reg |= TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, reg);
+
+	/* Enable DCB for Tx with 8 TCs */
+	reg = rd32(hw, TXGBE_PORTCTL);
+	/*
+	 * Clear only the NUMTC field. The previous code did
+	 * "reg &= TXGBE_PORTCTL_NUMTC_MASK", which kept the stale TC
+	 * count and wiped every other PORTCTL bit instead — compare the
+	 * "reg &= ~(...NUMTC_MASK | ...NUMVT_MASK)" pattern used by
+	 * txgbe_dcb_rx_hw_config().
+	 */
+	reg &= ~TXGBE_PORTCTL_NUMTC_MASK;
+	reg |= TXGBE_PORTCTL_DCB;
+	if (dcb_config->num_tcs.pg_tcs == 8)
+		reg |= TXGBE_PORTCTL_NUMTC_8;
+	else
+		reg |= TXGBE_PORTCTL_NUMTC_4;
+
+	wr32(hw, TXGBE_PORTCTL, reg);
+
+	/* Enable the Tx desc arbiter */
+	reg = rd32(hw, TXGBE_ARBTXCTL);
+	reg &= ~TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, reg);
+}
+
+/**
+ * txgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static void
+txgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ /*PF VF Transmit Enable*/
+ wr32(hw, TXGBE_POOLTXENA(0),
+ vmdq_tx_conf->nb_queue_pools ==
+ ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+ /*Configure general DCB TX parameters*/
+ txgbe_dcb_tx_hw_config(dev, dcb_config);
+}
+
+/*
+ * Derive the RX-side DCB software config (TC counts and the
+ * user-priority to traffic-class bitmaps) from the VMDq+DCB RX
+ * settings in rte_eth_conf.
+ */
+static void
+txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
+			struct txgbe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+			&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+	struct txgbe_dcb_tc_config *tc;
+	uint8_t nb_tcs;
+	uint8_t prio, tc_idx;
+
+	/* 16 pools leave room for 8 TCs, any other supported count for 4 */
+	nb_tcs = (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) ?
+			ETH_8_TCS : ETH_4_TCS;
+	dcb_config->num_tcs.pg_tcs = nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = nb_tcs;
+
+	/* Start every TC with an empty user-priority bitmap */
+	for (tc_idx = 0; tc_idx < TXGBE_DCB_TC_MAX; tc_idx++) {
+		tc = &dcb_config->tc_config[tc_idx];
+		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+	}
+
+	/* Record which user priorities feed each traffic class */
+	for (prio = 0; prio < ETH_DCB_NUM_USER_PRIORITIES; prio++) {
+		tc_idx = vmdq_rx_conf->dcb_tc[prio];
+		tc = &dcb_config->tc_config[tc_idx];
+		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+			(uint8_t)(1 << prio);
+	}
+}
+
+/*
+ * Derive the TX-side DCB software config (TC counts and the
+ * user-priority to traffic-class bitmaps) from the VMDq+DCB TX
+ * settings in rte_eth_conf.
+ */
+static void
+txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
+			struct txgbe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+	struct txgbe_dcb_tc_config *tc;
+	uint8_t nb_tcs;
+	uint8_t prio, tc_idx;
+
+	/* 16 pools leave room for 8 TCs, any other supported count for 4 */
+	nb_tcs = (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) ?
+			ETH_8_TCS : ETH_4_TCS;
+	dcb_config->num_tcs.pg_tcs = nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = nb_tcs;
+
+	/* Start every TC with an empty user-priority bitmap */
+	for (tc_idx = 0; tc_idx < TXGBE_DCB_TC_MAX; tc_idx++) {
+		tc = &dcb_config->tc_config[tc_idx];
+		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+	}
+
+	/* Record which user priorities feed each traffic class */
+	for (prio = 0; prio < ETH_DCB_NUM_USER_PRIORITIES; prio++) {
+		tc_idx = vmdq_tx_conf->dcb_tc[prio];
+		tc = &dcb_config->tc_config[tc_idx];
+		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+			(uint8_t)(1 << prio);
+	}
+}
+
+/*
+ * Convert the plain DCB RX settings from rte_eth_conf into the
+ * driver's txgbe_dcb_config: TC counts plus per-TC bitmaps of the
+ * user priorities mapped onto each traffic class.
+ */
+static void
+txgbe_dcb_rx_config(struct rte_eth_dev *dev,
+		struct txgbe_dcb_config *dcb_config)
+{
+	struct rte_eth_dcb_rx_conf *rx_conf =
+			&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+	struct txgbe_dcb_tc_config *tc;
+	uint8_t prio, tc_idx;
+
+	dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
+
+	/* Clear all user-priority bitmaps before rebuilding them */
+	for (tc_idx = 0; tc_idx < TXGBE_DCB_TC_MAX; tc_idx++) {
+		tc = &dcb_config->tc_config[tc_idx];
+		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+	}
+
+	/* Set one bit per user priority in its target TC's bitmap */
+	for (prio = 0; prio < ETH_DCB_NUM_USER_PRIORITIES; prio++) {
+		tc_idx = rx_conf->dcb_tc[prio];
+		tc = &dcb_config->tc_config[tc_idx];
+		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+			(uint8_t)(1 << prio);
+	}
+}
+
+/*
+ * Convert the plain DCB TX settings from rte_eth_conf into the
+ * driver's txgbe_dcb_config: TC counts plus per-TC bitmaps of the
+ * user priorities mapped onto each traffic class.
+ */
+static void
+txgbe_dcb_tx_config(struct rte_eth_dev *dev,
+		struct txgbe_dcb_config *dcb_config)
+{
+	struct rte_eth_dcb_tx_conf *tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+	struct txgbe_dcb_tc_config *tc;
+	uint8_t prio, tc_idx;
+
+	dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
+
+	/* Clear all user-priority bitmaps before rebuilding them */
+	for (tc_idx = 0; tc_idx < TXGBE_DCB_TC_MAX; tc_idx++) {
+		tc = &dcb_config->tc_config[tc_idx];
+		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+	}
+
+	/* Set one bit per user priority in its target TC's bitmap */
+	for (prio = 0; prio < ETH_DCB_NUM_USER_PRIORITIES; prio++) {
+		tc_idx = tx_conf->dcb_tc[prio];
+		tc = &dcb_config->tc_config[tc_idx];
+		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+			(uint8_t)(1 << prio);
+	}
+}
+
+/**
+ * txgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+/**
+ * txgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static void
+txgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+		struct txgbe_dcb_config *dcb_config)
+{
+	uint32_t reg;
+	uint32_t vlanctrl;
+	uint8_t i;
+	uint32_t q;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	PMD_INIT_FUNC_TRACE();
+	/*
+	 * Disable the arbiter before changing parameters
+	 * (always enable recycle mode; WSP)
+	 */
+	reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP | TXGBE_ARBRXCTL_DIA;
+	wr32(hw, TXGBE_ARBRXCTL, reg);
+
+	/* program TC count, and pool count when virtualization is on */
+	reg = rd32(hw, TXGBE_PORTCTL);
+	reg &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+	if (dcb_config->num_tcs.pg_tcs == 4) {
+		reg |= TXGBE_PORTCTL_NUMTC_4;
+		if (dcb_config->vt_mode)
+			reg |= TXGBE_PORTCTL_NUMVT_32;
+		else
+			wr32(hw, TXGBE_POOLCTL, 0);
+	}
+
+	if (dcb_config->num_tcs.pg_tcs == 8) {
+		reg |= TXGBE_PORTCTL_NUMTC_8;
+		if (dcb_config->vt_mode)
+			reg |= TXGBE_PORTCTL_NUMVT_16;
+		else
+			wr32(hw, TXGBE_POOLCTL, 0);
+	}
+
+	wr32(hw, TXGBE_PORTCTL, reg);
+
+	/*
+	 * NOTE(review): both branches below write the same QPRXDROP bits
+	 * even though the comments claim "disable" vs "enable" drop —
+	 * confirm whether the non-SRIOV branch should clear the bit
+	 * (wr32m(..., val, 0)) instead.
+	 */
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		/* Disable drop for all queues in VMDQ mode*/
+		for (q = 0; q < TXGBE_MAX_RX_QUEUE_NUM; q++) {
+			u32 val = 1 << (q % 32);
+			wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
+		}
+	} else {
+		/* Enable drop for all queues in SRIOV mode */
+		for (q = 0; q < TXGBE_MAX_RX_QUEUE_NUM; q++) {
+			u32 val = 1 << (q % 32);
+			wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
+		}
+	}
+
+	/* VLNCTL: enable vlan filtering and allow all vlan tags through */
+	vlanctrl = rd32(hw, TXGBE_VLANCTL);
+	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+	wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+	/* VLANTBL - enable all vlan filters */
+	for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+		wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
+
+	/*
+	 * Configure Rx packet plane (recycle mode; WSP) and
+	 * enable arbiter
+	 */
+	reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP;
+	wr32(hw, TXGBE_ARBRXCTL, reg);
+}
+
+/* Thin wrapper: delegate RX packet-plane arbiter programming to the
+ * raptor-specific implementation.
+ */
+static void
+txgbe_dcb_hw_arbite_rx_config(struct txgbe_hw *hw, uint16_t *refill,
+		uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+	txgbe_dcb_config_rx_arbiter_raptor(hw, refill, max, bwg_id,
+			tsa, map);
+}
+
+/* Program the TX descriptor and TX data arbiters; only raptor-type
+ * MACs are handled, any other MAC type is a silent no-op.
+ */
+static void
+txgbe_dcb_hw_arbite_tx_config(struct txgbe_hw *hw, uint16_t *refill,
+		uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+	if (hw->mac.type != txgbe_mac_raptor)
+		return;
+
+	txgbe_dcb_config_tx_desc_arbiter_raptor(hw, refill, max,
+						bwg_id, tsa);
+	txgbe_dcb_config_tx_data_arbiter_raptor(hw, refill, max,
+						bwg_id, tsa, map);
+}
+
+#define DCB_RX_CONFIG 1
+#define DCB_TX_CONFIG 1
+#define DCB_TX_PB 1024
+/**
+ * txgbe_dcb_hw_configure - Enable DCB and configure
+ * general DCB in VT mode and non-VT mode parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static int
+txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
+			struct txgbe_dcb_config *dcb_config)
+{
+	int ret = 0;
+	uint8_t i, pfc_en, nb_tcs;
+	uint16_t pbsize, rx_buffer_size;
+	uint8_t config_dcb_rx = 0;
+	uint8_t config_dcb_tx = 0;
+	uint8_t tsa[TXGBE_DCB_TC_MAX] = {0};
+	uint8_t bwgid[TXGBE_DCB_TC_MAX] = {0};
+	uint16_t refill[TXGBE_DCB_TC_MAX] = {0};
+	uint16_t max[TXGBE_DCB_TC_MAX] = {0};
+	uint8_t map[TXGBE_DCB_TC_MAX] = {0};
+	struct txgbe_dcb_tc_config *tc;
+	uint32_t max_frame = dev->data->mtu +
+			RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
+
+	/* Step 1: translate the ethdev RX mq_mode into SW config and
+	 * program the RX-side registers.
+	 */
+	switch (dev->data->dev_conf.rxmode.mq_mode) {
+	case ETH_MQ_RX_VMDQ_DCB:
+		dcb_config->vt_mode = true;
+		config_dcb_rx = DCB_RX_CONFIG;
+		/*
+		 * get dcb and VT rx configuration parameters
+		 * from rte_eth_conf
+		 */
+		txgbe_vmdq_dcb_rx_config(dev, dcb_config);
+		/*Configure general VMDQ and DCB RX parameters*/
+		txgbe_vmdq_dcb_configure(dev);
+		break;
+	case ETH_MQ_RX_DCB:
+	case ETH_MQ_RX_DCB_RSS:
+		dcb_config->vt_mode = false;
+		config_dcb_rx = DCB_RX_CONFIG;
+		/* Get dcb TX configuration parameters from rte_eth_conf */
+		txgbe_dcb_rx_config(dev, dcb_config);
+		/*Configure general DCB RX parameters*/
+		txgbe_dcb_rx_hw_config(dev, dcb_config);
+		break;
+	default:
+		PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
+		break;
+	}
+	/* Step 2: same for the TX mq_mode */
+	switch (dev->data->dev_conf.txmode.mq_mode) {
+	case ETH_MQ_TX_VMDQ_DCB:
+		dcb_config->vt_mode = true;
+		config_dcb_tx = DCB_TX_CONFIG;
+		/* get DCB and VT TX configuration parameters
+		 * from rte_eth_conf
+		 */
+		txgbe_dcb_vt_tx_config(dev, dcb_config);
+		/* Configure general VMDQ and DCB TX parameters */
+		txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
+		break;
+
+	case ETH_MQ_TX_DCB:
+		dcb_config->vt_mode = false;
+		config_dcb_tx = DCB_TX_CONFIG;
+		/* get DCB TX configuration parameters from rte_eth_conf */
+		txgbe_dcb_tx_config(dev, dcb_config);
+		/* Configure general DCB TX parameters */
+		txgbe_dcb_tx_hw_config(dev, dcb_config);
+		break;
+	default:
+		PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
+		break;
+	}
+
+	nb_tcs = dcb_config->num_tcs.pfc_tcs;
+	/* Unpack map */
+	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
+	if (nb_tcs == ETH_4_TCS) {
+		/* Avoid un-configured priority mapping to TC0 */
+		uint8_t j = 4;
+		uint8_t mask = 0xFF;
+
+		/* mask ends up with one bit set per TC that no priority
+		 * maps to; spread the remaining priorities over those TCs
+		 */
+		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+			mask = (uint8_t)(mask & (~(1 << map[i])));
+		for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
+			if ((mask & 0x1) && j < ETH_DCB_NUM_USER_PRIORITIES)
+				map[j++] = i;
+			mask >>= 1;
+		}
+		/* Re-configure 4 TCs BW */
+		for (i = 0; i < nb_tcs; i++) {
+			tc = &dcb_config->tc_config[i];
+			/* only overwrite TX bandwidth when the user did not
+			 * configure per-TC bandwidth via the bw_conf API
+			 */
+			if (bw_conf->tc_num != nb_tcs)
+				tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
+					(uint8_t)(100 / nb_tcs);
+			tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
+				(uint8_t)(100 / nb_tcs);
+		}
+		for (; i < TXGBE_DCB_TC_MAX; i++) {
+			tc = &dcb_config->tc_config[i];
+			tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+			tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = 0;
+		}
+	} else {
+		/* Re-configure 8 TCs BW; the (i & 1) term distributes the
+		 * rounding remainder of 100/nb_tcs over alternate TCs
+		 */
+		for (i = 0; i < nb_tcs; i++) {
+			tc = &dcb_config->tc_config[i];
+			if (bw_conf->tc_num != nb_tcs)
+				tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
+					(uint8_t)(100 / nb_tcs + (i & 1));
+			tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
+				(uint8_t)(100 / nb_tcs + (i & 1));
+		}
+	}
+
+	rx_buffer_size = NIC_RX_BUFFER_SIZE;
+
+	if (config_dcb_rx) {
+		/* Set RX buffer size: equal share per TC */
+		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
+		uint32_t rxpbsize = pbsize << 10;
+
+		for (i = 0; i < nb_tcs; i++)
+			wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
+
+		/* zero alloc all unused TCs */
+		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+			wr32(hw, TXGBE_PBRXSIZE(i), 0);
+	}
+	if (config_dcb_tx) {
+		/* Only support an equally distributed
+		 * Tx packet buffer strategy.
+		 */
+		uint32_t txpktsize = TXGBE_PBTXSIZE_MAX / nb_tcs;
+		uint32_t txpbthresh = (txpktsize / DCB_TX_PB) -
+					TXGBE_TXPKT_SIZE_MAX;
+
+		for (i = 0; i < nb_tcs; i++) {
+			wr32(hw, TXGBE_PBTXSIZE(i), txpktsize);
+			wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
+		}
+		/* Clear unused TCs, if any, to zero buffer size*/
+		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+			wr32(hw, TXGBE_PBTXSIZE(i), 0);
+			wr32(hw, TXGBE_PBTXDMATH(i), 0);
+		}
+	}
+
+	/*Calculates traffic class credits*/
+	txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
+				TXGBE_DCB_TX_CONFIG);
+	txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
+				TXGBE_DCB_RX_CONFIG);
+
+	if (config_dcb_rx) {
+		/* Unpack CEE standard containers */
+		txgbe_dcb_unpack_refill_cee(dcb_config,
+				TXGBE_DCB_RX_CONFIG, refill);
+		txgbe_dcb_unpack_max_cee(dcb_config, max);
+		txgbe_dcb_unpack_bwgid_cee(dcb_config,
+				TXGBE_DCB_RX_CONFIG, bwgid);
+		txgbe_dcb_unpack_tsa_cee(dcb_config,
+				TXGBE_DCB_RX_CONFIG, tsa);
+		/* Configure PG(ETS) RX */
+		txgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
+	}
+
+	if (config_dcb_tx) {
+		/* Unpack CEE standard containers */
+		txgbe_dcb_unpack_refill_cee(dcb_config,
+				TXGBE_DCB_TX_CONFIG, refill);
+		txgbe_dcb_unpack_max_cee(dcb_config, max);
+		txgbe_dcb_unpack_bwgid_cee(dcb_config,
+				TXGBE_DCB_TX_CONFIG, bwgid);
+		txgbe_dcb_unpack_tsa_cee(dcb_config,
+				TXGBE_DCB_TX_CONFIG, tsa);
+		/* Configure PG(ETS) TX */
+		txgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
+	}
+
+	/* Configure queue statistics registers */
+	txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
+
+	/* Check if the PFC is supported */
+	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
+		for (i = 0; i < nb_tcs; i++) {
+			/* If the TC count is 8,
+			 * and the default high_water is 48,
+			 * the low_water is 16 as default.
+			 */
+			hw->fc.high_water[i] = (pbsize * 3) / 4;
+			hw->fc.low_water[i] = pbsize / 4;
+			/* Enable pfc for this TC */
+			tc = &dcb_config->tc_config[i];
+			tc->pfc = txgbe_dcb_pfc_enabled;
+		}
+		txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+		/* with 4 TCs only priorities 0-3 can carry PFC */
+		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+			pfc_en &= 0x0F;
+		ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
+	}
+
+	return ret;
+}
+
+/* Partition the packet buffer equally over the configured TCs while
+ * reserving 256KB of headroom for flow-director filters.
+ */
+void txgbe_configure_pb(struct rte_eth_dev *dev)
+{
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	int tc = dev_conf->rx_adv_conf.dcb_rx_conf.nb_tcs;
+	int hdrm = 256; /* KB reserved for fdir */
+
+	hw->mac.setup_pba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
+}
+
+/*
+ * One-time port level setup: default outer/inner VLAN TPIDs, the
+ * eight TPID filter slots, and the default VXLAN UDP port.
+ */
+void txgbe_configure_port(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	int i = 0;
+	/* TPID filter table: 802.1Q, 802.1ad (QinQ) and two legacy
+	 * QinQ ethertypes; remaining slots unused.
+	 */
+	uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
+			0x9100, 0x9200,
+			0x0000, 0x0000,
+			0x0000, 0x0000};
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* default outer vlan tpid */
+	wr32(hw, TXGBE_EXTAG,
+		TXGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
+		TXGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));
+
+	/* default inner vlan tpid */
+	wr32m(hw, TXGBE_VLANCTL,
+		TXGBE_VLANCTL_TPID_MASK,
+		TXGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
+	wr32m(hw, TXGBE_DMATXCTRL,
+		TXGBE_DMATXCTRL_TPID_MASK,
+		TXGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));
+
+	/* default vlan tpid filters: two TPIDs packed per register,
+	 * even indexes in the low half, odd in the high half
+	 */
+	for (i = 0; i < 8; i++) {
+		wr32m(hw, TXGBE_TAGTPID(i / 2),
+			(i % 2 ? TXGBE_TAGTPID_MSB_MASK
+			       : TXGBE_TAGTPID_LSB_MASK),
+			(i % 2 ? TXGBE_TAGTPID_MSB(tpids[i])
+			       : TXGBE_TAGTPID_LSB(tpids[i])));
+	}
+
+	/* default vxlan port (IANA-assigned 4789) */
+	wr32(hw, TXGBE_VXLANPORT, 4789);
+}
+
+/**
+ * txgbe_configure_dcb - Configure DCB Hardware
+ * @dev: pointer to rte_eth_dev
+ */
+/**
+ * txgbe_configure_dcb - Configure DCB Hardware
+ * @dev: pointer to rte_eth_dev
+ */
+void txgbe_configure_dcb(struct rte_eth_dev *dev)
+{
+	struct txgbe_dcb_config *dcb_cfg = TXGBE_DEV_DCB_CONFIG(dev);
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+	enum rte_eth_rx_mq_mode mq_mode = dev_conf->rxmode.mq_mode;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Nothing to do unless one of the DCB RX modes was requested */
+	if (mq_mode != ETH_MQ_RX_VMDQ_DCB &&
+	    mq_mode != ETH_MQ_RX_DCB &&
+	    mq_mode != ETH_MQ_RX_DCB_RSS)
+		return;
+
+	/* DCB cannot serve more RX queues than it supports */
+	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+		return;
+
+	/** Configure DCB hardware **/
+	txgbe_dcb_hw_configure(dev, dcb_cfg);
+}
+
+/*
+ * VMDq only support for 10 GbE NIC.
+ */
+/*
+ * Configure pure VMDq (no DCB) receive mode: 64-pool virtualization,
+ * default-pool routing, per-pool RX modes, VLAN filtering, pool RX
+ * enables, MAC-address sharing and optional TX->RX loopback, all from
+ * rx_adv_conf.vmdq_rx_conf.
+ */
+static void
+txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_vmdq_rx_conf *cfg;
+	struct txgbe_hw *hw;
+	enum rte_eth_nb_pools num_pools;
+	uint32_t mrqc, vt_ctl, vlanctrl;
+	uint32_t vmolr = 0;
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = TXGBE_DEV_HW(dev);
+	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+	num_pools = cfg->nb_queue_pools;
+
+	/* VMDq and RSS are mutually exclusive here */
+	txgbe_rss_disable(dev);
+
+	/* enable vmdq */
+	mrqc = TXGBE_PORTCTL_NUMVT_64;
+	wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mrqc);
+
+	/* turn on virtualisation and set the default pool */
+	vt_ctl = TXGBE_POOLCTL_RPLEN;
+	if (cfg->enable_default_pool)
+		vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
+	else
+		vt_ctl |= TXGBE_POOLCTL_DEFDSA;
+
+	wr32(hw, TXGBE_POOLCTL, vt_ctl);
+
+	/* translate the requested RX mode into per-pool ethctl bits */
+	for (i = 0; i < (int)num_pools; i++) {
+		vmolr = txgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
+		wr32(hw, TXGBE_POOLETHCTL(i), vmolr);
+	}
+
+	/* enable vlan filtering and allow all vlan tags through */
+	vlanctrl = rd32(hw, TXGBE_VLANCTL);
+	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+	wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+	/* enable all vlan filters */
+	for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+		wr32(hw, TXGBE_VLANTBL(i), UINT32_MAX);
+
+	/* pool enabling for receive - 64 */
+	wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
+	if (num_pools == ETH_64_POOLS)
+		wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
+
+	/*
+	 * allow pools to read specific mac addresses
+	 * In this case, all pools should be able to read from mac addr 0
+	 */
+	wr32(hw, TXGBE_ETHADDRIDX, 0);
+	wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
+	wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
+
+	/* set up filters for vlan tags as configured */
+	for (i = 0; i < cfg->nb_pool_maps; i++) {
+		/* set vlan id in VF register and set the valid bit */
+		wr32(hw, TXGBE_PSRVLANIDX, i);
+		wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
+				TXGBE_PSRVLAN_VID(cfg->pool_map[i].vlan_id)));
+		/*
+		 * Put the allowed pools in VFB reg. As we only have 16 or 64
+		 * pools, we only need to use the first half of the register
+		 * i.e. bits 0-31
+		 */
+		if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
+			wr32(hw, TXGBE_PSRVLANPLM(0),
+				(cfg->pool_map[i].pools & UINT32_MAX));
+		else
+			wr32(hw, TXGBE_PSRVLANPLM(1),
+				((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
+	}
+
+	/* Tx General Switch Control Enables VMDQ loopback */
+	if (cfg->enable_loop_back) {
+		wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);
+		/* local loopback must also be enabled per pool */
+		for (i = 0; i < 64; i++)
+			wr32m(hw, TXGBE_POOLETHCTL(i),
+				TXGBE_POOLETHCTL_LLB, TXGBE_POOLETHCTL_LLB);
+	}
+
+	txgbe_flush(hw);
+}
+
+/*
+ * txgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
+ * @hw: pointer to hardware structure
+ */
+/*
+ * txgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
+ * @hw: pointer to hardware structure
+ *
+ * Enables TX for all 64 pools, sets NUMVT to 64 and programs the
+ * per-queue drop bits, bracketing the change with a TX descriptor
+ * arbiter disable/enable.
+ */
+static void
+txgbe_vmdq_tx_hw_configure(struct txgbe_hw *hw)
+{
+	uint32_t reg;
+	uint32_t q;
+
+	PMD_INIT_FUNC_TRACE();
+	/*PF VF Transmit Enable*/
+	wr32(hw, TXGBE_POOLTXENA(0), UINT32_MAX);
+	wr32(hw, TXGBE_POOLTXENA(1), UINT32_MAX);
+
+	/* Disable the Tx desc arbiter while reconfiguring */
+	reg = rd32(hw, TXGBE_ARBTXCTL);
+	reg |= TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, reg);
+
+	wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK,
+		TXGBE_PORTCTL_NUMVT_64);
+
+	/* Disable drop for all queues */
+	for (q = 0; q < 128; q++) {
+		u32 val = 1 << (q % 32);
+		wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
+	}
+
+	/* Enable the Tx desc arbiter */
+	reg = rd32(hw, TXGBE_ARBTXCTL);
+	reg &= ~TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, reg);
+
+	txgbe_flush(hw);
+}
+
static int __rte_cold
txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
{
return 0;
}
+/* Enable RSS alongside SRIOV: program the common RSS registers, then
+ * set the virtual-pool count (NUMVT) in PORTCTL. Returns 0 on success
+ * or -EINVAL for an unsupported active pool count.
+ */
+static int
+txgbe_config_vf_rss(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw;
+	uint32_t mrqc;
+
+	/* key/RETA/hash-type setup is shared with the non-VF path */
+	txgbe_rss_configure(dev);
+
+	hw = TXGBE_DEV_HW(dev);
+
+	/* enable VF RSS: clear the TC and VT fields, then set NUMVT */
+	mrqc = rd32(hw, TXGBE_PORTCTL);
+	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_64_POOLS) {
+		mrqc |= TXGBE_PORTCTL_NUMVT_64;
+	} else if (RTE_ETH_DEV_SRIOV(dev).active == ETH_32_POOLS) {
+		mrqc |= TXGBE_PORTCTL_NUMVT_32;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
+		return -EINVAL;
+	}
+
+	wr32(hw, TXGBE_PORTCTL, mrqc);
+
+	return 0;
+}
+
+/* Default SRIOV multi-queue setup: program only the virtual-pool
+ * count (NUMVT) in PORTCTL from the active pool number. An
+ * unsupported count is logged and PORTCTL is left untouched.
+ */
+static int
+txgbe_config_vf_default(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t mrqc;
+
+	mrqc = rd32(hw, TXGBE_PORTCTL);
+	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_64_POOLS) {
+		mrqc |= TXGBE_PORTCTL_NUMVT_64;
+	} else if (RTE_ETH_DEV_SRIOV(dev).active == ETH_32_POOLS) {
+		mrqc |= TXGBE_PORTCTL_NUMVT_32;
+	} else if (RTE_ETH_DEV_SRIOV(dev).active == ETH_16_POOLS) {
+		mrqc |= TXGBE_PORTCTL_NUMVT_16;
+	} else {
+		PMD_INIT_LOG(ERR,
+			"invalid pool number in IOV mode");
+		return 0;
+	}
+
+	wr32(hw, TXGBE_PORTCTL, mrqc);
+
+	return 0;
+}
+
+/*
+ * Dispatch RX multi-queue configuration based on whether SRIOV is
+ * active and on the requested rxmode.mq_mode. Returns 0 on success,
+ * -1 for DCB/RSS combinations that cannot coexist with SRIOV.
+ */
+static int
+txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		/*
+		 * SRIOV inactive scheme
+		 * any DCB/RSS w/o VMDq multi-queue setting
+		 */
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		case ETH_MQ_RX_RSS:
+		case ETH_MQ_RX_DCB_RSS:
+		case ETH_MQ_RX_VMDQ_RSS:
+			txgbe_rss_configure(dev);
+			break;
+
+		case ETH_MQ_RX_VMDQ_DCB:
+			txgbe_vmdq_dcb_configure(dev);
+			break;
+
+		case ETH_MQ_RX_VMDQ_ONLY:
+			txgbe_vmdq_rx_hw_configure(dev);
+			break;
+
+		case ETH_MQ_RX_NONE:
+		default:
+			/* if mq_mode is none, disable rss mode.*/
+			txgbe_rss_disable(dev);
+			break;
+		}
+	} else {
+		/* SRIOV active scheme
+		 * Support RSS together with SRIOV.
+		 */
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		case ETH_MQ_RX_RSS:
+		case ETH_MQ_RX_VMDQ_RSS:
+			txgbe_config_vf_rss(dev);
+			break;
+		case ETH_MQ_RX_VMDQ_DCB:
+		case ETH_MQ_RX_DCB:
+			/* In SRIOV, the configuration is the same as VMDq case */
+			txgbe_vmdq_dcb_configure(dev);
+			break;
+		/* DCB/RSS together with SRIOV is not supported */
+		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case ETH_MQ_RX_DCB_RSS:
+			PMD_INIT_LOG(ERR,
+				"Could not support DCB/RSS with VMDq & SRIOV");
+			return -1;
+		default:
+			/* any other mode falls back to plain NUMVT setup */
+			txgbe_config_vf_default(dev);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Configure TX multi-queue mode: program the NUMVT field in PORTCTL
+ * (or the full VMDq TX setup) according to SRIOV state and
+ * txmode.mq_mode, with the TX arbiter disabled during the change.
+ * Always returns 0.
+ */
+static int
+txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t mtqc;
+	uint32_t rttdcs;
+
+	/* disable arbiter */
+	rttdcs = rd32(hw, TXGBE_ARBTXCTL);
+	rttdcs |= TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, rttdcs);
+
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		/*
+		 * SRIOV inactive scheme
+		 * any DCB w/o VMDq multi-queue setting
+		 */
+		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+			txgbe_vmdq_tx_hw_configure(hw);
+		else
+			wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
+	} else {
+		switch (RTE_ETH_DEV_SRIOV(dev).active) {
+		/*
+		 * SRIOV active scheme
+		 * FIXME if support DCB together with VMDq & SRIOV
+		 */
+		case ETH_64_POOLS:
+			mtqc = TXGBE_PORTCTL_NUMVT_64;
+			break;
+		case ETH_32_POOLS:
+			mtqc = TXGBE_PORTCTL_NUMVT_32;
+			break;
+		case ETH_16_POOLS:
+			mtqc = TXGBE_PORTCTL_NUMVT_16;
+			break;
+		default:
+			/* clear NUMVT on an unexpected pool count */
+			mtqc = 0;
+			PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+		}
+		wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mtqc);
+	}
+
+	/* re-enable arbiter */
+	rttdcs &= ~TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, rttdcs);
+
+	return 0;
+}
+
/**
* txgbe_get_rscctl_maxdesc
*
if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
+ /*
+ * Device configured with multiple RX queues.
+ */
+ txgbe_dev_mq_rx_configure(dev);
+
/*
* Setup the Checksum Register.
* Disable Full-Packet Checksum which is mutually exclusive with RSS.
wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
}
+
+ /* Device configured with multiple TX queues. */
+ txgbe_dev_mq_tx_configure(dev);
}
/*
return 0;
}
+/* Fill @qinfo with the current configuration of RX queue @queue_id. */
+void
+txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct txgbe_rx_queue *rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
+}
+
+/* Fill @qinfo with the current configuration of TX queue @queue_id. */
+void
+txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct txgbe_tx_queue *txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.offloads = txq->offloads;
+	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+