PMD_INIT_FUNC_TRACE();
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
/* set flag to update link status after init */
intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
ngbe_dev_stop(struct rte_eth_dev *dev)
{
struct rte_eth_link link;
+ struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
struct ngbe_hw *hw = ngbe_dev_hw(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
rte_intr_efd_disable(intr_handle);
rte_intr_vec_list_free(intr_handle);
+ adapter->rss_reta_updated = 0;
+
hw->adapter_stopped = true;
dev->data->dev_started = 0;
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->tx_desc_lim = tx_desc_lim;
+ dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
+ dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
+ dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
+
dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
RTE_ETH_LINK_SPEED_10M;
ngbe_dev_interrupt_action(dev);
}
+int
+ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint8_t i, j, mask;
+ uint32_t reta;
+ uint16_t idx, shift;
+ struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!hw->is_pf) {
+ PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
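+ /*
+ * Each 32-bit RETA register packs four 8-bit entries; walk the
+ * table four entries at a time and rewrite only the bytes
+ * selected by the caller's mask.
+ */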
+ for (i = 0; i < reta_size; i += 4) {
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
+ mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+ if (!mask)
+ continue;
+
+ reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
+ for (j = 0; j < 4; j++) {
+ if (RS8(mask, j, 0x1)) {
+ reta &= ~(MS32(8 * j, 0xFF));
+ reta |= LS32(reta_conf[idx].reta[shift + j],
+ 8 * j, 0xFF);
+ }
+ }
+ wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
+ }
+ adapter->rss_reta_updated = 1;
+
+ return 0;
+}
+
+int
+ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint8_t i, j, mask;
+ uint32_t reta;
+ uint16_t idx, shift;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i += 4) {
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
+ mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+ if (!mask)
+ continue;
+
+ reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
+ for (j = 0; j < 4; j++) {
+ if (RS8(mask, j, 0x1))
+ reta_conf[idx].reta[shift + j] =
+ (uint16_t)RS32(reta, 8 * j, 0xFF);
+ }
+ }
+
+ return 0;
+}
+
static int
ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
uint32_t index, uint32_t pool)
.mac_addr_set = ngbe_set_default_mac_addr,
.uc_hash_table_set = ngbe_uc_hash_table_set,
.uc_all_hash_table_set = ngbe_uc_all_hash_table_set,
+ .reta_update = ngbe_dev_rss_reta_update,
+ .reta_query = ngbe_dev_rss_reta_query,
+ .rss_hash_update = ngbe_dev_rss_hash_update,
+ .rss_hash_conf_get = ngbe_dev_rss_hash_conf_get,
.set_mc_addr_list = ngbe_dev_set_mc_addr_list,
.rx_burst_mode_get = ngbe_rx_burst_mode_get,
.tx_burst_mode_get = ngbe_tx_burst_mode_get,
#define NGBE_VFTA_SIZE 128
#define NGBE_VLAN_TAG_SIZE 4
+#define NGBE_HKEY_MAX_INDEX 10 /* RSS hash key: 10 x 32-bit words (40 bytes) */
/*Default value of Max Rx Queue*/
#define NGBE_MAX_RX_QUEUE_NUM 8
/* The overhead from MTU to max frame size. */
#define NGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
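+/* RSS flow types the ngbe hash engine can hash over */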
+#define NGBE_RSS_OFFLOAD_ALL ( \
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
+
#define NGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define NGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
struct ngbe_hwstrip hwstrip;
struct ngbe_uta_info uta_info;
bool rx_bulk_alloc_allowed;
+
+ /* For RSS reta table update */
+ uint8_t rss_reta_updated;
};
static inline struct ngbe_adapter *
uint16_t ngbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+int ngbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+int ngbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
void ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
int ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
+int ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+int ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
uint16_t queue, bool on);
void ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
return ngbe_decode_ptype(ptid);
}
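+/*
+ * Map the 4-bit RSS-type field of the Rx descriptor to mbuf offload
+ * flags: types carrying a valid hash set RTE_MBUF_F_RX_RSS_HASH, and
+ * type 15 reports a flow-director match.
+ */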
+static inline uint64_t
+ngbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info)
+{
+ static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
+ 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
+ 0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
+ 0, 0, 0, RTE_MBUF_F_RX_FDIR,
+ };
+ return ip_rss_types_map[NGBE_RXD_RSSTYPE(pkt_info)];
+}
+
static inline uint64_t
rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
{
pkt_flags = rx_desc_status_to_pkt_flags(s[j],
rxq->vlan_flags);
pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags |=
+ ngbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
mb->ol_flags = pkt_flags;
mb->packet_type =
ngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
NGBE_PTID_MASK);
+
+ if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
+ mb->hash.rss =
+ rte_le_to_cpu_32(rxdp[j].qw0.dw1);
}
/* Move mbuf pointers from the S/W ring to the stage */
* - packet length,
* - Rx port identifier.
* 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
* - IP checksum flag,
* - VLAN TCI, if any,
* - error flags.
pkt_flags = rx_desc_status_to_pkt_flags(staterr,
rxq->vlan_flags);
pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= ngbe_rxd_pkt_info_to_pkt_flags(pkt_info);
rxm->ol_flags = pkt_flags;
rxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
NGBE_PTID_MASK);
+ if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
+ rxm->hash.rss = rte_le_to_cpu_32(rxd.qw0.dw1);
+
/*
* Store the mbuf address into the next entry of the array
* of returned packets.
* Fill the following info in the HEAD buffer of the Rx cluster:
* - RX port identifier
* - hardware offload data, if any:
+ * - RSS flag & hash
* - IP checksum flag
* - VLAN TCI, if any
* - error flags
pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= ngbe_rxd_pkt_info_to_pkt_flags(pkt_info);
head->ol_flags = pkt_flags;
head->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
NGBE_PTID_MASK);
+
+ if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
+ head->hash.rss = rte_le_to_cpu_32(desc->qw0.dw1);
}
/**
dev->data->nb_tx_queues = 0;
}
+/**
+ * Receive Side Scaling (RSS)
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source
+ * and destination ports of TCP/UDP headers, if any, of received packets are
+ * hashed against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
+ * RSS output index, which is used as the index of the Rx queue in which
+ * to store the received packets.
+ * The following output is supplied in the Rx write-back descriptor:
+ * - 32-bit result of the Microsoft RSS hash function,
+ * - 4-bit RSS type field.
+ */
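+
+/*
+ * Illustrative sketch only (hypothetical names, not driver code) of the
+ * mapping the hardware performs from RSS hash to Rx queue:
+ *
+ *   uint32_t hash  = microsoft_rss_hash(pkt, key); // 32-bit hash result
+ *   uint16_t entry = hash & 0x7F;                  // 7 LSBs index the 128-entry RETA
+ *   uint8_t  queue = reta[entry] & 0x7;            // 3-bit RSS output index
+ */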
+
+/*
+ * Used as the default RSS hash key.
+ */
+static uint8_t rss_intel_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+static void
+ngbe_rss_disable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+
+ wr32m(hw, NGBE_RACTL, NGBE_RACTL_RSSENA, 0);
+}
+
+int
+ngbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint8_t *hash_key;
+ uint32_t mrqc;
+ uint32_t rss_key;
+ uint64_t rss_hf;
+ uint16_t i;
+
+ if (!hw->is_pf) {
+ PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+
+ hash_key = rss_conf->rss_key;
+ if (hash_key) {
+ /* Fill in RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = LS32(hash_key[(i * 4) + 0], 0, 0xFF);
+ rss_key |= LS32(hash_key[(i * 4) + 1], 8, 0xFF);
+ rss_key |= LS32(hash_key[(i * 4) + 2], 16, 0xFF);
+ rss_key |= LS32(hash_key[(i * 4) + 3], 24, 0xFF);
+ wr32a(hw, NGBE_REG_RSSKEY, i, rss_key);
+ }
+ }
+
+ /* Set configured hashing protocols */
+ rss_hf = rss_conf->rss_hf & NGBE_RSS_OFFLOAD_ALL;
+
+ mrqc = rd32(hw, NGBE_RACTL);
+ mrqc &= ~NGBE_RACTL_RSSMASK;
+ if (rss_hf & RTE_ETH_RSS_IPV4)
+ mrqc |= NGBE_RACTL_RSSIPV4;
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+ mrqc |= NGBE_RACTL_RSSIPV4TCP;
+ if (rss_hf & RTE_ETH_RSS_IPV6 ||
+ rss_hf & RTE_ETH_RSS_IPV6_EX)
+ mrqc |= NGBE_RACTL_RSSIPV6;
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+ rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
+ mrqc |= NGBE_RACTL_RSSIPV6TCP;
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+ mrqc |= NGBE_RACTL_RSSIPV4UDP;
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+ rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
+ mrqc |= NGBE_RACTL_RSSIPV6UDP;
+
+ if (rss_hf)
+ mrqc |= NGBE_RACTL_RSSENA;
+ else
+ mrqc &= ~NGBE_RACTL_RSSENA;
+
+ wr32(hw, NGBE_RACTL, mrqc);
+
+ return 0;
+}
+
+int
+ngbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint8_t *hash_key;
+ uint32_t mrqc;
+ uint32_t rss_key;
+ uint64_t rss_hf;
+ uint16_t i;
+
+ hash_key = rss_conf->rss_key;
+ if (hash_key) {
+ /* Return RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = rd32a(hw, NGBE_REG_RSSKEY, i);
+ hash_key[(i * 4) + 0] = RS32(rss_key, 0, 0xFF);
+ hash_key[(i * 4) + 1] = RS32(rss_key, 8, 0xFF);
+ hash_key[(i * 4) + 2] = RS32(rss_key, 16, 0xFF);
+ hash_key[(i * 4) + 3] = RS32(rss_key, 24, 0xFF);
+ }
+ }
+
+ rss_hf = 0;
+
+ mrqc = rd32(hw, NGBE_RACTL);
+ if (mrqc & NGBE_RACTL_RSSIPV4)
+ rss_hf |= RTE_ETH_RSS_IPV4;
+ if (mrqc & NGBE_RACTL_RSSIPV4TCP)
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
+ if (mrqc & NGBE_RACTL_RSSIPV6)
+ rss_hf |= RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_IPV6_EX;
+ if (mrqc & NGBE_RACTL_RSSIPV6TCP)
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_IPV6_TCP_EX;
+ if (mrqc & NGBE_RACTL_RSSIPV4UDP)
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
+ if (mrqc & NGBE_RACTL_RSSIPV6UDP)
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_IPV6_UDP_EX;
+ if (!(mrqc & NGBE_RACTL_RSSENA))
+ rss_hf = 0;
+
+ rss_hf &= NGBE_RSS_OFFLOAD_ALL;
+
+ rss_conf->rss_hf = rss_hf;
+ return 0;
+}
+
+static void
+ngbe_rss_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rss_conf rss_conf;
+ struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t reta;
+ uint16_t i;
+ uint16_t j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /*
+ * Fill in the default redirection table: Rx queues are assigned
+ * to RETA entries round-robin (e.g. with 3 Rx queues the entries
+ * cycle 0,1,2,0,1,2,...). Four 8-bit entries are packed into each
+ * 32-bit RETA register, lowest-numbered entry in the
+ * least-significant byte.
+ */
+ if (adapter->rss_reta_updated == 0) {
+ reta = 0;
+ for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
+ if (j == dev->data->nb_rx_queues)
+ j = 0;
+ reta = (reta >> 8) | LS32(j, 24, 0xFF);
+ if ((i & 3) == 3)
+ wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
+ }
+ }
+ /*
+ * Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ ngbe_dev_rss_hash_update(dev, &rss_conf);
+}
+
void ngbe_configure_port(struct rte_eth_dev *dev)
{
struct ngbe_hw *hw = ngbe_dev_hw(dev);
return 0;
}
+static int
+ngbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case RTE_ETH_MQ_RX_RSS:
+ ngbe_rss_configure(dev);
+ break;
+
+ case RTE_ETH_MQ_RX_NONE:
+ default:
+ /* If mq_mode is none, disable RSS. */
+ ngbe_rss_disable(dev);
+ break;
+ }
+
+ return 0;
+}
+
void
ngbe_set_rx_function(struct rte_eth_dev *dev)
{
if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
+
+ /*
+ * Configure multi-queue receive (RSS) according to rxmode.mq_mode.
+ */
+ ngbe_dev_mq_rx_configure(dev);
+
/*
* Setup the Checksum Register.
+ * Disable Full-Packet Checksum, which is mutually exclusive with RSS.
* Enable IP/L4 checksum computation by hardware if requested to do so.
*/
rxcsum = rd32(hw, NGBE_PSRCTL);