net/txgbe: support RSS
author     Jiawen Wu <jiawenwu@trustnetic.com>
Mon, 19 Oct 2020 08:53:59 +0000 (16:53 +0800)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
Tue, 3 Nov 2020 22:24:27 +0000 (23:24 +0100)
Add RSS configuration, supporting RSS hash and RETA operations for the PF.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
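
For context, the new reta_update/reta_query and rss_hash_update/rss_hash_conf_get
callbacks are reached through the generic ethdev API. The snippet below is a
minimal, illustrative sketch (not part of this patch) of how an application could
select the hash input fields and spread the 128-entry RETA round-robin across its
RX queues; configure_rss() is a hypothetical helper, the chosen hash types are
arbitrary, and port setup plus error handling are assumed to happen elsewhere.

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

/* Illustrative helper: program RSS hash types and a round-robin RETA. */
static int
configure_rss(uint16_t port_id, uint16_t nb_rx_queues)
{
        struct rte_eth_rss_reta_entry64 reta[ETH_RSS_RETA_SIZE_128 / RTE_RETA_GROUP_SIZE];
        struct rte_eth_rss_conf rss_conf = {
                .rss_key = NULL, /* keep the key the PMD programmed at start */
                .rss_hf  = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
        };
        uint16_t i;
        int ret;

        /* Select the hash input fields; the PMD maps them to TXGBE_RACTL bits. */
        ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
        if (ret != 0)
                return ret;

        /* Fill the 128-entry redirection table round-robin over the RX queues. */
        memset(reta, 0, sizeof(reta));
        for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
                reta[i / RTE_RETA_GROUP_SIZE].mask |= UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
                reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] = i % nb_rx_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta, ETH_RSS_RETA_SIZE_128);
}

Leaving rss_key NULL keeps whatever key the PMD programmed at start-up: the
built-in default in txgbe_rxtx.c unless a key was supplied through
dev_conf.rx_adv_conf.rss_conf.
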
doc/guides/nics/features/txgbe.ini
doc/guides/nics/txgbe.rst
drivers/net/txgbe/meson.build
drivers/net/txgbe/txgbe_ethdev.c
drivers/net/txgbe/txgbe_ethdev.h
drivers/net/txgbe/txgbe_rxtx.c

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 578ec05..fd9c585 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -15,6 +15,9 @@ LRO                  = Y
 TSO                  = Y
 Unicast MAC filter   = Y
 Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+RSS reta update      = Y
 VMDq                 = Y
 SR-IOV               = Y
 VLAN filter          = Y
diff --git a/doc/guides/nics/txgbe.rst b/doc/guides/nics/txgbe.rst
index 92731f6..471dafd 100644
--- a/doc/guides/nics/txgbe.rst
+++ b/doc/guides/nics/txgbe.rst
@@ -11,6 +11,7 @@ Features
 --------
 
 - Multiple queues for TX and RX
+- Receive Side Scaling (RSS)
 - MAC/VLAN filtering
 - Packet type information
 - Checksum offload
diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build
index 7b6c173..345dffa 100644
--- a/drivers/net/txgbe/meson.build
+++ b/drivers/net/txgbe/meson.build
@@ -11,6 +11,8 @@ sources = files(
        'txgbe_rxtx.c',
 )
 
+deps += ['hash']
+
 includes += include_directories('base')
 
 install_headers('rte_pmd_txgbe.h')
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 5515fb3..2afc97c 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -1476,6 +1476,7 @@ static int
 txgbe_dev_stop(struct rte_eth_dev *dev)
 {
        struct rte_eth_link link;
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
@@ -1533,6 +1534,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
                intr_handle->intr_vec = NULL;
        }
 
+       adapter->rss_reta_updated = 0;
        wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
 
        hw->adapter_stopped = true;
@@ -2632,6 +2634,91 @@ txgbe_dev_interrupt_handler(void *param)
        txgbe_dev_interrupt_action(dev, dev->intr_handle);
 }
 
+int
+txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+                         struct rte_eth_rss_reta_entry64 *reta_conf,
+                         uint16_t reta_size)
+{
+       uint8_t i, j, mask;
+       uint32_t reta;
+       uint16_t idx, shift;
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (!txgbe_rss_update_sp(hw->mac.type)) {
+               PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+                       "NIC.");
+               return -ENOTSUP;
+       }
+
+       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+               PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
+                       "(%d) does not match the number supported by hardware "
+                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < reta_size; i += 4) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+               if (!mask)
+                       continue;
+
+               reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
+               for (j = 0; j < 4; j++) {
+                       if (RS8(mask, j, 0x1)) {
+                               reta  &= ~(MS32(8 * j, 0xFF));
+                               reta |= LS32(reta_conf[idx].reta[shift + j],
+                                               8 * j, 0xFF);
+                       }
+               }
+               wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+       }
+       adapter->rss_reta_updated = 1;
+
+       return 0;
+}
+
+int
+txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+                        struct rte_eth_rss_reta_entry64 *reta_conf,
+                        uint16_t reta_size)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint8_t i, j, mask;
+       uint32_t reta;
+       uint16_t idx, shift;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+               PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
+                       "(%d) does not match the number supported by hardware "
+                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < reta_size; i += 4) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+               if (!mask)
+                       continue;
+
+               reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
+               for (j = 0; j < 4; j++) {
+                       if (RS8(mask, j, 0x1))
+                               reta_conf[idx].reta[shift + j] =
+                                       (uint16_t)RS32(reta, 8 * j, 0xFF);
+               }
+       }
+
+       return 0;
+}
+
 static int
 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
@@ -2988,6 +3075,17 @@ txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                                         txgbe_dev_addr_list_itr, TRUE);
 }
 
+bool
+txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
+{
+       switch (mac_type) {
+       case txgbe_mac_raptor:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .dev_configure              = txgbe_dev_configure,
        .dev_infos_get              = txgbe_dev_info_get,
@@ -3027,6 +3125,10 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .uc_hash_table_set          = txgbe_uc_hash_table_set,
        .uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
        .set_queue_rate_limit       = txgbe_set_queue_rate_limit,
+       .reta_update                = txgbe_dev_rss_reta_update,
+       .reta_query                 = txgbe_dev_rss_reta_query,
+       .rss_hash_update            = txgbe_dev_rss_hash_update,
+       .rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
        .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
        .rxq_info_get               = txgbe_rxq_info_get,
        .txq_info_get               = txgbe_txq_info_get,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index bf13e03..e264bcc 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -142,6 +142,8 @@ struct txgbe_adapter {
        struct txgbe_uta_info       uta_info;
        struct txgbe_filter_info    filter;
        bool rx_bulk_alloc_allowed;
+       /* For RSS reta table update */
+       uint8_t rss_reta_updated;
 };
 
 #define TXGBE_DEV_ADAPTER(dev) \
@@ -242,6 +244,14 @@ uint16_t txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 uint16_t txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);
 
+int txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+                             struct rte_eth_rss_conf *rss_conf);
+
+int txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+                               struct rte_eth_rss_conf *rss_conf);
+
+bool txgbe_rss_update_sp(enum txgbe_mac_type mac_type);
+
 void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
 
@@ -328,6 +338,12 @@ const uint32_t *txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 int txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                                      struct rte_ether_addr *mc_addr_set,
                                      uint32_t nb_mc_addr);
+int txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+                       struct rte_eth_rss_reta_entry64 *reta_conf,
+                       uint16_t reta_size);
+int txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+                       struct rte_eth_rss_reta_entry64 *reta_conf,
+                       uint16_t reta_size);
 void txgbe_dev_setup_link_alarm_handler(void *param);
 void txgbe_read_stats_registers(struct txgbe_hw *hw,
                           struct txgbe_hw_stats *hw_stats);
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 6aaef9e..a2b1d9c 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -2548,6 +2548,33 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)
        dev->data->nb_tx_queues = 0;
 }
 
+/**
+ * Receive Side Scaling (RSS)
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source
+ * and destination ports of TCP/UDP headers, if any, of received packets are
+ * hashed against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
+ * RSS output index, which is used as the index of the RX queue in which the
+ * received packets are stored.
+ * The following output is supplied in the RX write-back descriptor:
+ *     - 32-bit result of the Microsoft RSS hash function,
+ *     - 4-bit RSS type field.
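+ *
+ * Illustrative example (values chosen for clarity, not taken from hardware):
+ * with a computed hash of 0x1a2b3c4d, the RETA index is the low 7 bits,
+ * 0x1a2b3c4d & 0x7f = 0x4d (77), so the packet is steered to the RX queue
+ * stored in RETA entry 77.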
+ */
+
+/*
+ * Used as the default key.
+ */
+static uint8_t rss_intel_key[40] = {
+       0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+       0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+       0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+       0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+       0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
 static void
 txgbe_rss_disable(struct rte_eth_dev *dev)
 {
@@ -2558,6 +2585,151 @@ txgbe_rss_disable(struct rte_eth_dev *dev)
        wr32m(hw, TXGBE_RACTL, TXGBE_RACTL_RSSENA, 0);
 }
 
+int
+txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+                         struct rte_eth_rss_conf *rss_conf)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint8_t  *hash_key;
+       uint32_t mrqc;
+       uint32_t rss_key;
+       uint64_t rss_hf;
+       uint16_t i;
+
+       if (!txgbe_rss_update_sp(hw->mac.type)) {
+               PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
+                       "NIC.");
+               return -ENOTSUP;
+       }
+
+       hash_key = rss_conf->rss_key;
+       if (hash_key) {
+               /* Fill in RSS hash key */
+               for (i = 0; i < 10; i++) {
+                       rss_key  = LS32(hash_key[(i * 4) + 0], 0, 0xFF);
+                       rss_key |= LS32(hash_key[(i * 4) + 1], 8, 0xFF);
+                       rss_key |= LS32(hash_key[(i * 4) + 2], 16, 0xFF);
+                       rss_key |= LS32(hash_key[(i * 4) + 3], 24, 0xFF);
+                       wr32a(hw, TXGBE_REG_RSSKEY, i, rss_key);
+               }
+       }
+
+       /* Set configured hashing protocols */
+       rss_hf = rss_conf->rss_hf & TXGBE_RSS_OFFLOAD_ALL;
+       mrqc = rd32(hw, TXGBE_RACTL);
+       mrqc &= ~TXGBE_RACTL_RSSMASK;
+       if (rss_hf & ETH_RSS_IPV4)
+               mrqc |= TXGBE_RACTL_RSSIPV4;
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+               mrqc |= TXGBE_RACTL_RSSIPV4TCP;
+       if (rss_hf & ETH_RSS_IPV6 ||
+           rss_hf & ETH_RSS_IPV6_EX)
+               mrqc |= TXGBE_RACTL_RSSIPV6;
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
+           rss_hf & ETH_RSS_IPV6_TCP_EX)
+               mrqc |= TXGBE_RACTL_RSSIPV6TCP;
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+               mrqc |= TXGBE_RACTL_RSSIPV4UDP;
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
+           rss_hf & ETH_RSS_IPV6_UDP_EX)
+               mrqc |= TXGBE_RACTL_RSSIPV6UDP;
+
+       if (rss_hf)
+               mrqc |= TXGBE_RACTL_RSSENA;
+       else
+               mrqc &= ~TXGBE_RACTL_RSSENA;
+
+       wr32(hw, TXGBE_RACTL, mrqc);
+
+       return 0;
+}
+
+int
+txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+                           struct rte_eth_rss_conf *rss_conf)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint8_t *hash_key;
+       uint32_t mrqc;
+       uint32_t rss_key;
+       uint64_t rss_hf;
+       uint16_t i;
+
+       hash_key = rss_conf->rss_key;
+       if (hash_key) {
+               /* Return RSS hash key */
+               for (i = 0; i < 10; i++) {
+                       rss_key = rd32a(hw, TXGBE_REG_RSSKEY, i);
+                       hash_key[(i * 4) + 0] = RS32(rss_key, 0, 0xFF);
+                       hash_key[(i * 4) + 1] = RS32(rss_key, 8, 0xFF);
+                       hash_key[(i * 4) + 2] = RS32(rss_key, 16, 0xFF);
+                       hash_key[(i * 4) + 3] = RS32(rss_key, 24, 0xFF);
+               }
+       }
+
+       rss_hf = 0;
+       mrqc = rd32(hw, TXGBE_RACTL);
+       if (mrqc & TXGBE_RACTL_RSSIPV4)
+               rss_hf |= ETH_RSS_IPV4;
+       if (mrqc & TXGBE_RACTL_RSSIPV4TCP)
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+       if (mrqc & TXGBE_RACTL_RSSIPV6)
+               rss_hf |= ETH_RSS_IPV6 |
+                         ETH_RSS_IPV6_EX;
+       if (mrqc & TXGBE_RACTL_RSSIPV6TCP)
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
+                         ETH_RSS_IPV6_TCP_EX;
+       if (mrqc & TXGBE_RACTL_RSSIPV4UDP)
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+       if (mrqc & TXGBE_RACTL_RSSIPV6UDP)
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
+                         ETH_RSS_IPV6_UDP_EX;
+       if (!(mrqc & TXGBE_RACTL_RSSENA))
+               rss_hf = 0;
+
+       rss_hf &= TXGBE_RSS_OFFLOAD_ALL;
+
+       rss_conf->rss_hf = rss_hf;
+       return 0;
+}
+
+static void
+txgbe_rss_configure(struct rte_eth_dev *dev)
+{
+       struct rte_eth_rss_conf rss_conf;
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t reta;
+       uint16_t i;
+       uint16_t j;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /*
+        * Fill in redirection table
+        * The byte-swap is needed because NIC registers are in
+        * little-endian order.
+        */
+       if (adapter->rss_reta_updated == 0) {
+               reta = 0;
+               for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+                       if (j == dev->data->nb_rx_queues)
+                               j = 0;
+                       reta = (reta >> 8) | LS32(j, 24, 0xFF);
+                       if ((i & 3) == 3)
+                               wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+               }
+       }
+       /*
+        * Configure the RSS key and the RSS protocols used to compute
+        * the RSS hash of input packets.
+        */
+       rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+       if (rss_conf.rss_key == NULL)
+               rss_conf.rss_key = rss_intel_key; /* Default hash key */
+       txgbe_dev_rss_hash_update(dev, &rss_conf);
+}
+
 #define NUM_VFTA_REGISTERS 128
 
 /*
@@ -2719,6 +2891,38 @@ txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
        return 0;
 }
 
+static int
+txgbe_config_vf_rss(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw;
+       uint32_t mrqc;
+
+       txgbe_rss_configure(dev);
+
+       hw = TXGBE_DEV_HW(dev);
+
+       /* enable VF RSS */
+       mrqc = rd32(hw, TXGBE_PORTCTL);
+       mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+       switch (RTE_ETH_DEV_SRIOV(dev).active) {
+       case ETH_64_POOLS:
+               mrqc |= TXGBE_PORTCTL_NUMVT_64;
+               break;
+
+       case ETH_32_POOLS:
+               mrqc |= TXGBE_PORTCTL_NUMVT_32;
+               break;
+
+       default:
+               PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
+               return -EINVAL;
+       }
+
+       wr32(hw, TXGBE_PORTCTL, mrqc);
+
+       return 0;
+}
+
 static int
 txgbe_config_vf_default(struct rte_eth_dev *dev)
 {
@@ -2756,9 +2960,14 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
        if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
                /*
                 * SRIOV inactive scheme
-                * VMDq multi-queue setting
+                * any RSS w/o VMDq multi-queue setting
                 */
                switch (dev->data->dev_conf.rxmode.mq_mode) {
+               case ETH_MQ_RX_RSS:
+               case ETH_MQ_RX_VMDQ_RSS:
+                       txgbe_rss_configure(dev);
+                       break;
+
                case ETH_MQ_RX_VMDQ_ONLY:
                        txgbe_vmdq_rx_hw_configure(dev);
                        break;
@@ -2771,8 +2980,13 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
                }
        } else {
                /* SRIOV active scheme
+                * Support RSS together with SRIOV.
                 */
                switch (dev->data->dev_conf.rxmode.mq_mode) {
+               case ETH_MQ_RX_RSS:
+               case ETH_MQ_RX_VMDQ_RSS:
+                       txgbe_config_vf_rss(dev);
+                       break;
                default:
                        txgbe_config_vf_default(dev);
                        break;