net/vmxnet3: reorder ethdev callbacks initialization

This diff against drivers/net/vmxnet3/vmxnet3_ethdev.c groups related callbacks in vmxnet3_eth_dev_ops and, along the way, picks up the RTE_ETH_-prefixed ethdev names, the opaque rte_intr_handle accessors, the two-argument queue-release callbacks, VMXNET3 revision 5 detection, an rx_queue_count handler, and new RSS RETA update/query callbacks.

diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 2f40ae9..e84d304 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
 #define        VMXNET3_TX_MAX_SEG      UINT8_MAX
 
 #define VMXNET3_TX_OFFLOAD_CAP         \
-       (DEV_TX_OFFLOAD_VLAN_INSERT |   \
-        DEV_TX_OFFLOAD_TCP_CKSUM |     \
-        DEV_TX_OFFLOAD_UDP_CKSUM |     \
-        DEV_TX_OFFLOAD_TCP_TSO |       \
-        DEV_TX_OFFLOAD_MULTI_SEGS)
+       (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |       \
+        RTE_ETH_TX_OFFLOAD_TCP_CKSUM |         \
+        RTE_ETH_TX_OFFLOAD_UDP_CKSUM |         \
+        RTE_ETH_TX_OFFLOAD_TCP_TSO |           \
+        RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define VMXNET3_RX_OFFLOAD_CAP         \
-       (DEV_RX_OFFLOAD_VLAN_STRIP |    \
-        DEV_RX_OFFLOAD_VLAN_FILTER |   \
-        DEV_RX_OFFLOAD_SCATTER |       \
-        DEV_RX_OFFLOAD_UDP_CKSUM |     \
-        DEV_RX_OFFLOAD_TCP_CKSUM |     \
-        DEV_RX_OFFLOAD_TCP_LRO |       \
-        DEV_RX_OFFLOAD_JUMBO_FRAME |   \
-        DEV_RX_OFFLOAD_RSS_HASH)
+       (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |        \
+        RTE_ETH_RX_OFFLOAD_VLAN_FILTER |       \
+        RTE_ETH_RX_OFFLOAD_SCATTER |           \
+        RTE_ETH_RX_OFFLOAD_UDP_CKSUM |         \
+        RTE_ETH_RX_OFFLOAD_TCP_CKSUM |         \
+        RTE_ETH_RX_OFFLOAD_TCP_LRO |           \
+        RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 int vmxnet3_segs_dynfield_offset = -1;
 
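The RTE_ETH_-prefixed names above replace the old DEV_TX_OFFLOAD_*/DEV_RX_OFFLOAD_* macros deprecated in the DPDK 21.11 namespace cleanup. As a minimal sketch, assuming a valid probed port_id (an assumption, not part of the patch), this is how such a capability mask relates to what an application sees through rte_eth_dev_info_get():

/* Sketch only: check requested Rx offloads against what the device
 * advertises; port_id is assumed to be a valid ethdev port. */
struct rte_eth_dev_info dev_info;
uint64_t wanted = RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_LRO;

if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
        rte_exit(EXIT_FAILURE, "cannot query port %u\n", port_id);
if ((dev_info.rx_offload_capa & wanted) != wanted)
        rte_exit(EXIT_FAILURE, "port %u lacks requested Rx offloads\n", port_id);
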
@@ -96,6 +95,14 @@ static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
                                 struct rte_ether_addr *mac_addr);
 static void vmxnet3_process_events(struct rte_eth_dev *dev);
 static void vmxnet3_interrupt_handler(void *param);
+static int
+vmxnet3_rss_reta_update(struct rte_eth_dev *dev,
+                       struct rte_eth_rss_reta_entry64 *reta_conf,
+                       uint16_t reta_size);
+static int
+vmxnet3_rss_reta_query(struct rte_eth_dev *dev,
+                      struct rte_eth_rss_reta_entry64 *reta_conf,
+                      uint16_t reta_size);
 static int vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                                uint16_t queue_id);
 static int vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
@@ -117,27 +124,29 @@ static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
        .dev_stop             = vmxnet3_dev_stop,
        .dev_close            = vmxnet3_dev_close,
        .dev_reset            = vmxnet3_dev_reset,
+       .link_update          = vmxnet3_dev_link_update,
        .promiscuous_enable   = vmxnet3_dev_promiscuous_enable,
        .promiscuous_disable  = vmxnet3_dev_promiscuous_disable,
        .allmulticast_enable  = vmxnet3_dev_allmulticast_enable,
        .allmulticast_disable = vmxnet3_dev_allmulticast_disable,
-       .link_update          = vmxnet3_dev_link_update,
+       .mac_addr_set         = vmxnet3_mac_addr_set,
+       .mtu_set              = vmxnet3_dev_mtu_set,
        .stats_get            = vmxnet3_dev_stats_get,
-       .xstats_get_names     = vmxnet3_dev_xstats_get_names,
-       .xstats_get           = vmxnet3_dev_xstats_get,
        .stats_reset          = vmxnet3_dev_stats_reset,
-       .mac_addr_set         = vmxnet3_mac_addr_set,
+       .xstats_get           = vmxnet3_dev_xstats_get,
+       .xstats_get_names     = vmxnet3_dev_xstats_get_names,
        .dev_infos_get        = vmxnet3_dev_info_get,
        .dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
-       .mtu_set              = vmxnet3_dev_mtu_set,
        .vlan_filter_set      = vmxnet3_dev_vlan_filter_set,
        .vlan_offload_set     = vmxnet3_dev_vlan_offload_set,
        .rx_queue_setup       = vmxnet3_dev_rx_queue_setup,
        .rx_queue_release     = vmxnet3_dev_rx_queue_release,
-       .tx_queue_setup       = vmxnet3_dev_tx_queue_setup,
-       .tx_queue_release     = vmxnet3_dev_tx_queue_release,
        .rx_queue_intr_enable = vmxnet3_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = vmxnet3_dev_rx_queue_intr_disable,
+       .tx_queue_setup       = vmxnet3_dev_tx_queue_setup,
+       .tx_queue_release     = vmxnet3_dev_tx_queue_release,
+       .reta_update          = vmxnet3_rss_reta_update,
+       .reta_query           = vmxnet3_rss_reta_query,
 };
 
 struct vmxnet3_xstats_name_off {
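The reordering in vmxnet3_eth_dev_ops above is purely organizational; dispatch goes by field name, so behavior is unchanged. For orientation, a simplified sketch (not the exact ethdev library source) of how a generic API call reaches a callback registered in this table:

/* Simplified sketch of ethdev dispatch; the real
 * rte_eth_dev_rss_reta_update() performs more validation.
 * Assumes the internal ethdev_driver.h definitions are visible. */
static int
example_reta_update(uint16_t port_id,
                    struct rte_eth_rss_reta_entry64 *reta_conf,
                    uint16_t reta_size)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (dev->dev_ops->reta_update == NULL)
                return -ENOTSUP; /* callback not registered by the PMD */
        return dev->dev_ops->reta_update(dev, reta_conf, reta_size);
}
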
@@ -285,6 +294,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
        eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
        eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
        eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
+       eth_dev->rx_queue_count = vmxnet3_dev_rx_queue_count;
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
        /* extra mbuf field is required to guess MSS */
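The new rx_queue_count hook backs the generic rte_eth_rx_queue_count() call. A hedged usage sketch, with port_id assumed valid and started:

/* Sketch: query how many Rx descriptors are currently filled.
 * A negative return means invalid port/queue or unsupported op. */
int used = rte_eth_rx_queue_count(port_id, 0);
if (used >= 0)
        printf("rx queue 0: %d descriptors in use\n", used);
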
@@ -317,9 +327,12 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 
        /* Check h/w version compatibility with driver. */
        ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
-       PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
 
-       if (ver & (1 << VMXNET3_REV_4)) {
+       if (ver & (1 << VMXNET3_REV_5)) {
+               VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+                                      1 << VMXNET3_REV_5);
+               hw->version = VMXNET3_REV_5 + 1;
+       } else if (ver & (1 << VMXNET3_REV_4)) {
                VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
                                       1 << VMXNET3_REV_4);
                hw->version = VMXNET3_REV_4 + 1;
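The handshake above reads the revision bitmap from VMXNET3_REG_VRRS, picks the highest revision both sides support, and acknowledges it by writing that single bit back; hw->version then stores the 1-based revision. A condensed sketch of the same pattern (the _example names are hypothetical, not from the driver):

/* Sketch of the VRRS negotiation: probe the revision bitmap,
 * select the highest common revision, ack it with one bit. */
static const int revs_example[] = { VMXNET3_REV_5, VMXNET3_REV_4,
                                    VMXNET3_REV_3, VMXNET3_REV_2,
                                    VMXNET3_REV_1 };

static int
negotiate_version_example(struct vmxnet3_hw *hw, uint32_t ver)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(revs_example); i++) {
                if (ver & (1u << revs_example[i])) {
                        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
                                               1u << revs_example[i]);
                        hw->version = revs_example[i] + 1;
                        return 0;
                }
        }
        return -1; /* no common revision */
}
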
@@ -399,9 +412,9 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 
        /* set the initial link status */
        memset(&link, 0, sizeof(link));
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
-       link.link_speed = ETH_SPEED_NUM_10G;
-       link.link_autoneg = ETH_LINK_FIXED;
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+       link.link_speed = RTE_ETH_SPEED_NUM_10G;
+       link.link_autoneg = RTE_ETH_LINK_FIXED;
        rte_eth_linkstatus_set(eth_dev, &link);
 
        return 0;
@@ -487,8 +500,8 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
            dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
@@ -548,7 +561,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
        hw->queueDescPA = mz->iova;
        hw->queue_desc_len = (uint16_t)size;
 
-       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+       if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
                /* Allocate memory structure for UPT1_RSSConf and configure */
                mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
                                      "rss_conf", rte_socket_id(),
@@ -620,11 +633,9 @@ vmxnet3_configure_msix(struct rte_eth_dev *dev)
                return -1;
        }
 
-       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
-               intr_handle->intr_vec =
-                       rte_zmalloc("intr_vec",
-                                   dev->data->nb_rx_queues * sizeof(int), 0);
-               if (intr_handle->intr_vec == NULL) {
+       if (rte_intr_dp_is_en(intr_handle)) {
+               if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+                                                  dev->data->nb_rx_queues)) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d Rx queues intr_vec",
                                        dev->data->nb_rx_queues);
                        rte_intr_efd_disable(intr_handle);
@@ -635,8 +646,7 @@ vmxnet3_configure_msix(struct rte_eth_dev *dev)
        if (!rte_intr_allow_others(intr_handle) &&
            dev->data->dev_conf.intr_conf.lsc != 0) {
                PMD_INIT_LOG(ERR, "not enough intr vector to support both Rx interrupt and LSC");
-               rte_free(intr_handle->intr_vec);
-               intr_handle->intr_vec = NULL;
+               rte_intr_vec_list_free(intr_handle);
                rte_intr_efd_disable(intr_handle);
                return -1;
        }
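
These interrupt-handling hunks follow the DPDK 21.11 change that made struct rte_intr_handle opaque: direct intr_vec field access gives way to the rte_intr_vec_list_*() accessors. A condensed sketch of the resulting lifecycle, assuming intr_handle and nb_rx_queues are in scope:

/* Sketch: allocate the per-queue vector list, map Rx queue i to
 * MSI-X vector i + 1, and release everything again on stop. */
if (rte_intr_dp_is_en(intr_handle) &&
    rte_intr_vec_list_alloc(intr_handle, "intr_vec", nb_rx_queues) != 0) {
        rte_intr_efd_disable(intr_handle);
        return -1;
}
for (i = 0; i < nb_rx_queues; i++)
        if (rte_intr_vec_list_index_set(intr_handle, i, i + 1) != 0)
                return -rte_errno;
/* ... datapath runs; on dev_stop: */
rte_intr_efd_disable(intr_handle);
rte_intr_vec_list_free(intr_handle);
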
@@ -644,17 +654,19 @@ vmxnet3_configure_msix(struct rte_eth_dev *dev)
        /* if we cannot allocate one MSI-X vector per queue, don't enable
         * interrupt mode.
         */
-       if (hw->intr.num_intrs != (intr_handle->nb_efd + 1)) {
+       if (hw->intr.num_intrs !=
+                               (rte_intr_nb_efd_get(intr_handle) + 1)) {
                PMD_INIT_LOG(ERR, "Device configured with %d Rx intr vectors, expecting %d",
-                               hw->intr.num_intrs, intr_handle->nb_efd + 1);
-               rte_free(intr_handle->intr_vec);
-               intr_handle->intr_vec = NULL;
+                               hw->intr.num_intrs,
+                               rte_intr_nb_efd_get(intr_handle) + 1);
+               rte_intr_vec_list_free(intr_handle);
                rte_intr_efd_disable(intr_handle);
                return -1;
        }
 
        for (i = 0; i < dev->data->nb_rx_queues; i++)
-               intr_handle->intr_vec[i] = i + 1;
+               if (rte_intr_vec_list_index_set(intr_handle, i, i + 1))
+                       return -rte_errno;
 
        for (i = 0; i < hw->intr.num_intrs; i++)
                hw->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
@@ -802,7 +814,9 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
                if (hw->intr.lsc_only)
                        tqd->conf.intrIdx = 1;
                else
-                       tqd->conf.intrIdx = intr_handle->intr_vec[i];
+                       tqd->conf.intrIdx =
+                               rte_intr_vec_list_index_get(intr_handle,
+                                                                  i);
                tqd->status.stopped = TRUE;
                tqd->status.error   = 0;
                memset(&tqd->stats, 0, sizeof(tqd->stats));
@@ -825,7 +839,9 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
                if (hw->intr.lsc_only)
                        rqd->conf.intrIdx = 1;
                else
-                       rqd->conf.intrIdx = intr_handle->intr_vec[i];
+                       rqd->conf.intrIdx =
+                               rte_intr_vec_list_index_get(intr_handle,
+                                                                  i);
                rqd->status.stopped = TRUE;
                rqd->status.error   = 0;
                memset(&rqd->stats, 0, sizeof(rqd->stats));
@@ -844,15 +860,15 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
        devRead->rxFilterConf.rxMode = 0;
 
        /* Setting up feature flags */
-       if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
+       if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
                devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
 
-       if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+       if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
                devRead->misc.uptFeatures |= VMXNET3_F_LRO;
                devRead->misc.maxNumRxSG = 0;
        }
 
-       if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+       if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
                ret = vmxnet3_rss_configure(dev);
                if (ret != VMXNET3_SUCCESS)
                        return ret;
@@ -864,7 +880,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
        }
 
        ret = vmxnet3_dev_vlan_offload_set(dev,
-                       ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+                       RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
        if (ret)
                return ret;
 
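The RTE_ETH_VLAN_*_MASK bits above are the same ones applications pass to rte_eth_dev_set_vlan_offload(); a hedged sketch, with port_id an assumption:

/* Sketch: enable VLAN stripping + filtering at runtime (bits left
 * clear are disabled); the PMD then toggles UPT1_F_RXVLAN and the
 * VLAN filter table as shown above. */
int ret = rte_eth_dev_set_vlan_offload(port_id,
                RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
if (ret != 0)
        printf("vlan offload update failed: %d\n", ret);
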
@@ -931,7 +947,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
        }
 
        if (VMXNET3_VERSION_GE_4(hw) &&
-           dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+           dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
                /* Check for additional RSS  */
                ret = vmxnet3_v4_rss_configure(dev);
                if (ret != VMXNET3_SUCCESS) {
@@ -1022,10 +1038,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
 
        /* Clean datapath event and queue/vector mapping */
        rte_intr_efd_disable(intr_handle);
-       if (intr_handle->intr_vec != NULL) {
-               rte_free(intr_handle->intr_vec);
-               intr_handle->intr_vec = NULL;
-       }
+       rte_intr_vec_list_free(intr_handle);
 
        /* quiesce the device first */
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
@@ -1040,9 +1053,9 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
 
        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
-       link.link_speed = ETH_SPEED_NUM_10G;
-       link.link_autoneg = ETH_LINK_FIXED;
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+       link.link_speed = RTE_ETH_SPEED_NUM_10G;
+       link.link_autoneg = RTE_ETH_LINK_FIXED;
        rte_eth_linkstatus_set(dev, &link);
 
        hw->adapter_stopped = 1;
@@ -1058,18 +1071,12 @@ vmxnet3_free_queues(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               void *rxq = dev->data->rx_queues[i];
-
-               vmxnet3_dev_rx_queue_release(rxq);
-       }
+       for (i = 0; i < dev->data->nb_rx_queues; i++)
+               vmxnet3_dev_rx_queue_release(dev, i);
        dev->data->nb_rx_queues = 0;
 
-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               void *txq = dev->data->tx_queues[i];
-
-               vmxnet3_dev_tx_queue_release(txq);
-       }
+       for (i = 0; i < dev->data->nb_tx_queues; i++)
+               vmxnet3_dev_tx_queue_release(dev, i);
        dev->data->nb_tx_queues = 0;
 }
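
The two-argument release calls match the 21.11 ethdev change to the queue-release callback signature, which now takes the device and a queue index rather than a bare queue pointer. A sketch of the shape a PMD implements (the example_ names are illustrative):

/* Sketch: post-21.11 queue release callback. The PMD looks the
 * queue up by index instead of receiving the pointer directly. */
static void
example_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        struct example_rxq *rxq = dev->data->rx_queues[qid];

        if (rxq == NULL)
                return;
        /* free descriptor rings and sw ring, then the queue itself */
        rte_free(rxq);
        dev->data->rx_queues[qid] = NULL;
}
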
 
@@ -1372,7 +1379,7 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
        dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
        dev_info->min_mtu = VMXNET3_MIN_MTU;
        dev_info->max_mtu = VMXNET3_MAX_MTU;
-       dev_info->speed_capa = ETH_LINK_SPEED_10G;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
        dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
 
        dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
@@ -1454,10 +1461,10 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
        ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
 
        if (ret & 0x1)
-               link.link_status = ETH_LINK_UP;
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
-       link.link_speed = ETH_SPEED_NUM_10G;
-       link.link_autoneg = ETH_LINK_FIXED;
+               link.link_status = RTE_ETH_LINK_UP;
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+       link.link_speed = RTE_ETH_SPEED_NUM_10G;
+       link.link_autoneg = RTE_ETH_LINK_FIXED;
 
        return rte_eth_linkstatus_set(dev, &link);
 }
@@ -1510,7 +1517,7 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
        uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
        uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-       if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
        else
                memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1580,8 +1587,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        uint32_t *vf_table = devRead->rxFilterConf.vfTable;
        uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-       if (mask & ETH_VLAN_STRIP_MASK) {
-               if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+               if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
                else
                        devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
@@ -1590,8 +1597,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                                       VMXNET3_CMD_UPDATE_FEATURE);
        }
 
-       if (mask & ETH_VLAN_FILTER_MASK) {
-               if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+               if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
                else
                        memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1677,7 +1684,9 @@ vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct vmxnet3_hw *hw = dev->data->dev_private;
 
-       vmxnet3_enable_intr(hw, dev->intr_handle->intr_vec[queue_id]);
+       vmxnet3_enable_intr(hw,
+                           rte_intr_vec_list_index_get(dev->intr_handle,
+                                                              queue_id));
 
        return 0;
 }
@@ -1687,7 +1696,8 @@ vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct vmxnet3_hw *hw = dev->data->dev_private;
 
-       vmxnet3_disable_intr(hw, dev->intr_handle->intr_vec[queue_id]);
+       vmxnet3_disable_intr(hw,
+               rte_intr_vec_list_index_get(dev->intr_handle, queue_id));
 
        return 0;
 }
@@ -1697,3 +1707,60 @@ RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_driver, driver, NOTICE);
+
+static int
+vmxnet3_rss_reta_update(struct rte_eth_dev *dev,
+                       struct rte_eth_rss_reta_entry64 *reta_conf,
+                       uint16_t reta_size)
+{
+       int i, idx, shift;
+       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct VMXNET3_RSSConf *dev_rss_conf = hw->rss_conf;
+
+       if (reta_size != dev_rss_conf->indTableSize) {
+               PMD_DRV_LOG(ERR,
+                       "The size of hash lookup table configured (%d) doesn't match "
+                       "the supported number (%d)",
+                       reta_size, dev_rss_conf->indTableSize);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < reta_size; i++) {
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
+               if (reta_conf[idx].mask & RTE_BIT64(shift))
+                       dev_rss_conf->indTable[i] = (uint8_t)reta_conf[idx].reta[shift];
+       }
+
+       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+                               VMXNET3_CMD_UPDATE_RSSIDT);
+
+       return 0;
+}
+
+static int
+vmxnet3_rss_reta_query(struct rte_eth_dev *dev,
+                      struct rte_eth_rss_reta_entry64 *reta_conf,
+                      uint16_t reta_size)
+{
+       int i, idx, shift;
+       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct VMXNET3_RSSConf *dev_rss_conf = hw->rss_conf;
+
+       if (reta_size != dev_rss_conf->indTableSize) {
+               PMD_DRV_LOG(ERR,
+                       "Size of requested hash lookup table (%d) doesn't "
+                       "match the configured size (%d)",
+                       reta_size, dev_rss_conf->indTableSize);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < reta_size; i++) {
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
+               if (reta_conf[idx].mask & RTE_BIT64(shift))
+                       reta_conf[idx].reta[shift] = dev_rss_conf->indTable[i];
+       }
+
+       return 0;
+}
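
With reta_update/reta_query wired into vmxnet3_eth_dev_ops, applications reach them through the generic ethdev calls. A hedged usage sketch, assuming port_id is configured with RTE_ETH_MQ_RX_RSS and nb_rxq active Rx queues:

/* Sketch: spread RSS traffic round-robin across nb_rxq queues by
 * rewriting the whole indirection table. reta_size must match the
 * device's table size (the indTableSize check above). */
static int
example_spread_reta(uint16_t port_id, uint16_t nb_rxq)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rss_reta_entry64 reta_conf[8]; /* up to 512 entries */
        uint16_t i, reta_size;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;
        reta_size = dev_info.reta_size;
        if (reta_size == 0 || reta_size > 8 * RTE_ETH_RETA_GROUP_SIZE)
                return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
                reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
                        RTE_BIT64(i % RTE_ETH_RETA_GROUP_SIZE);
                reta_conf[i / RTE_ETH_RETA_GROUP_SIZE]
                        .reta[i % RTE_ETH_RETA_GROUP_SIZE] = i % nb_rxq;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}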