i40e: enlarge the number of supported queues
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 74411c8..52c7899 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -570,6 +570,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
                return 0;
        }
        pci_dev = dev->pci_dev;
+
+       rte_eth_copy_pci_info(dev, pci_dev);
+
        pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        pf->adapter->eth_dev = dev;
        pf->dev_data = dev->data;
@@ -2441,17 +2444,73 @@ i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
        return ret;
 }
 
+static int
+i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+       struct i40e_pf *pf;
+       struct i40e_hw *hw;
+       int ret;
+
+       if (!vsi || !lut)
+               return -EINVAL;
+
+       pf = I40E_VSI_TO_PF(vsi);
+       hw = I40E_VSI_TO_HW(vsi);
+
+       if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+               ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
+                                         lut, lut_size);
+               if (ret) {
+                       PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
+                       return ret;
+               }
+       } else {
+               uint32_t *lut_dw = (uint32_t *)lut;
+               uint16_t i, lut_size_dw = lut_size / 4;
+
+               for (i = 0; i < lut_size_dw; i++)
+                       lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
+       }
+
+       return 0;
+}
+
+static int
+i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+       struct i40e_pf *pf;
+       struct i40e_hw *hw;
+       int ret;
+
+       if (!vsi || !lut)
+               return -EINVAL;
+
+       pf = I40E_VSI_TO_PF(vsi);
+       hw = I40E_VSI_TO_HW(vsi);
+
+       if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+               ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
+                                         lut, lut_size);
+               if (ret) {
+                       PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
+                       return ret;
+               }
+       } else {
+               uint32_t *lut_dw = (uint32_t *)lut;
+               uint16_t i, lut_size_dw = lut_size / 4;
+
+               for (i = 0; i < lut_size_dw; i++)
+                       I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+               I40E_WRITE_FLUSH(hw);
+       }
+
+       return 0;
+}
+
 static int
 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                         struct rte_eth_rss_reta_entry64 *reta_conf,
                         uint16_t reta_size)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t lut, l;
-       uint16_t i, j, lut_size = pf->hash_lut_size;
+       uint16_t i, lut_size = pf->hash_lut_size;
        uint16_t idx, shift;
-       uint8_t mask;
+       uint8_t *lut;
+       int ret;
 
        if (reta_size != lut_size ||
                reta_size > ETH_RSS_RETA_SIZE_512) {
@@ -2461,28 +2520,26 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
-       for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+       lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+       if (!lut) {
+               PMD_DRV_LOG(ERR, "No memory can be allocated");
+               return -ENOMEM;
+       }
+       ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
+       if (ret)
+               goto out;
+       for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
-               mask = (uint8_t)((reta_conf[idx].mask >> shift) &
-                                               I40E_4_BIT_MASK);
-               if (!mask)
-                       continue;
-               if (mask == I40E_4_BIT_MASK)
-                       l = 0;
-               else
-                       l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
-               for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
-                       if (mask & (0x1 << j))
-                               lut |= reta_conf[idx].reta[shift + j] <<
-                                                       (CHAR_BIT * j);
-                       else
-                               lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
-               }
-               I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+               if (reta_conf[idx].mask & (1ULL << shift))
+                       lut[i] = reta_conf[idx].reta[shift];
        }
+       ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
 
-       return 0;
+out:
+       rte_free(lut);
+
+       return ret;
 }
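
Note (not part of the patch): both RETA ops now funnel through the helpers
above, which pick the backend per part: X722 (I40E_FLAG_RSS_AQ_CAPABLE)
programs the lookup table via admin queue commands, while all other parts
write the I40E_PFQF_HLUT registers directly. A minimal caller-side sketch,
assuming the PF reports a 512-entry table:

	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
						  RTE_RETA_GROUP_SIZE];

	memset(reta_conf, 0, sizeof(reta_conf));
	reta_conf[0].mask = (1ULL << 5) | (1ULL << 6); /* select entries 5, 6 */
	reta_conf[0].reta[5] = 2;                      /* entry 5 -> queue 2 */
	reta_conf[0].reta[6] = 3;                      /* entry 6 -> queue 3 */
	rte_eth_dev_rss_reta_update(0 /* port */, reta_conf, 512);

Entries whose mask bit is clear keep their current value; that is why the
update path fetches the whole table with i40e_get_rss_lut() before writing
it back.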
 
 static int
@@ -2491,11 +2548,10 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                        uint16_t reta_size)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t lut;
-       uint16_t i, j, lut_size = pf->hash_lut_size;
+       uint16_t i, lut_size = pf->hash_lut_size;
        uint16_t idx, shift;
-       uint8_t mask;
+       uint8_t *lut;
+       int ret;
 
        if (reta_size != lut_size ||
                reta_size > ETH_RSS_RETA_SIZE_512) {
@@ -2505,23 +2561,26 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
-       for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+       lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+       if (!lut) {
+               PMD_DRV_LOG(ERR, "No memory can be allocated");
+               return -ENOMEM;
+       }
+
+       ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
+       if (ret)
+               goto out;
+       for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
-               mask = (uint8_t)((reta_conf[idx].mask >> shift) &
-                                               I40E_4_BIT_MASK);
-               if (!mask)
-                       continue;
-
-               lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
-               for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
-                       if (mask & (0x1 << j))
-                               reta_conf[idx].reta[shift + j] = ((lut >>
-                                       (CHAR_BIT * j)) & I40E_8_BIT_MASK);
-               }
+               if (reta_conf[idx].mask & (1ULL << shift))
+                       reta_conf[idx].reta[shift] = lut[i];
        }
 
-       return 0;
+out:
+       rte_free(lut);
+
+       return ret;
 }
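
Note (not part of the patch): the query path is symmetric; the same mask
picks which entries of the fetched LUT are copied back to the caller.
Continuing the sketch above:

	memset(reta_conf, 0, sizeof(reta_conf));
	reta_conf[0].mask = (1ULL << 5) | (1ULL << 6);
	rte_eth_dev_rss_reta_query(0, reta_conf, 512);
	/* reta_conf[0].reta[5] and .reta[6] now read back as 2 and 3 */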
 
 /**
@@ -2688,9 +2747,8 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
-       uint16_t sum_queues = 0, sum_vsis, left_queues;
+       uint16_t qp_count = 0, vsi_count = 0;
 
-       /* First check if FW support SRIOV */
        if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
                PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
                return -EINVAL;
@@ -2701,107 +2759,85 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
        pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
 
        pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
-       pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
-       PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
-       /* Allocate queues for pf */
-       if (hw->func_caps.rss) {
-               pf->flags |= I40E_FLAG_RSS;
-               pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
-                       (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
-               pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
-       } else
+       pf->max_num_vsi = hw->func_caps.num_vsis;
+       pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
+       pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+       pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+
+       /* FDir queue/VSI allocation */
+       pf->fdir_qp_offset = 0;
+       if (hw->func_caps.fd) {
+               pf->flags |= I40E_FLAG_FDIR;
+               pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
+       } else {
+               pf->fdir_nb_qps = 0;
+       }
+       qp_count += pf->fdir_nb_qps;
+       vsi_count += 1;
+
+       /* LAN queue/VSI allocation */
+       pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
+       if (!hw->func_caps.rss) {
                pf->lan_nb_qps = 1;
-       sum_queues = pf->lan_nb_qps;
-       /* Default VSI is not counted in */
-       sum_vsis = 0;
-       PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
+       } else {
+               pf->flags |= I40E_FLAG_RSS;
+               if (hw->mac.type == I40E_MAC_X722)
+                       pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
+               pf->lan_nb_qps = pf->lan_nb_qp_max;
+       }
+       qp_count += pf->lan_nb_qps;
+       vsi_count += 1;
 
+       /* VF queue/VSI allocation */
+       pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
        if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
                pf->flags |= I40E_FLAG_SRIOV;
                pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
-               if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
-                       PMD_INIT_LOG(ERR, "Config VF number %u, "
-                                    "max supported %u.",
-                                    dev->pci_dev->max_vfs,
-                                    hw->func_caps.num_vfs);
-                       return -EINVAL;
-               }
-               if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
-                       PMD_INIT_LOG(ERR, "FVL VF queue %u, "
-                                    "max support %u queues.",
-                                    pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
-                       return -EINVAL;
-               }
                pf->vf_num = dev->pci_dev->max_vfs;
-               sum_queues += pf->vf_nb_qps * pf->vf_num;
-               sum_vsis   += pf->vf_num;
-               PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
-                            pf->vf_num, pf->vf_nb_qps);
-       } else
+               PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
+                           "in total %u queues", pf->vf_num, pf->vf_nb_qps,
+                           pf->vf_nb_qps * pf->vf_num);
+       } else {
+               pf->vf_nb_qps = 0;
                pf->vf_num = 0;
+       }
+       qp_count += pf->vf_nb_qps * pf->vf_num;
+       vsi_count += pf->vf_num;
 
+       /* VMDq queue/VSI allocation */
+       pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
        if (hw->func_caps.vmdq) {
                pf->flags |= I40E_FLAG_VMDQ;
-               pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+               pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
                pf->max_nb_vmdq_vsi = 1;
-               /*
-                * If VMDQ available, assume a single VSI can be created.  Will adjust
-                * later.
-                */
-               sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
-               sum_vsis += pf->max_nb_vmdq_vsi;
+               PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues per VMDQ VSI, "
+                           "in total %u queues", pf->max_nb_vmdq_vsi,
+                           pf->vmdq_nb_qps,
+                           pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
        } else {
                pf->vmdq_nb_qps = 0;
                pf->max_nb_vmdq_vsi = 0;
        }
-       pf->nb_cfg_vmdq_vsi = 0;
-
-       if (hw->func_caps.fd) {
-               pf->flags |= I40E_FLAG_FDIR;
-               pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
-               /**
-                * Each flow director consumes one VSI and one queue,
-                * but can't calculate out predictably here.
-                */
-       }
+       qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
+       vsi_count += pf->max_nb_vmdq_vsi;
 
        if (hw->func_caps.dcb)
                pf->flags |= I40E_FLAG_DCB;
 
-       if (sum_vsis > pf->max_num_vsi ||
-               sum_queues > hw->func_caps.num_rx_qp) {
-               PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
-               PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
-                            pf->max_num_vsi, sum_vsis);
-               PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
-                            hw->func_caps.num_rx_qp, sum_queues);
+       if (qp_count > hw->func_caps.num_tx_qp) {
+               PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
+                           "the hardware maximum %u", qp_count,
+                           hw->func_caps.num_tx_qp);
                return -EINVAL;
        }
-
-       /* Adjust VMDQ setting to support as many VMs as possible */
-       if (pf->flags & I40E_FLAG_VMDQ) {
-               left_queues = hw->func_caps.num_rx_qp - sum_queues;
-
-               pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
-                                       pf->max_num_vsi - sum_vsis);
-
-               /* Limit the max VMDQ number that rte_ether that can support  */
-               pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
-                                       ETH_64_POOLS - 1);
-
-               PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
-                               pf->max_nb_vmdq_vsi);
-               PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
-       }
-
-       /* Each VSI occupy 1 MSIX interrupt at least, plus IRQ0 for misc intr
-        * cause */
-       if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
-               PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
-                            sum_vsis, hw->func_caps.num_msix_vectors);
+       if (vsi_count > hw->func_caps.num_vsis) {
+               PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
+                           "the hardware maximum %u", vsi_count,
+                           hw->func_caps.num_vsis);
                return -EINVAL;
        }
-       return I40E_SUCCESS;
+
+       return 0;
 }
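
Note (not part of the patch): the rewritten i40e_pf_parameter_init() lays
the PF's queue space out linearly: FDIR first, then LAN, then VFs, then
VMDq pools, each *_qp_offset being the running sum of everything before
it. A worked sizing under assumed values (the RTE_LIBRTE_I40E_QUEUE_NUM_*
config defaults of 64 queues per PF and 4 per VF/VM, FDIR-capable
hardware, 2 VFs, 1 VMDq pool):

	fdir_qp_offset = 0             fdir_nb_qps        = 1
	lan_qp_offset  = 0 + 1  = 1    lan_nb_qps         = 64
	vf_qp_offset   = 1 + 64 = 65   vf_nb_qps * vf_num = 4 * 2 = 8
	vmdq_qp_offset = 65 + 8 = 73   vmdq queues        = 4 * 1 = 4

	qp_count  = 1 + 64 + 8 + 4 = 77  (checked against func_caps.num_tx_qp)
	vsi_count = 1 + 1 + 2 + 1  = 5   (checked against func_caps.num_vsis)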
 
 static int
@@ -3191,7 +3227,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
        bsf = rte_bsf32(qpnum_per_tc);
 
        /* Adjust the queue number to actual queues that can be applied */
-       vsi->nb_qps = qpnum_per_tc * total_tc;
+       if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
+               vsi->nb_qps = qpnum_per_tc * total_tc;
 
        /**
         * Configure TC and queue mapping parameters, for enabled TC,
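
Note (not part of the patch): this guard is what lets a non-power-of-two
queue count survive on the main VSI. The TC mapping itself still needs a
power-of-two size (bsf = rte_bsf32(qpnum_per_tc) above only makes sense
for one), so qpnum_per_tc is rounded down earlier in the function. A
sketch, assuming 48 configured queues:

	/* total_tc == 1 on the main VSI, 48 queues configured */
	qpnum_per_tc = 32;     /* 48 rounded down to a power of two */
	/* old: vsi->nb_qps = 32 * 1, silently dropping 16 queues
	 * new: vsi->nb_qps stays 48; only the TC mapping uses 32 */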
@@ -5253,22 +5290,77 @@ i40e_pf_disable_rss(struct i40e_pf *pf)
 }
 
 static int
-i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
+i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
 {
-       uint32_t *hash_key;
-       uint8_t hash_key_len;
-       uint64_t rss_hf;
-       uint16_t i;
-       uint64_t hena;
+       struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+       struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+       int ret = 0;
+
+       if (!key || key_len != ((I40E_PFQF_HKEY_MAX_INDEX + 1) *
+               sizeof(uint32_t)))
+               return -EINVAL;
+
+       if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+               struct i40e_aqc_get_set_rss_key_data *key_dw =
+                       (struct i40e_aqc_get_set_rss_key_data *)key;
+
+               ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
+               if (ret)
+                       PMD_INIT_LOG(ERR, "Failed to configure RSS key "
+                                    "via AQ");
+       } else {
+               uint32_t *hash_key = (uint32_t *)key;
+               uint16_t i;
 
-       hash_key = (uint32_t *)(rss_conf->rss_key);
-       hash_key_len = rss_conf->rss_key_len;
-       if (hash_key != NULL && hash_key_len >=
-               (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
-               /* Fill in RSS hash key */
                for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
                        I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
+               I40E_WRITE_FLUSH(hw);
+       }
+
+       return ret;
+}
+
+static int
+i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
+{
+       struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+       struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+       int ret;
+
+       if (!key || !key_len)
+               return -EINVAL;
+
+       if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+               ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
+                       (struct i40e_aqc_get_set_rss_key_data *)key);
+               if (ret) {
+                       PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
+                       return ret;
+               }
+       } else {
+               uint32_t *key_dw = (uint32_t *)key;
+               uint16_t i;
+
+               for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+                       key_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
        }
+       *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
+
+       return 0;
+}
+
+static int
+i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
+{
+       struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+       uint64_t rss_hf;
+       uint64_t hena;
+       int ret;
+
+       ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
+                              rss_conf->rss_key_len);
+       if (ret)
+               return ret;
 
        rss_hf = rss_conf->rss_hf;
        hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
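
Note (not part of the patch): i40e_set_rss_key() rejects a missing or
wrong-sized key, so a caller of rte_eth_dev_rss_hash_update() must now
supply the full PF key of (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)
bytes, i.e. 52 bytes assuming I40E_PFQF_HKEY_MAX_INDEX == 12. A
caller-side sketch:

	uint8_t key[52] = { 0x6d, 0x5a, 0x56, /* plus 49 more key bytes */ };
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = key,
		.rss_key_len = sizeof(key),
		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
	};

	rte_eth_dev_rss_hash_update(0 /* port */, &rss_conf);

On X722 the key lands in the VSI via i40e_aq_set_rss_key(); elsewhere it
is written to the I40E_PFQF_HKEY registers.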
@@ -5286,6 +5378,7 @@ static int
 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                         struct rte_eth_rss_conf *rss_conf)
 {
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
        uint64_t hena;
@@ -5301,23 +5394,20 @@ i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
        if (rss_hf == 0) /* Disable RSS */
                return -EINVAL;
 
-       return i40e_hw_rss_hash_set(hw, rss_conf);
+       return i40e_hw_rss_hash_set(pf, rss_conf);
 }
 
 static int
 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                           struct rte_eth_rss_conf *rss_conf)
 {
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
        uint64_t hena;
-       uint16_t i;
 
-       if (hash_key != NULL) {
-               for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-                       hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
-               rss_conf->rss_key_len = i * sizeof(uint32_t);
-       }
+       i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
+                        &rss_conf->rss_key_len);
+
        hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
        hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
        rss_conf->rss_hf = i40e_parse_hena(hena);
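
Note (not part of the patch): on readback the driver fills the caller's
buffer through i40e_get_rss_key(), which also overwrites rss_key_len with
the real key size, so the buffer must hold the full 52-byte PF key:

	uint8_t key_out[52];
	struct rte_eth_rss_conf conf_out = { .rss_key = key_out };

	rte_eth_dev_rss_hash_conf_get(0, &conf_out);
	/* conf_out.rss_key_len == 52; conf_out.rss_hf holds the active types */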
@@ -5614,12 +5704,12 @@ i40e_pf_config_rss(struct i40e_pf *pf)
         * If both VMDQ and RSS are enabled, not all PF queues are configured.
         * It's necessary to calculate the actual PF queues that are configured.
         */
-       if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+       if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                num = i40e_pf_calc_configured_queues_num(pf);
-               num = i40e_align_floor(num);
-       } else
-               num = i40e_align_floor(pf->dev_data->nb_rx_queues);
+       else
+               num = pf->dev_data->nb_rx_queues;
 
+       num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
        PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
                        num);
 
@@ -5655,7 +5745,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
                                                        sizeof(uint32_t);
        }
 
-       return i40e_hw_rss_hash_set(hw, &rss_conf);
+       return i40e_hw_rss_hash_set(pf, &rss_conf);
 }
 
 static int
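
Note (not part of the patch): dropping i40e_align_floor() here is the
user-visible point of the series. RSS previously spread traffic over only
a power-of-two number of queues; now any count up to I40E_MAX_Q_PER_TC
(assumed to be 64) is used as-is:

	/* sketch: PF configured with 48 RX queues */
	old_num = i40e_align_floor(48);            /* 32; 16 queues sat idle */
	new_num = RTE_MIN(48, I40E_MAX_Q_PER_TC);  /* 48; all queues carry traffic */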