net/i40e: fix packet type for X722
[dpdk.git] / drivers / net / i40e / i40e_ethdev.c
index 04fc3a1..9626c6a 100644
@@ -33,6 +33,7 @@
 #include "base/i40e_type.h"
 #include "base/i40e_register.h"
 #include "base/i40e_dcb.h"
+#include "base/i40e_diag.h"
 #include "i40e_ethdev.h"
 #include "i40e_rxtx.h"
 #include "i40e_pf.h"
@@ -671,17 +672,6 @@ RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
 
 static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
 {
-       /*
-        * Force global configuration for flexible payload
-        * to the first 16 bytes of the corresponding L2/L3/L4 paylod.
-        * This should be removed from code once proper
-        * configuration API is added to avoid configuration conflicts
-        * between ports of the same device.
-        */
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
-
        /*
         * Initialize registers for parsing packet type of QinQ
         * This should be removed from code once proper
@@ -1067,7 +1057,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
                return 0;
        }
        i40e_set_default_ptype_table(dev);
-       i40e_set_default_pctype_table(dev);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        intr_handle = &pci_dev->intr_handle;
 
@@ -1113,11 +1102,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
                return ret;
        }
 
+       i40e_set_default_pctype_table(dev);
+
        /*
         * To work around the NVM issue, initialize registers
-        * for flexible payload and packet type of QinQ by
-        * software. It should be removed once issues are fixed
-        * in NVM.
+        * for packet type of QinQ by software.
+        * It should be removed once issues are fixed in NVM.
         */
        i40e_GLQF_reg_init(hw);
 
@@ -1286,6 +1276,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 
        /* enable uio intr after callback register */
        rte_intr_enable(intr_handle);
+
+       /* By default, disable flexible payload in the global configuration */
+       i40e_flex_payload_reg_set_default(hw);
+
        /*
         * Add an ethertype filter to drop all flow control frames transmitted
         * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
@@ -1320,6 +1314,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
        /* initialize queue region configuration */
        i40e_init_queue_region_conf(dev);
 
+       /* initialize rss configuration from rte_flow */
+       memset(&pf->rss_info, 0,
+               sizeof(struct i40e_rte_flow_rss_conf));
+
        return 0;
 
 err_init_fdir_filter_list:
@@ -1406,6 +1404,17 @@ i40e_rm_fdir_filter_list(struct i40e_pf *pf)
        }
 }
 
+void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
+{
+       /*
+        * By default, disable flexible payload extraction
+        * for the corresponding L2/L3/L4 layers.
+        */
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
+}
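
/*
 * Note: the helper above zeroes I40E_GLQF_ORT(33)..(35), so flexible
 * payload extraction is now off until an application configures it.
 * For contrast, a hypothetical helper restoring the values that the
 * removed i40e_GLQF_reg_init() code used to force (first 16 bytes of
 * the corresponding L2/L3/L4 payload) could look like this sketch;
 * it is not part of this patch.
 */
static void i40e_flex_payload_reg_set_legacy(struct i40e_hw *hw)
{
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
}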
+
 static int
 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
 {
@@ -1993,6 +2002,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
 
        /* Enable all queues which have been configured */
        ret = i40e_dev_switch_queues(pf, TRUE);
+
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to enable VSI");
                goto err_up;
@@ -2022,7 +2032,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
        /* Enable mac loopback mode */
        if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
            dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
-               ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
+               ret = i40e_diag_set_loopback(hw, dev->data->dev_conf.lpbk_mode);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "fail to set loopback link");
                        goto err_up;
@@ -2135,9 +2145,6 @@ i40e_dev_stop(struct rte_eth_dev *dev)
        /* reset hierarchy commit */
        pf->tm_conf.committed = false;
 
-       /* Remove all the queue region configuration */
-       i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
-
        hw->adapter_stopped = 1;
 }
 
@@ -2203,6 +2210,9 @@ i40e_dev_close(struct rte_eth_dev *dev)
        i40e_res_pool_destroy(&pf->qp_pool);
        i40e_res_pool_destroy(&pf->msix_pool);
 
+       /* Disable flexible payload in global configuration */
+       i40e_flex_payload_reg_set_default(hw);
+
        /* force a PF reset to clean anything leftover */
        reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
        I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
@@ -2512,6 +2522,22 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
                            pf->offset_loaded,
                            &pf->internal_stats_offset.rx_broadcast,
                            &pf->internal_stats.rx_broadcast);
+       /* Get total internal tx packet count */
+       i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
+                           I40E_GLV_UPTCL(hw->port),
+                           pf->offset_loaded,
+                           &pf->internal_stats_offset.tx_unicast,
+                           &pf->internal_stats.tx_unicast);
+       i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
+                           I40E_GLV_MPTCL(hw->port),
+                           pf->offset_loaded,
+                           &pf->internal_stats_offset.tx_multicast,
+                           &pf->internal_stats.tx_multicast);
+       i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
+                           I40E_GLV_BPTCL(hw->port),
+                           pf->offset_loaded,
+                           &pf->internal_stats_offset.tx_broadcast,
+                           &pf->internal_stats.tx_broadcast);
 
        /* exclude CRC size */
        pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
@@ -2541,16 +2567,32 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
        ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
                ns->eth.rx_broadcast) * ETHER_CRC_LEN;
 
-       /* Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
-        * I40E_GLPRT_GORCH[H/L], so there is a small window that cause negtive
+       /* Exclude internal rx bytes.
+        * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated before
+        * I40E_GLPRT_GORC[H/L], so there is a small window that causes a negative
         * value.
+        * Same for I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L], I40E_GLV_BPRC[H/L].
         */
        if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
                ns->eth.rx_bytes = 0;
-       /* exlude internal rx bytes */
        else
                ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
 
+       if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
+               ns->eth.rx_unicast = 0;
+       else
+               ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
+
+       if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
+               ns->eth.rx_multicast = 0;
+       else
+               ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
+
+       if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
+               ns->eth.rx_broadcast = 0;
+       else
+               ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
+
        i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
                            pf->offset_loaded, &os->eth.rx_discards,
                            &ns->eth.rx_discards);
@@ -2579,12 +2621,32 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
        ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
                ns->eth.tx_broadcast) * ETHER_CRC_LEN;
 
-       /* exclude internal tx bytes */
+       /* Exclude internal tx bytes.
+        * Workaround: it is possible that I40E_GLV_GOTC[H/L] is updated before
+        * I40E_GLPRT_GOTC[H/L], so there is a small window that causes a
+        * negative value.
+        * Same for I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L], I40E_GLV_BPTC[H/L].
+        */
        if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
                ns->eth.tx_bytes = 0;
        else
                ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
 
+       if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
+               ns->eth.tx_unicast = 0;
+       else
+               ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
+
+       if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
+               ns->eth.tx_multicast = 0;
+       else
+               ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
+
+       if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
+               ns->eth.tx_broadcast = 0;
+       else
+               ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
+
        /* GLPRT_TEPC not supported */
 
        /* additional port specific stats */
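
/*
 * The rx and tx hunks above repeat one pattern: subtract the VSI-level
 * internal counter (I40E_GLV_*) from the port-level counter
 * (I40E_GLPRT_*), clamping at zero because the two register families
 * are not sampled atomically. A minimal sketch of that pattern as a
 * hypothetical helper (illustrative only, not part of the patch):
 */
static inline uint64_t
i40e_stat_sub_clamp(uint64_t port_cnt, uint64_t internal_cnt)
{
	return (port_cnt < internal_cnt) ? 0 : port_cnt - internal_cnt;
}
/*
 * e.g. ns->eth.rx_unicast =
 *	i40e_stat_sub_clamp(ns->eth.rx_unicast,
 *			    pf->internal_stats.rx_unicast);
 */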
@@ -3659,6 +3721,7 @@ i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
 {
        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+       uint32_t reg;
        int ret;
 
        if (!lut)
@@ -3675,14 +3738,22 @@ i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
                uint32_t *lut_dw = (uint32_t *)lut;
                uint16_t i, lut_size_dw = lut_size / 4;
 
-               for (i = 0; i < lut_size_dw; i++)
-                       lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
+               if (vsi->type == I40E_VSI_SRIOV) {
+                       for (i = 0; i < lut_size_dw; i++) {
+                               reg = I40E_VFQF_HLUT1(i, vsi->user_param);
+                               lut_dw[i] = i40e_read_rx_ctl(hw, reg);
+                       }
+               } else {
+                       for (i = 0; i < lut_size_dw; i++)
+                               lut_dw[i] = I40E_READ_REG(hw,
+                                                         I40E_PFQF_HLUT(i));
+               }
        }
 
        return 0;
 }
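
/*
 * Register selection used by the get/set RSS LUT changes around this
 * point (and by the HKEY hunks later in this diff): an SR-IOV VSI uses
 * the per-VF register bank, addressed by the VF number kept in
 * vsi->user_param, while the PF's own VSI keeps using the PF bank.
 * A hypothetical helper showing the same selection for the LUT
 * (illustrative only, not part of the patch):
 */
static inline uint32_t
i40e_rss_lut_reg(const struct i40e_vsi *vsi, uint16_t idx)
{
	if (vsi->type == I40E_VSI_SRIOV)
		return I40E_VFQF_HLUT1(idx, vsi->user_param);
	return I40E_PFQF_HLUT(idx);
}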
 
-static int
+int
 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
 {
        struct i40e_pf *pf;
@@ -3706,8 +3777,17 @@ i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
                uint32_t *lut_dw = (uint32_t *)lut;
                uint16_t i, lut_size_dw = lut_size / 4;
 
-               for (i = 0; i < lut_size_dw; i++)
-                       I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+               if (vsi->type == I40E_VSI_SRIOV) {
+                       for (i = 0; i < lut_size_dw; i++)
+                               I40E_WRITE_REG(
+                                       hw,
+                                       I40E_VFQF_HLUT1(i, vsi->user_param),
+                                       lut_dw[i]);
+               } else {
+                       for (i = 0; i < lut_size_dw; i++)
+                               I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
+                                              lut_dw[i]);
+               }
                I40E_WRITE_FLUSH(hw);
        }
 
@@ -3952,6 +4032,69 @@ i40e_get_cap(struct i40e_hw *hw)
        return ret;
 }
 
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF       4
+#define QUEUE_NUM_PER_VF_ARG                   "queue-num-per-vf"
+RTE_PMD_REGISTER_PARAM_STRING(net_i40e, QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16");
+
+static int i40e_pf_parse_vf_queue_number_handler(const char *key,
+               const char *value,
+               void *opaque)
+{
+       struct i40e_pf *pf;
+       unsigned long num;
+       char *end;
+
+       pf = (struct i40e_pf *)opaque;
+       RTE_SET_USED(key);
+
+       errno = 0;
+       num = strtoul(value, &end, 0);
+       if (errno != 0 || end == value || *end != 0) {
+               PMD_DRV_LOG(WARNING, "Invalid VF queue number: %s, keeping "
+                           "the current value %hu", value, pf->vf_nb_qp_max);
+               return -(EINVAL);
+       }
+
+       if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
+               pf->vf_nb_qp_max = (uint16_t)num;
+       else
+               /* Return 0 so that a later valid value can still be applied */
+               PMD_DRV_LOG(WARNING, "Invalid VF queue number %lu: it must be "
+                           "a power of 2 and no greater than 16; keeping the "
+                           "current value %hu", num, pf->vf_nb_qp_max);
+
+       return 0;
+}
+
+static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
+{
+       static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct rte_kvargs *kvlist;
+
+       /* set default queue number per VF to 4 */
+       pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+
+       if (dev->device->devargs == NULL)
+               return 0;
+
+       kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+       if (kvlist == NULL)
+               return -(EINVAL);
+
+       if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
+               PMD_DRV_LOG(WARNING, "Argument \"%s\" given more than once; "
+                           "only the first invalid or the last valid value "
+                           "is used", QUEUE_NUM_PER_VF_ARG);
+
+       rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
+                          i40e_pf_parse_vf_queue_number_handler, pf);
+
+       rte_kvargs_free(kvlist);
+
+       return 0;
+}
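
/*
 * Usage note: the new "queue-num-per-vf" devarg is passed per device on
 * the EAL command line, e.g. (PCI address is a placeholder):
 *     testpmd -w 0000:02:00.0,queue-num-per-vf=8 -- -i
 * Accepted values are 1, 2, 4, 8 and 16; anything else keeps the
 * current value (default 4). A minimal sketch of the acceptance rule
 * enforced by the handler above (hypothetical helper, not part of the
 * patch):
 */
static inline int
i40e_vf_queue_num_valid(unsigned long num)
{
	return num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num);
}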
+
 static int
 i40e_pf_parameter_init(struct rte_eth_dev *dev)
 {
@@ -3964,6 +4107,9 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
                PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
                return -EINVAL;
        }
+
+       i40e_pf_config_vf_rxq_number(dev);
+
        /* Add the parameter init for LFC */
        pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
        pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
@@ -3973,7 +4119,6 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
        pf->max_num_vsi = hw->func_caps.num_vsis;
        pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
        pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
-       pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
 
        /* FDir queue/VSI allocation */
        pf->fdir_qp_offset = 0;
@@ -4003,7 +4148,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
        pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
        if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
                pf->flags |= I40E_FLAG_SRIOV;
-               pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+               pf->vf_nb_qps = pf->vf_nb_qp_max;
                pf->vf_num = pci_dev->max_vfs;
                PMD_DRV_LOG(DEBUG,
                        "%u VF VSIs, %u queues per VF VSI, in total %u queues",
@@ -5977,7 +6122,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
                        ret = i40e_dev_link_update(dev, 0);
                        if (!ret)
                                _rte_eth_dev_callback_process(dev,
-                                       RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
+                                       RTE_ETH_EVENT_INTR_LSC, NULL);
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
@@ -6679,17 +6824,20 @@ i40e_pf_disable_rss(struct i40e_pf *pf)
        I40E_WRITE_FLUSH(hw);
 }
 
-static int
+int
 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
 {
        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+       uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
+                          I40E_VFQF_HKEY_MAX_INDEX :
+                          I40E_PFQF_HKEY_MAX_INDEX;
        int ret = 0;
 
        if (!key || key_len == 0) {
                PMD_DRV_LOG(DEBUG, "No key to be configured");
                return 0;
-       } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+       } else if (key_len != (key_idx + 1) *
                sizeof(uint32_t)) {
                PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
                return -EINVAL;
@@ -6706,8 +6854,18 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
                uint32_t *hash_key = (uint32_t *)key;
                uint16_t i;
 
-               for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-                       i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]);
+               if (vsi->type == I40E_VSI_SRIOV) {
+                       for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+                               I40E_WRITE_REG(
+                                       hw,
+                                       I40E_VFQF_HKEY1(i, vsi->user_param),
+                                       hash_key[i]);
+
+               } else {
+                       for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+                               I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
+                                              hash_key[i]);
+               }
                I40E_WRITE_FLUSH(hw);
        }
 
@@ -6719,6 +6877,7 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
 {
        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+       uint32_t reg;
        int ret;
 
        if (!key || !key_len)
@@ -6735,11 +6894,22 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
                uint32_t *key_dw = (uint32_t *)key;
                uint16_t i;
 
-               for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-                       key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
+               if (vsi->type == I40E_VSI_SRIOV) {
+                       for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
+                               reg = I40E_VFQF_HKEY1(i, vsi->user_param);
+                               key_dw[i] = i40e_read_rx_ctl(hw, reg);
+                       }
+                       *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+                                  sizeof(uint32_t);
+               } else {
+                       for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+                               reg = I40E_PFQF_HKEY(i);
+                               key_dw[i] = i40e_read_rx_ctl(hw, reg);
+                       }
+                       *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+                                  sizeof(uint32_t);
+               }
        }
-       *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
-
        return 0;
 }
 
@@ -10801,14 +10971,43 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct ether_addr *mac_addr)
 {
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_vsi *vsi = pf->main_vsi;
+       struct i40e_mac_filter_info mac_filter;
+       struct i40e_mac_filter *f;
+       int ret;
 
        if (!is_valid_assigned_ether_addr(mac_addr)) {
                PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
                return;
        }
 
-       /* Flags: 0x3 updates port address */
-       i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
+       TAILQ_FOREACH(f, &vsi->mac_list, next) {
+               if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
+                       break;
+       }
+
+       if (f == NULL) {
+               PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
+               return;
+       }
+
+       mac_filter = f->mac_info;
+       ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
+       if (ret != I40E_SUCCESS) {
+               PMD_DRV_LOG(ERR, "Failed to delete mac filter");
+               return;
+       }
+       memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
+       ret = i40e_vsi_add_mac(vsi, &mac_filter);
+       if (ret != I40E_SUCCESS) {
+               PMD_DRV_LOG(ERR, "Failed to add mac filter");
+               return;
+       }
+       memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
+
+       i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+                                 mac_addr->addr_bytes, NULL);
 }
 
 static int
@@ -10926,12 +11125,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
        }
 }
 
+/* Restore rss filter */
+static inline void
+i40e_rss_filter_restore(struct i40e_pf *pf)
+{
+       struct i40e_rte_flow_rss_conf *conf =
+                                       &pf->rss_info;
+       if (conf->num)
+               i40e_config_rss_filter(pf, conf, TRUE);
+}
+
 static void
 i40e_filter_restore(struct i40e_pf *pf)
 {
        i40e_ethertype_filter_restore(pf);
        i40e_tunnel_filter_restore(pf);
        i40e_fdir_filter_restore(pf);
+       i40e_rss_filter_restore(pf);
 }
 
 static bool
@@ -11386,6 +11596,82 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
        return ret;
 }
 
+int
+i40e_config_rss_filter(struct i40e_pf *pf,
+               struct i40e_rte_flow_rss_conf *conf, bool add)
+{
+       struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+       uint32_t i, lut = 0;
+       uint16_t j, num;
+       struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+       struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+
+       if (!add) {
+               if (memcmp(conf, rss_info,
+                       sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
+                       i40e_pf_disable_rss(pf);
+                       memset(rss_info, 0,
+                               sizeof(struct i40e_rte_flow_rss_conf));
+                       return 0;
+               }
+               return -EINVAL;
+       }
+
+       if (rss_info->num)
+               return -EINVAL;
+
+       /* If both VMDQ and RSS enabled, not all of PF queues are configured.
+        * It's necessary to calculate the actual PF queues that are configured.
+        */
+       if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+               num = i40e_pf_calc_configured_queues_num(pf);
+       else
+               num = pf->dev_data->nb_rx_queues;
+
+       num = RTE_MIN(num, conf->num);
+       PMD_DRV_LOG(INFO, "At most %u contiguous PF queues are configured",
+                       num);
+
+       if (num == 0) {
+               PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
+               return -ENOTSUP;
+       }
+
+       /* Fill in redirection table */
+       for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+               if (j == num)
+                       j = 0;
+               lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
+                       hw->func_caps.rss_table_entry_width) - 1));
+               if ((i & 3) == 3)
+                       I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+       }
+
+       if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
+               i40e_pf_disable_rss(pf);
+               return 0;
+       }
+       if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+               (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+               /* Random default keys */
+               static uint32_t rss_key_default[] = {0x6b793944,
+                       0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+                       0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+                       0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+               rss_conf.rss_key = (uint8_t *)rss_key_default;
+               rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+                                                       sizeof(uint32_t);
+       }
+
+       i40e_hw_rss_hash_set(pf, &rss_conf);
+
+       rte_memcpy(rss_info,
+               conf, sizeof(struct i40e_rte_flow_rss_conf));
+
+       return 0;
+}
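
/*
 * Worked example for the redirection-table loop above: each 32-bit
 * I40E_PFQF_HLUT register holds four LUT entries, with the first entry
 * of each group of four ending up in the most significant byte. With
 * conf->queue[] = {0, 1, 2, 3} and num = 4, every fourth iteration
 * writes 0x00010203 to I40E_PFQF_HLUT(i >> 2). A hypothetical helper
 * expressing the packing for one group (illustrative only):
 */
static inline uint32_t
i40e_pack_hlut_dword(const uint16_t q[4], uint32_t entry_mask)
{
	return ((q[0] & entry_mask) << 24) | ((q[1] & entry_mask) << 16) |
	       ((q[2] & entry_mask) << 8) | (q[3] & entry_mask);
}
/* entry_mask corresponds to (0x1 << rss_table_entry_width) - 1 above. */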
+
 RTE_INIT(i40e_init_log);
 static void
 i40e_init_log(void)