drivers/net/i40e/i40e_ethdev.c
index 94b89e4..277c1a8 100644
@@ -16,7 +16,7 @@
 #include <rte_pci.h>
 #include <rte_bus_pci.h>
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_ethdev_pci.h>
 #include <rte_memzone.h>
 #include <rte_malloc.h>
@@ -33,6 +33,7 @@
 #include "base/i40e_type.h"
 #include "base/i40e_register.h"
 #include "base/i40e_dcb.h"
+#include "base/i40e_diag.h"
 #include "i40e_ethdev.h"
 #include "i40e_rxtx.h"
 #include "i40e_pf.h"
@@ -1056,7 +1057,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
                return 0;
        }
        i40e_set_default_ptype_table(dev);
-       i40e_set_default_pctype_table(dev);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        intr_handle = &pci_dev->intr_handle;
 
@@ -1102,6 +1102,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
                return ret;
        }
 
+       i40e_set_default_pctype_table(dev);
+
        /*
         * To work around the NVM issue, initialize registers
         * for packet type of QinQ by software.
@@ -1312,6 +1314,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
        /* initialize queue region configuration */
        i40e_init_queue_region_conf(dev);
 
+       /* initialize rss configuration from rte_flow */
+       memset(&pf->rss_info, 0,
+               sizeof(struct i40e_rte_flow_rss_conf));
+
        return 0;
 
 err_init_fdir_filter_list:
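
The memset above pairs with the i40e_rss_filter_restore() hunk later in this
patch: pf->rss_info.num == 0 is what marks "no rte_flow RSS rule installed",
so the field must start out zeroed or a stale rule could be replayed after a
restart. A compilable sketch of the state involved, using stand-in types and
field names assumed from the code in this patch rather than the exact header
revision:

#include <stdint.h>

/* Stand-in for struct rte_eth_rss_conf (the real type lives in
 * rte_ethdev.h). */
struct rss_conf_stub {
	uint8_t *rss_key;
	uint8_t rss_key_len;
	uint64_t rss_hf;	/* enabled hash types */
};

/* Assumed shape of struct i40e_rte_flow_rss_conf; field names taken
 * from the code in this patch, not verified against the header. */
struct rss_info_stub {
	struct rss_conf_stub rss_conf;
	uint16_t num;		/* 0 <=> no rte_flow RSS rule installed */
	uint16_t queue[64];	/* queues the rule spreads traffic over */
};
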
@@ -1996,6 +2002,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
 
        /* Enable all queues which have been configured */
        ret = i40e_dev_switch_queues(pf, TRUE);
+
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to enable VSI");
                goto err_up;
@@ -2025,7 +2032,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
        /* Enable mac loopback mode */
        if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
            dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
-               ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
+               ret = i40e_diag_set_loopback(hw, dev->data->dev_conf.lpbk_mode);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "fail to set loopback link");
                        goto err_up;
@@ -2138,9 +2145,6 @@ i40e_dev_stop(struct rte_eth_dev *dev)
        /* reset hierarchy commit */
        pf->tm_conf.committed = false;
 
-       /* Remove all the queue region configuration */
-       i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
-
        hw->adapter_stopped = 1;
 }
 
@@ -3717,6 +3721,7 @@ i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
 {
        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+       uint32_t reg;
        int ret;
 
        if (!lut)
@@ -3733,14 +3738,22 @@ i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
                uint32_t *lut_dw = (uint32_t *)lut;
                uint16_t i, lut_size_dw = lut_size / 4;
 
-               for (i = 0; i < lut_size_dw; i++)
-                       lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
+               if (vsi->type == I40E_VSI_SRIOV) {
+                       for (i = 0; i < lut_size_dw; i++) {
+                               reg = I40E_VFQF_HLUT1(i, vsi->user_param);
+                               lut_dw[i] = i40e_read_rx_ctl(hw, reg);
+                       }
+               } else {
+                       for (i = 0; i < lut_size_dw; i++)
+                               lut_dw[i] = I40E_READ_REG(hw,
+                                                         I40E_PFQF_HLUT(i));
+               }
        }
 
        return 0;
 }
 
-static int
+int
 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
 {
        struct i40e_pf *pf;
@@ -3764,8 +3777,17 @@ i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
                uint32_t *lut_dw = (uint32_t *)lut;
                uint16_t i, lut_size_dw = lut_size / 4;
 
-               for (i = 0; i < lut_size_dw; i++)
-                       I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+               if (vsi->type == I40E_VSI_SRIOV) {
+                       for (i = 0; i < lut_size_dw; i++)
+                               I40E_WRITE_REG(
+                                       hw,
+                                       I40E_VFQF_HLUT1(i, vsi->user_param),
+                                       lut_dw[i]);
+               } else {
+                       for (i = 0; i < lut_size_dw; i++)
+                               I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
+                                              lut_dw[i]);
+               }
                I40E_WRITE_FLUSH(hw);
        }
 
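
Both LUT paths above move the table through 32-bit registers; the SRIOV
branch only differs in which register block it addresses, selecting the VF's
private copy by VF id (vsi->user_param). A standalone sketch of that
selection with made-up offsets (the real ones live in base/i40e_register.h):

#include <stdint.h>

/* Hypothetical register offsets; only the indexing scheme matters.
 * The PF writes its own HLUT array, while an SRIOV VSI targets the
 * VF's per-function copy, selected by the VF id. */
#define PFQF_HLUT(i)      (0x00240000u + (uint32_t)(i) * 128u)
#define VFQF_HLUT1(i, vf) (0x00220000u + (uint32_t)(i) * 1024u + \
			   (uint32_t)(vf) * 4u)

static uint32_t hlut_reg(int is_sriov_vsi, uint16_t i, uint16_t vf_id)
{
	return is_sriov_vsi ? VFQF_HLUT1(i, vf_id) : PFQF_HLUT(i);
}
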
@@ -6100,7 +6122,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
                        ret = i40e_dev_link_update(dev, 0);
                        if (!ret)
                                _rte_eth_dev_callback_process(dev,
-                                       RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
+                                       RTE_ETH_EVENT_INTR_LSC, NULL);
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
@@ -6802,17 +6824,20 @@ i40e_pf_disable_rss(struct i40e_pf *pf)
        I40E_WRITE_FLUSH(hw);
 }
 
-static int
+int
 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
 {
        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+       uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
+                          I40E_VFQF_HKEY_MAX_INDEX :
+                          I40E_PFQF_HKEY_MAX_INDEX;
        int ret = 0;
 
        if (!key || key_len == 0) {
                PMD_DRV_LOG(DEBUG, "No key to be configured");
                return 0;
-       } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+       } else if (key_len != (key_idx + 1) *
                sizeof(uint32_t)) {
                PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
                return -EINVAL;
@@ -6829,8 +6854,18 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
                uint32_t *hash_key = (uint32_t *)key;
                uint16_t i;
 
-               for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-                       i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]);
+               if (vsi->type == I40E_VSI_SRIOV) {
+                       for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+                               I40E_WRITE_REG(
+                                       hw,
+                                       I40E_VFQF_HKEY1(i, vsi->user_param),
+                                       hash_key[i]);
+
+               } else {
+                       for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+                               I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
+                                              hash_key[i]);
+               }
                I40E_WRITE_FLUSH(hw);
        }
 
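
The key_idx arithmetic above extends the length check to VF VSIs: each HKEY
register holds four key bytes, and both the PF and VF blocks expose indices
0..12, i.e. a 52-byte key. A minimal compile-time check of that arithmetic,
with the MAX_INDEX values assumed from base/i40e_register.h:

#include <assert.h>
#include <stdint.h>

#define PFQF_HKEY_MAX_INDEX 12	/* assumed from base/i40e_register.h */
#define VFQF_HKEY_MAX_INDEX 12	/* the VF block has the same count */

static_assert((PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t) == 52,
	      "i40e RSS hash key is 52 bytes");
static_assert((VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t) == 52,
	      "the VF length check accepts the same 52 bytes");
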
@@ -6842,6 +6877,7 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
 {
        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+       uint32_t reg;
        int ret;
 
        if (!key || !key_len)
@@ -6858,11 +6894,22 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
                uint32_t *key_dw = (uint32_t *)key;
                uint16_t i;
 
-               for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-                       key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
+               if (vsi->type == I40E_VSI_SRIOV) {
+                       for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
+                               reg = I40E_VFQF_HKEY1(i, vsi->user_param);
+                               key_dw[i] = i40e_read_rx_ctl(hw, reg);
+                       }
+                       *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+                                  sizeof(uint32_t);
+               } else {
+                       for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+                               reg = I40E_PFQF_HKEY(i);
+                               key_dw[i] = i40e_read_rx_ctl(hw, reg);
+                       }
+                       *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+                                  sizeof(uint32_t);
+               }
        }
-       *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
-
        return 0;
 }
 
@@ -7141,11 +7188,13 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
        node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
        if (add && node) {
                PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+               rte_free(cld_filter);
                return -EINVAL;
        }
 
        if (!add && !node) {
                PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+               rte_free(cld_filter);
                return -EINVAL;
        }
 
@@ -7154,16 +7203,26 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
                                        vsi->seid, &cld_filter->element, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+                       rte_free(cld_filter);
                        return -ENOTSUP;
                }
                tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+               if (tunnel == NULL) {
+                       PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+                       rte_free(cld_filter);
+                       return -ENOMEM;
+               }
+
                rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
                ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+               if (ret < 0)
+                       rte_free(tunnel);
        } else {
                ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
                                                   &cld_filter->element, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+                       rte_free(cld_filter);
                        return -ENOTSUP;
                }
                ret = i40e_sw_tunnel_filter_del(pf, &node->input);
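
The added rte_free() calls in this and the following hunks plug leaks of
cld_filter, which is heap-allocated near the top of the function and was
abandoned on every early return. A hypothetical single-exit rewrite of the
same logic, sketched with stand-in types and libc allocation so it compiles
on its own:

#include <errno.h>
#include <stdlib.h>

struct filt { int dummy; };	/* stand-in for the AQ filter element */

/* Hypothetical single-exit version of the fix: one cleanup label
 * instead of an rte_free(cld_filter) before every early return. */
static int tunnel_filter_set(int add, int node_exists)
{
	struct filt *cld_filter = calloc(1, sizeof(*cld_filter));
	int ret = 0;

	if (cld_filter == NULL)
		return -ENOMEM;
	if (add && node_exists) {	/* conflicting rule */
		ret = -EINVAL;
		goto out;
	}
	if (!add && !node_exists) {	/* nothing to remove */
		ret = -EINVAL;
		goto out;
	}
	/* ... fill cld_filter and program the admin queue ... */
out:
	free(cld_filter);
	return ret;
}
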
@@ -7592,6 +7651,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
        else {
                if (tunnel_filter->vf_id >= pf->vf_num) {
                        PMD_DRV_LOG(ERR, "Invalid argument.");
+                       rte_free(cld_filter);
                        return -EINVAL;
                }
                vf = &pf->vfs[tunnel_filter->vf_id];
@@ -7606,11 +7666,13 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
        node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
        if (add && node) {
                PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+               rte_free(cld_filter);
                return -EINVAL;
        }
 
        if (!add && !node) {
                PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+               rte_free(cld_filter);
                return -EINVAL;
        }
 
@@ -7623,11 +7685,20 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
                                        vsi->seid, &cld_filter->element, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+                       rte_free(cld_filter);
                        return -ENOTSUP;
                }
                tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+               if (tunnel == NULL) {
+                       PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+                       rte_free(cld_filter);
+                       return -ENOMEM;
+               }
+
                rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
                ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+               if (ret < 0)
+                       rte_free(tunnel);
        } else {
                if (big_buffer)
                        ret = i40e_aq_remove_cloud_filters_big_buffer(
@@ -7637,6 +7708,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
                                                   &cld_filter->element, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+                       rte_free(cld_filter);
                        return -ENOTSUP;
                }
                ret = i40e_sw_tunnel_filter_del(pf, &node->input);
@@ -8090,14 +8162,17 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw,
                (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
 
        /*
-        * We work only with lowest 32 bits which is not correct, but to work
-        * properly the valid_bit_mask size should be increased up to 64 bits
-        * and this will brake ABI. This modification will be done in next
-        * release
+        * As i40e supports fewer than 64 flow types, only the first 64 bits
+        * need to be checked.
         */
-       g_cfg->valid_bit_mask[0] = (uint32_t)adapter->flow_types_mask;
+       for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
+               g_cfg->valid_bit_mask[i] = 0ULL;
+               g_cfg->sym_hash_enable_mask[i] = 0ULL;
+       }
 
-       for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT32_BIT; i++) {
+       g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
+
+       for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
                if (!adapter->pctypes_tbl[i])
                        continue;
                for (j = I40E_FILTER_PCTYPE_INVALID + 1;
@@ -8106,7 +8181,7 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw,
                                reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
                                if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
                                        g_cfg->sym_hash_enable_mask[0] |=
-                                                               (1UL << i);
+                                                               (1ULL << i);
                                }
                        }
                }
@@ -8120,7 +8195,7 @@ i40e_hash_global_config_check(const struct i40e_adapter *adapter,
                              const struct rte_eth_hash_global_conf *g_cfg)
 {
        uint32_t i;
-       uint32_t mask0, i40e_mask = adapter->flow_types_mask;
+       uint64_t mask0, i40e_mask = adapter->flow_types_mask;
 
        if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
                g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
@@ -8131,7 +8206,7 @@ i40e_hash_global_config_check(const struct i40e_adapter *adapter,
        }
 
        /*
-        * As i40e supports less than 32 flow types, only first 32 bits need to
-        * be checked.
+        * As i40e supports fewer than 64 flow types, only the first 64 bits
+        * need to be checked.
         */
        mask0 = g_cfg->valid_bit_mask[0];
@@ -8167,23 +8242,20 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
        int ret;
        uint16_t i, j;
        uint32_t reg;
-       /*
-        * We work only with lowest 32 bits which is not correct, but to work
-        * properly the valid_bit_mask size should be increased up to 64 bits
-        * and this will brake ABI. This modification will be done in next
-        * release
-        */
-       uint32_t mask0 = g_cfg->valid_bit_mask[0] &
-                                       (uint32_t)adapter->flow_types_mask;
+       uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
 
        /* Check the input parameters */
        ret = i40e_hash_global_config_check(adapter, g_cfg);
        if (ret < 0)
                return ret;
 
-       for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT32_BIT; i++) {
+       /*
+        * As i40e supports fewer than 64 flow types, only the first 64 bits
+        * need to be configured.
+        */
+       for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
-               if (mask0 & (1UL << i)) {
+               if (mask0 & (1ULL << i)) {
-                       reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
+                       reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
                                        I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
 
                        for (j = I40E_FILTER_PCTYPE_INVALID + 1;
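
The 1UL to 1ULL changes here (including the fixed shift in the loop body
above) matter on 32-bit targets, where unsigned long is 32 bits wide:
shifting 1UL by a flow-type index of 32 or more is undefined behaviour and
silently drops the upper half of the 64-bit mask. A small demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = 0;
	unsigned int i = 40;	/* a flow type above bit 31 */

	/* 1UL << 40 is undefined on ILP32; 1ULL is always 64-bit. */
	mask |= 1ULL << i;
	printf("bit %u -> 0x%llx\n", i, (unsigned long long)mask);
	return 0;
}
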
@@ -9248,9 +9320,16 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
        if (add) {
                ethertype_filter = rte_zmalloc("ethertype_filter",
                                       sizeof(*ethertype_filter), 0);
+               if (ethertype_filter == NULL) {
+                       PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+                       return -ENOMEM;
+               }
+
                rte_memcpy(ethertype_filter, &check_filter,
                           sizeof(check_filter));
                ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+               if (ret < 0)
+                       rte_free(ethertype_filter);
        } else {
                ret = i40e_sw_ethertype_filter_del(pf, &node->input);
        }
@@ -10924,14 +11003,43 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct ether_addr *mac_addr)
 {
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_vsi *vsi = pf->main_vsi;
+       struct i40e_mac_filter_info mac_filter;
+       struct i40e_mac_filter *f;
+       int ret;
 
        if (!is_valid_assigned_ether_addr(mac_addr)) {
                PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
                return;
        }
 
-       /* Flags: 0x3 updates port address */
-       i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
+       TAILQ_FOREACH(f, &vsi->mac_list, next) {
+               if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
+                       break;
+       }
+
+       if (f == NULL) {
+               PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
+               return;
+       }
+
+       mac_filter = f->mac_info;
+       ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
+       if (ret != I40E_SUCCESS) {
+               PMD_DRV_LOG(ERR, "Failed to delete mac filter");
+               return;
+       }
+       memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
+       ret = i40e_vsi_add_mac(vsi, &mac_filter);
+       if (ret != I40E_SUCCESS) {
+               PMD_DRV_LOG(ERR, "Failed to add mac filter");
+               return;
+       }
+       memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
+
+       i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+                                 mac_addr->addr_bytes, NULL);
 }
 
 static int
@@ -11049,12 +11157,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
        }
 }
 
+/* Restore rss filter */
+static inline void
+i40e_rss_filter_restore(struct i40e_pf *pf)
+{
+       struct i40e_rte_flow_rss_conf *conf =
+                                       &pf->rss_info;
+       if (conf->num)
+               i40e_config_rss_filter(pf, conf, TRUE);
+}
+
 static void
 i40e_filter_restore(struct i40e_pf *pf)
 {
        i40e_ethertype_filter_restore(pf);
        i40e_tunnel_filter_restore(pf);
        i40e_fdir_filter_restore(pf);
+       i40e_rss_filter_restore(pf);
 }
 
 static bool
@@ -11239,87 +11358,104 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
                                        continue;
                                memset(name, 0, sizeof(name));
                                strcpy(name, proto[n].name);
-                               if (!strncmp(name, "PPPOE", 5))
+                               if (!strncasecmp(name, "PPPOE", 5))
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L2_ETHER_PPPOE;
-                               else if (!strncmp(name, "OIPV4", 5)) {
+                               else if (!strncasecmp(name, "IPV4FRAG", 8) &&
+                                        !in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
-                                       in_tunnel = true;
-                               } else if (!strncmp(name, "IPV4", 4) &&
-                                          !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
-                                               RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
-                               else if (!strncmp(name, "IPV4FRAG", 8) &&
-                                        in_tunnel) {
+                                               RTE_PTYPE_L4_FRAG;
+                               } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
+                                          in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_FRAG;
-                               } else if (!strncmp(name, "IPV4", 4) &&
-                                          in_tunnel)
+                               } else if (!strncasecmp(name, "OIPV4", 5)) {
                                        ptype_mapping[i].sw_ptype |=
-                                           RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
-                               else if (!strncmp(name, "OIPV6", 5)) {
-                                       ptype_mapping[i].sw_ptype |=
-                                               RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+                                               RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                                        in_tunnel = true;
-                               } else if (!strncmp(name, "IPV6", 4) &&
+                               } else if (!strncasecmp(name, "IPV4", 4) &&
                                           !in_tunnel)
+                                       ptype_mapping[i].sw_ptype |=
+                                               RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+                               else if (!strncasecmp(name, "IPV4", 4) &&
+                                        in_tunnel)
+                                       ptype_mapping[i].sw_ptype |=
+                                           RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+                               else if (!strncasecmp(name, "IPV6FRAG", 8) &&
+                                        !in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
-                               else if (!strncmp(name, "IPV6FRAG", 8) &&
-                                        in_tunnel) {
+                                       ptype_mapping[i].sw_ptype |=
+                                               RTE_PTYPE_L4_FRAG;
+                               } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
+                                          in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_FRAG;
-                               } else if (!strncmp(name, "IPV6", 4) &&
-                                          in_tunnel)
+                               } else if (!strncasecmp(name, "OIPV6", 5)) {
+                                       ptype_mapping[i].sw_ptype |=
+                                               RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+                                       in_tunnel = true;
+                               } else if (!strncasecmp(name, "IPV6", 4) &&
+                                          !in_tunnel)
+                                       ptype_mapping[i].sw_ptype |=
+                                               RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+                               else if (!strncasecmp(name, "IPV6", 4) &&
+                                        in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
-                               else if (!strncmp(name, "UDP", 3) && !in_tunnel)
+                               else if (!strncasecmp(name, "UDP", 3) &&
+                                        !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_UDP;
-                               else if (!strncmp(name, "UDP", 3) && in_tunnel)
+                               else if (!strncasecmp(name, "UDP", 3) &&
+                                        in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_UDP;
-                               else if (!strncmp(name, "TCP", 3) && !in_tunnel)
+                               else if (!strncasecmp(name, "TCP", 3) &&
+                                        !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_TCP;
-                               else if (!strncmp(name, "TCP", 3) && in_tunnel)
+                               else if (!strncasecmp(name, "TCP", 3) &&
+                                        in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_TCP;
-                               else if (!strncmp(name, "SCTP", 4) &&
+                               else if (!strncasecmp(name, "SCTP", 4) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_SCTP;
-                               else if (!strncmp(name, "SCTP", 4) && in_tunnel)
+                               else if (!strncasecmp(name, "SCTP", 4) &&
+                                        in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_SCTP;
-                               else if ((!strncmp(name, "ICMP", 4) ||
-                                         !strncmp(name, "ICMPV6", 6)) &&
+                               else if ((!strncasecmp(name, "ICMP", 4) ||
+                                         !strncasecmp(name, "ICMPV6", 6)) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_ICMP;
-                               else if ((!strncmp(name, "ICMP", 4) ||
-                                         !strncmp(name, "ICMPV6", 6)) &&
+                               else if ((!strncasecmp(name, "ICMP", 4) ||
+                                         !strncasecmp(name, "ICMPV6", 6)) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_ICMP;
-                               else if (!strncmp(name, "GTPC", 4)) {
+                               else if (!strncasecmp(name, "GTPC", 4)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_GTPC;
                                        in_tunnel = true;
-                               } else if (!strncmp(name, "GTPU", 4)) {
+                               } else if (!strncasecmp(name, "GTPU", 4)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_GTPU;
                                        in_tunnel = true;
-                               } else if (!strncmp(name, "GRENAT", 6)) {
+                               } else if (!strncasecmp(name, "GRENAT", 6)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_GRENAT;
                                        in_tunnel = true;
-                               } else if (!strncmp(name, "L2TPv2CTL", 9)) {
+                               } else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_L2TP;
                                        in_tunnel = true;
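
Two details drive the rewrite of this mapping: profile protocol names are
now matched case-insensitively, and because strncmp()/strncasecmp() compare
only a prefix, "IPV4FRAG"/"IPV6FRAG" must be tested before the shorter
"IPV4"/"IPV6" or the fragment types would never be reached. A quick
demonstration:

#include <stdio.h>
#include <string.h>
#include <strings.h>	/* strncasecmp (POSIX) */

int main(void)
{
	const char *name = "ipv4frag";

	/* A case-sensitive match misses lower-case profile names... */
	printf("strncmp IPV4 matches: %d\n",
	       strncmp(name, "IPV4", 4) == 0);
	/* ...while a 4-byte prefix also matches the FRAG variant, so
	 * the longer name must be checked first. */
	printf("strncasecmp IPV4 matches: %d\n",
	       strncasecmp(name, "IPV4", 4) == 0);
	printf("strncasecmp IPV4FRAG matches: %d\n",
	       strncasecmp(name, "IPV4FRAG", 8) == 0);
	return 0;
}
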
@@ -11509,6 +11645,82 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
        return ret;
 }
 
+int
+i40e_config_rss_filter(struct i40e_pf *pf,
+               struct i40e_rte_flow_rss_conf *conf, bool add)
+{
+       struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+       uint32_t i, lut = 0;
+       uint16_t j, num;
+       struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+       struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+
+       if (!add) {
+               if (memcmp(conf, rss_info,
+                       sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
+                       i40e_pf_disable_rss(pf);
+                       memset(rss_info, 0,
+                               sizeof(struct i40e_rte_flow_rss_conf));
+                       return 0;
+               }
+               return -EINVAL;
+       }
+
+       if (rss_info->num)
+               return -EINVAL;
+
+       /* If both VMDQ and RSS are enabled, not all PF queues are configured.
+        * It's necessary to calculate the actual PF queues that are configured.
+        */
+       if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+               num = i40e_pf_calc_configured_queues_num(pf);
+       else
+               num = pf->dev_data->nb_rx_queues;
+
+       num = RTE_MIN(num, conf->num);
+       PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
+                       num);
+
+       if (num == 0) {
+               PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
+               return -ENOTSUP;
+       }
+
+       /* Fill in redirection table */
+       for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+               if (j == num)
+                       j = 0;
+               lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
+                       hw->func_caps.rss_table_entry_width) - 1));
+               if ((i & 3) == 3)
+                       I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+       }
+
+       if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
+               i40e_pf_disable_rss(pf);
+               return 0;
+       }
+       if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+               (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+               /* Random default keys */
+               static uint32_t rss_key_default[] = {0x6b793944,
+                       0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+                       0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+                       0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+               rss_conf.rss_key = (uint8_t *)rss_key_default;
+               rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+                                                       sizeof(uint32_t);
+       }
+
+       i40e_hw_rss_hash_set(pf, &rss_conf);
+
+       rte_memcpy(rss_info,
+               conf, sizeof(struct i40e_rte_flow_rss_conf));
+
+       return 0;
+}
+
 RTE_INIT(i40e_init_log);
 static void
 i40e_init_log(void)
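
As a closing note on the new i40e_config_rss_filter() above: its
redirection-table loop packs one LUT entry per byte and flushes four entries
per register write. A standalone model of that loop, with the table size and
entry width assumed from typical hw->func_caps values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t queue[] = { 0, 1, 2, 3 };	/* rule's queue set */
	const uint16_t num = 4;
	const uint16_t table_size = 512;	/* assumed rss_table_size */
	const uint16_t entry_width = 7;		/* assumed entry width, bits */
	uint32_t lut = 0;
	uint16_t i, j;

	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == num)
			j = 0;
		lut = (lut << 8) | (queue[j] & ((1u << entry_width) - 1));
		if ((i & 3) == 3 && i < 16)	/* show the first 4 writes */
			printf("HLUT[%u] = 0x%08x\n", i >> 2, lut);
	}
	return 0;
}
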