pci: make device id tables const
[dpdk.git] / lib / librte_pmd_i40e / i40e_ethdev.c
index 624f0ce..43762f2 100644 (file)
@@ -73,7 +73,7 @@
 /* Maximun number of VSI */
 #define I40E_MAX_NUM_VSIS          (384UL)
 
-/* Default queue interrupt throttling time in microseconds*/
+/* Default queue interrupt throttling time in microseconds */
 #define I40E_ITR_INDEX_DEFAULT          0
 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
 #define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
                I40E_PFINT_ICR0_ENA_VFLR_MASK | \
                I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
 
-static int eth_i40e_dev_init(\
-                       __attribute__((unused)) struct eth_driver *eth_drv,
-                       struct rte_eth_dev *eth_dev);
+#define I40E_FLOW_TYPES ( \
+       (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
+       (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+       (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+       (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+       (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+       (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
+       (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+       (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+       (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+       (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
+       (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
+
+static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
 static int i40e_dev_configure(struct rte_eth_dev *dev);
 static int i40e_dev_start(struct rte_eth_dev *dev);
 static void i40e_dev_stop(struct rte_eth_dev *dev);
@@ -199,17 +210,15 @@ static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
 static void i40e_configure_registers(struct i40e_hw *hw);
+static void i40e_hw_init(struct i40e_hw *hw);
 
-/* Default hash key buffer for RSS */
-static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
-
-static struct rte_pci_id pci_id_i40e_map[] = {
+static const struct rte_pci_id pci_id_i40e_map[] = {
 #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
 #include "rte_pci_dev_ids.h"
 { .vendor_id = 0, /* sentinel */ },
 };
 
-static struct eth_dev_ops i40e_eth_dev_ops = {
+static const struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
@@ -353,8 +362,7 @@ static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
 }
 
 static int
-eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
-                  struct rte_eth_dev *dev)
+eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev;
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -401,6 +409,9 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
        /* Make sure all is clean before doing PF reset */
        i40e_clear_hw(hw);
 
+       /* Initialize the hardware */
+       i40e_hw_init(hw);
+
        /* Reset here to make sure all is clean for each PF */
        ret = i40e_pf_reset(hw);
        if (ret) {
@@ -1070,28 +1081,37 @@ i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
 
 int
 i40e_dev_link_update(struct rte_eth_dev *dev,
-                    __rte_unused int wait_to_complete)
+                    int wait_to_complete)
 {
+#define CHECK_INTERVAL 100  /* 100ms */
+#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_link_status link_status;
        struct rte_eth_link link, old;
        int status;
+       unsigned rep_cnt = MAX_REPEAT_TIME;
 
        memset(&link, 0, sizeof(link));
        memset(&old, 0, sizeof(old));
        memset(&link_status, 0, sizeof(link_status));
        rte_i40e_dev_atomic_read_link_status(dev, &old);
 
-       /* Get link status information from hardware */
-       status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
-       if (status != I40E_SUCCESS) {
-               link.link_speed = ETH_LINK_SPEED_100;
-               link.link_duplex = ETH_LINK_FULL_DUPLEX;
-               PMD_DRV_LOG(ERR, "Failed to get link info");
-               goto out;
-       }
+       do {
+               /* Get link status information from hardware */
+               status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
+               if (status != I40E_SUCCESS) {
+                       link.link_speed = ETH_LINK_SPEED_100;
+                       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+                       PMD_DRV_LOG(ERR, "Failed to get link info");
+                       goto out;
+               }
+
+               link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
+               if (!wait_to_complete)
+                       break;
 
-       link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
+               rte_delay_ms(CHECK_INTERVAL);
+       } while (!link.link_status && rep_cnt--);
 
        if (!link.link_status)
                goto out;
@@ -1516,8 +1536,11 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
-               DEV_TX_OFFLOAD_SCTP_CKSUM;
+               DEV_TX_OFFLOAD_SCTP_CKSUM |
+               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+               DEV_TX_OFFLOAD_TCP_TSO;
        dev_info->reta_size = pf->hash_lut_size;
+       dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -2632,7 +2655,7 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
                        rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
                info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
        }
-       info->valid_sections =
+       info->valid_sections |=
                rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
 
        return I40E_SUCCESS;
@@ -3045,11 +3068,15 @@ i40e_vsi_setup(struct i40e_pf *pf,
                ctxt.connection_type = 0x1;
                ctxt.flags = I40E_AQ_VSI_TYPE_VF;
 
-               /* Configure switch ID */
-               ctxt.info.valid_sections |=
-                       rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-               ctxt.info.switch_id =
-                       rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+               /**
+                * Do not configure switch ID to enable VEB switch by
+                * I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB. Because in Fortville,
+                * if the source mac address of packet sent from VF is not
+                * listed in the VEB's mac table, the VEB will switch the
+                * packet back to the VF. Need to enable it when HW issue
+                * is fixed.
+                */
+
                /* Configure port/vlan */
                ctxt.info.valid_sections |=
                        rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
@@ -3106,6 +3133,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
                ctxt.info.valid_sections |=
                        rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
        } else if (type == I40E_VSI_FDIR) {
+               memset(&ctxt, 0, sizeof(ctxt));
                vsi->uplink_seid = uplink_vsi->uplink_seid;
                ctxt.pf_num = hw->pf_id;
                ctxt.vf_num = 0;
@@ -3129,7 +3157,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 
        if (vsi->type != I40E_VSI_MAIN) {
                ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
-               if (ret) {
+               if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
                                    hw->aq.asq_last_status);
                        goto fail_msix_alloc;
@@ -4574,26 +4602,26 @@ i40e_config_hena(uint64_t flags)
        if (!flags)
                return hena;
 
-       if (flags & ETH_RSS_NONF_IPV4_UDP)
-               hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
-       if (flags & ETH_RSS_NONF_IPV4_TCP)
+       if (flags & ETH_RSS_FRAG_IPV4)
+               hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
+       if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
                hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
-       if (flags & ETH_RSS_NONF_IPV4_SCTP)
+       if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
+               hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+       if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
                hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
-       if (flags & ETH_RSS_NONF_IPV4_OTHER)
+       if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
                hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
-       if (flags & ETH_RSS_FRAG_IPV4)
-               hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
-       if (flags & ETH_RSS_NONF_IPV6_UDP)
-               hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
-       if (flags & ETH_RSS_NONF_IPV6_TCP)
+       if (flags & ETH_RSS_FRAG_IPV6)
+               hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
+       if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
                hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
-       if (flags & ETH_RSS_NONF_IPV6_SCTP)
+       if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
+               hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+       if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
                hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
-       if (flags & ETH_RSS_NONF_IPV6_OTHER)
+       if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
                hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
-       if (flags & ETH_RSS_FRAG_IPV6)
-               hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
        if (flags & ETH_RSS_L2_PAYLOAD)
                hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
 
@@ -4608,27 +4636,26 @@ i40e_parse_hena(uint64_t flags)
 
        if (!flags)
                return rss_hf;
-
-       if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
-               rss_hf |= ETH_RSS_NONF_IPV4_UDP;
+       if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
+               rss_hf |= ETH_RSS_FRAG_IPV4;
        if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
-               rss_hf |= ETH_RSS_NONF_IPV4_TCP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+       if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
        if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
-               rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
        if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
-               rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
-       if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
-               rss_hf |= ETH_RSS_FRAG_IPV4;
-       if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
-               rss_hf |= ETH_RSS_NONF_IPV6_UDP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
+       if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
+               rss_hf |= ETH_RSS_FRAG_IPV6;
        if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
-               rss_hf |= ETH_RSS_NONF_IPV6_TCP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+       if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
        if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
-               rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
        if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
-               rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
-       if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
-               rss_hf |= ETH_RSS_FRAG_IPV6;
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
        if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
                rss_hf |= ETH_RSS_L2_PAYLOAD;
 
@@ -4796,6 +4823,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
        case RTE_TUNNEL_TYPE_VXLAN:
                tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
                break;
+       case RTE_TUNNEL_TYPE_NVGRE:
+               tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+               break;
        default:
                /* Other tunnel types is not supported. */
                PMD_DRV_LOG(ERR, "tunnel type is not supported.");
@@ -5039,9 +5069,12 @@ i40e_pf_config_rss(struct i40e_pf *pf)
        }
        if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
                (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
-               /* Calculate the default hash key */
-               for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-                       rss_key_default[i] = (uint32_t)rte_rand();
+               /* Random default keys */
+               static uint32_t rss_key_default[] = {0x6b793944,
+                       0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+                       0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+                       0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
                rss_conf.rss_key = (uint8_t *)rss_key_default;
                rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
                                                        sizeof(uint32_t);
@@ -5136,6 +5169,261 @@ i40e_pf_config_mq_rx(struct i40e_pf *pf)
        return ret;
 }
 
+/* Get the symmetric hash enable configurations per port */
+static void
+i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
+{
+       uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
+
+       *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
+}
+
+/* Set the symmetric hash enable configurations per port */
+static void
+i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
+{
+       uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
+
+       if (enable > 0) {
+               if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
+                       PMD_DRV_LOG(INFO, "Symmetric hash has already "
+                                                       "been enabled");
+                       return;
+               }
+               reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
+       } else {
+               if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
+                       PMD_DRV_LOG(INFO, "Symmetric hash has already "
+                                                       "been disabled");
+                       return;
+               }
+               reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
+       }
+       I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg);
+       I40E_WRITE_FLUSH(hw);
+}
+
+/*
+ * Get global configurations of hash function type and symmetric hash enable
+ * per flow type (pctype). Note that global configuration means it affects all
+ * the ports on the same NIC.
+ */
+static int
+i40e_get_hash_filter_global_config(struct i40e_hw *hw,
+                                  struct rte_eth_hash_global_conf *g_cfg)
+{
+       uint32_t reg, mask = I40E_FLOW_TYPES;
+       uint16_t i;
+       enum i40e_filter_pctype pctype;
+
+       memset(g_cfg, 0, sizeof(*g_cfg));
+       reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
+       if (reg & I40E_GLQF_CTL_HTOEP_MASK)
+               g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+       else
+               g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+       PMD_DRV_LOG(DEBUG, "Hash function is %s",
+               (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
+
+       for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
+               if (!(mask & (1UL << i)))
+                       continue;
+               mask &= ~(1UL << i);
+       /* Bit set indicates the corresponding flow type is supported */
+               g_cfg->valid_bit_mask[0] |= (1UL << i);
+               pctype = i40e_flowtype_to_pctype(i);
+               reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype));
+               if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
+                       g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
+       }
+
+       return 0;
+}
+
+static int
+i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
+{
+       uint32_t i;
+       uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
+
+       if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
+               g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
+               g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
+               PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
+                                               g_cfg->hash_func);
+               return -EINVAL;
+       }
+
+       /*
+        * As i40e supports fewer than 32 flow types, only the first 32 bits
+        * need to be checked.
+        */
+       mask0 = g_cfg->valid_bit_mask[0];
+       for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
+               if (i == 0) {
+                       /* Check if any unsupported flow type configured */
+                       if ((mask0 | i40e_mask) ^ i40e_mask)
+                               goto mask_err;
+               } else {
+                       if (g_cfg->valid_bit_mask[i])
+                               goto mask_err;
+               }
+       }
+
+       return 0;
+
+mask_err:
+       PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
+
+       return -EINVAL;
+}
+
+/*
+ * Set global configurations of hash function type and symmetric hash enable
+ * per flow type (pctype). Note any modifying global configuration will affect
+ * all the ports on the same NIC.
+ */
+static int
+i40e_set_hash_filter_global_config(struct i40e_hw *hw,
+                                  struct rte_eth_hash_global_conf *g_cfg)
+{
+       int ret;
+       uint16_t i;
+       uint32_t reg;
+       uint32_t mask0 = g_cfg->valid_bit_mask[0];
+       enum i40e_filter_pctype pctype;
+
+       /* Check the input parameters */
+       ret = i40e_hash_global_config_check(g_cfg);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; mask0 && i < UINT32_BIT; i++) {
+               if (!(mask0 & (1UL << i)))
+                       continue;
+               mask0 &= ~(1UL << i);
+               pctype = i40e_flowtype_to_pctype(i);
+               reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
+                               I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
+               I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg);
+       }
+
+       reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
+       if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
+               /* Toeplitz */
+               if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
+                       PMD_DRV_LOG(DEBUG, "Hash function already set to "
+                                                               "Toeplitz");
+                       goto out;
+               }
+               reg |= I40E_GLQF_CTL_HTOEP_MASK;
+       } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+               /* Simple XOR */
+               if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
+                       PMD_DRV_LOG(DEBUG, "Hash function already set to "
+                                                       "Simple XOR");
+                       goto out;
+               }
+               reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
+       } else
+               /* Use the default, and keep it as it is */
+               goto out;
+
+       I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg);
+
+out:
+       I40E_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+static int
+i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
+{
+       int ret = 0;
+
+       if (!hw || !info) {
+               PMD_DRV_LOG(ERR, "Invalid pointer");
+               return -EFAULT;
+       }
+
+       switch (info->info_type) {
+       case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+               i40e_get_symmetric_hash_enable_per_port(hw,
+                                       &(info->info.enable));
+               break;
+       case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+               ret = i40e_get_hash_filter_global_config(hw,
+                               &(info->info.global_conf));
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
+                                                       info->info_type);
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int
+i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
+{
+       int ret = 0;
+
+       if (!hw || !info) {
+               PMD_DRV_LOG(ERR, "Invalid pointer");
+               return -EFAULT;
+       }
+
+       switch (info->info_type) {
+       case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+               i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
+               break;
+       case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+               ret = i40e_set_hash_filter_global_config(hw,
+                               &(info->info.global_conf));
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
+                                                       info->info_type);
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Operations for hash function */
+static int
+i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
+                     enum rte_filter_op filter_op,
+                     void *arg)
+{
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret = 0;
+
+       switch (filter_op) {
+       case RTE_ETH_FILTER_NOP:
+               break;
+       case RTE_ETH_FILTER_GET:
+               ret = i40e_hash_filter_get(hw,
+                       (struct rte_eth_hash_filter_info *)arg);
+               break;
+       case RTE_ETH_FILTER_SET:
+               ret = i40e_hash_filter_set(hw,
+                       (struct rte_eth_hash_filter_info *)arg);
+               break;
+       default:
+               PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
+                                                               filter_op);
+               ret = -ENOTSUP;
+               break;
+       }
+
+       return ret;
+}
+
 /*
  * Configure ethertype filter, which can director packet by filtering
  * with mac address and ether_type or only ether_type
@@ -5238,6 +5526,9 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (filter_type) {
+       case RTE_ETH_FILTER_HASH:
+               ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
+               break;
        case RTE_ETH_FILTER_MACVLAN:
                ret = i40e_mac_filter_handle(dev, filter_op, arg);
                break;
@@ -5260,47 +5551,72 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
        return ret;
 }
 
+/*
+ * As some registers would not be reset except by a global hardware reset,
+ * hardware initialization is needed to put those registers into an
+ * expected initial state.
+ */
+static void
+i40e_hw_init(struct i40e_hw *hw)
+{
+       /* clear the PF Queue Filter control register */
+       I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0);
+
+       /* Disable symmetric hash per port */
+       i40e_set_symmetric_hash_enable_per_port(hw, 0);
+}
+
 enum i40e_filter_pctype
-i40e_flowtype_to_pctype(enum rte_eth_flow_type flow_type)
+i40e_flowtype_to_pctype(uint16_t flow_type)
 {
        static const enum i40e_filter_pctype pctype_table[] = {
-               [RTE_ETH_FLOW_TYPE_UDPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
-               [RTE_ETH_FLOW_TYPE_TCPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
-               [RTE_ETH_FLOW_TYPE_SCTPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
-               [RTE_ETH_FLOW_TYPE_IPV4_OTHER] =
-                                       I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
-               [RTE_ETH_FLOW_TYPE_FRAG_IPV4] =
-                                       I40E_FILTER_PCTYPE_FRAG_IPV4,
-               [RTE_ETH_FLOW_TYPE_UDPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
-               [RTE_ETH_FLOW_TYPE_TCPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
-               [RTE_ETH_FLOW_TYPE_SCTPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
-               [RTE_ETH_FLOW_TYPE_IPV6_OTHER] =
-                                       I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
-               [RTE_ETH_FLOW_TYPE_FRAG_IPV6] =
-                                       I40E_FILTER_PCTYPE_FRAG_IPV6,
+               [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
+               [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
+                       I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+               [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
+                       I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+               [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
+                       I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+               [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
+                       I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+               [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
+               [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
+                       I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+               [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
+                       I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+               [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
+                       I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+               [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
+                       I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+               [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
        };
 
        return pctype_table[flow_type];
 }
 
-enum rte_eth_flow_type
+uint16_t
 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
 {
-       static const enum rte_eth_flow_type flowtype_table[] = {
-               [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = RTE_ETH_FLOW_TYPE_UDPV4,
-               [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = RTE_ETH_FLOW_TYPE_TCPV4,
-               [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = RTE_ETH_FLOW_TYPE_SCTPV4,
+       static const uint16_t flowtype_table[] = {
+               [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
+               [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+                       RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
+               [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+                       RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
+               [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+                       RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
                [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
-                                       RTE_ETH_FLOW_TYPE_IPV4_OTHER,
-               [I40E_FILTER_PCTYPE_FRAG_IPV4] =
-                                       RTE_ETH_FLOW_TYPE_FRAG_IPV4,
-               [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = RTE_ETH_FLOW_TYPE_UDPV6,
-               [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = RTE_ETH_FLOW_TYPE_TCPV6,
-               [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = RTE_ETH_FLOW_TYPE_SCTPV6,
+                       RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
+               [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
+               [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
+                       RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
+               [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
+                       RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
+               [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+                       RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
                [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
-                                       RTE_ETH_FLOW_TYPE_IPV6_OTHER,
-               [I40E_FILTER_PCTYPE_FRAG_IPV6] =
-                                       RTE_ETH_FLOW_TYPE_FRAG_IPV6,
+                       RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
+               [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
        };
 
        return flowtype_table[pctype];
@@ -5310,56 +5626,71 @@ static int
 i40e_debug_read_register(struct i40e_hw *hw, uint32_t addr, uint64_t *val)
 {
        struct i40e_aq_desc desc;
-       struct i40e_aqc_debug_reg_read_write *cmd =
-               (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
        enum i40e_status_code status;
 
        i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
-       cmd->address = rte_cpu_to_le_32(addr);
+       desc.params.internal.param1 = rte_cpu_to_le_32(addr);
        status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
        if (status < 0)
                return status;
 
-       *val = ((uint64_t)(rte_le_to_cpu_32(cmd->value_high)) << (CHAR_BIT *
-                       sizeof(uint32_t))) + rte_le_to_cpu_32(cmd->value_low);
+       *val = ((uint64_t)(rte_le_to_cpu_32(desc.params.internal.param2)) <<
+                                       (CHAR_BIT * sizeof(uint32_t))) +
+                               rte_le_to_cpu_32(desc.params.internal.param3);
 
        return status;
 }
 
 /*
  * On X710, performance number is far from the expectation on recent firmware
- * versions. The fix for this issue may not be integrated in the following
+ * versions; on XL710, performance number is also far from the expectation on
+ * recent firmware versions, if promiscuous mode is disabled, or promiscuous
+ * mode is enabled and port MAC address is equal to the packet destination MAC
+ * address. The fix for this issue may not be integrated in the following
  * firmware version. So the workaround in software driver is needed. It needs
- * to modify the initial values of 3 internal only registers. Note that the
+ * to modify the initial values of 3 internal only registers for both X710 and
+ * XL710. Note that the values for X710 or XL710 could be different, and the
  * workaround can be removed when it is fixed in firmware in the future.
  */
-static void
-i40e_configure_registers(struct i40e_hw *hw)
-{
+
+/* For both X710 and XL710 */
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
 #define I40E_GL_SWR_PRI_JOIN_MAP_0       0x26CE00
+
+#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
+
+/* For X710 */
+#define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
+/* For XL710 */
+#define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
-#define I40E_GL_SWR_PM_UP_THR_VALUE      0x03030303
 
-       static const struct {
+static void
+i40e_configure_registers(struct i40e_hw *hw)
+{
+       static struct {
                uint32_t addr;
                uint64_t val;
        } reg_table[] = {
                {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
                {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
-               {I40E_GL_SWR_PM_UP_THR, I40E_GL_SWR_PM_UP_THR_VALUE},
+               {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
        };
        uint64_t reg;
        uint32_t i;
        int ret;
 
-       /* Below fix is for X710 only */
-       if (i40e_is_40G_device(hw->device_id))
-               return;
-
        for (i = 0; i < RTE_DIM(reg_table); i++) {
+               if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
+                       if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
+                               reg_table[i].val =
+                                       I40E_GL_SWR_PM_UP_THR_SF_VALUE;
+                       else /* For X710 */
+                               reg_table[i].val =
+                                       I40E_GL_SWR_PM_UP_THR_EF_VALUE;
+               }
+
                ret = i40e_debug_read_register(hw, reg_table[i].addr, &reg);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,