#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#define I40E_PRTTSYN_TSYNTYPE 0x0e000000
#define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL
-#define I40E_MAX_PERCENT 100
-#define I40E_DEFAULT_DCB_APP_NUM 1
-#define I40E_DEFAULT_DCB_APP_PRIO 3
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
-static void i40e_dev_stats_get(struct rte_eth_dev *dev,
+static int i40e_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
enum rte_vlan_type vlan_type,
uint16_t tpid);
-static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
uint16_t queue,
int on);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
-static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
bool offset_loaded, uint64_t *offset, uint64_t *stat);
+/* PCI driver registration for the i40e PF PMD.
+ * RTE_PCI_DRV_IOVA_AS_VA advertises that the driver can operate with
+ * IOVA as VA (no IOVA-to-PA translation required), in addition to the
+ * existing BAR-mapping and link-status-interrupt capabilities.
+ */
static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};
static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
-	 * Initialize registers for flexible payload, which should be set by NVM.
-	 * This should be removed from code once it is fixed in NVM.
+	 * Force global configuration for flexible payload
+	 * to the first 16 bytes of the corresponding L2/L3/L4 payload.
+	 * This should be removed from code once proper
+	 * configuration API is added to avoid configuration conflicts
+	 * between ports of the same device.
+	 *
+	 * NOTE(review): this hunk drops the ORT(18/19/20/23/26/30/63) and
+	 * PIT(16/17) writes, keeping only ORT(33-35) — confirm the removed
+	 * global writes are now handled elsewhere (e.g. per-port config).
	 */
-	I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
-	I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
-	I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
-	I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
-	I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
-	I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
-	I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
-	I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
-	I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
-	/* Initialize registers for parsing packet type of QinQ */
+	/*
+	 * Initialize registers for parsing packet type of QinQ
+	 * This should be removed from code once proper
+	 * configuration API is added to avoid configuration conflicts
+	 * between ports of the same device.
+	 */
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}
pf->gtp_support = false;
}
+/**
+ * i40e_init_queue_region_conf - reset all Rx queue-region configuration
+ * @dev: ethdev whose PF queue-region state is cleared
+ *
+ * Zeroes every PFQF_HREGION register and the PF's software queue-region
+ * bookkeeping so the port starts with no queue regions configured.
+ */
+void
+i40e_init_queue_region_conf(struct rte_eth_dev *dev)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_queue_regions *info = &pf->queue_region;
+	uint16_t i;
+
+	/* I40E_PFQF_HREGION_MAX_INDEX names the highest *valid* register
+	 * index (i40e convention for *_MAX_INDEX macros), so the loop must
+	 * be inclusive — "<" would leave the last HREGION register uncleared.
+	 */
+	for (i = 0; i <= I40E_PFQF_HREGION_MAX_INDEX; i++)
+		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
+
+	memset(info, 0, sizeof(struct i40e_queue_regions));
+}
+
static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
intr_handle = &pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
- dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
pf->adapter->eth_dev = dev;
if (ret < 0)
goto err_init_fdir_filter_list;
+ /* initialize queue region configuration */
+ i40e_init_queue_region_conf(dev);
+
return 0;
err_init_fdir_filter_list:
hw->adapter_stopped = 0;
if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
- PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled",
- dev->data->port_id);
+ PMD_INIT_LOG(ERR,
+ "Invalid link_speeds for port %u, autonegotiation disabled",
+ dev->data->port_id);
return -EINVAL;
}
/* reset hierarchy commit */
pf->tm_conf.committed = false;
+ /* Remove all the queue region configuration */
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
hw->adapter_stopped = 1;
}
}
/* Get all statistics of a port */
-static void
+static int
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
ns->checksum_error);
PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match);
PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
+ return 0;
}
/* Reset the statistics */
return ret;
}
-static void
+static int
i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
else
i40e_vsi_config_double_vlan(vsi, FALSE);
}
+
+ return 0;
}
static void
mem->size = size;
mem->va = mz->addr;
- mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ mem->pa = mz->iova;
mem->zone = (const void *)mz;
PMD_DRV_LOG(DEBUG,
"memzone %s allocated with physical address: %"PRIu64,
/* Apply vlan offload setting */
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
- i40e_vlan_offload_set(dev, mask);
+ ret = i40e_vlan_offload_set(dev, mask);
+ if (ret) {
+ PMD_DRV_LOG(INFO, "Failed to update vlan offload");
+ return ret;
+ }
/* Apply double-vlan setting, not implemented yet */
/* create L1 filter */
filter_replace.old_filter_type =
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
- filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
filter_replace.tr_bit = 0;
/* Prepare the buffer, 3 entries */
I40E_AQC_MIRROR_CLOUD_FILTER;
filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
filter_replace.new_filter_type =
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
/* Prepare the buffer, 2 entries */
filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
filter_replace_buf.data[0] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
- filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
filter_replace_buf.data[4] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
I40E_AQC_MIRROR_CLOUD_FILTER;
filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
filter_replace.new_filter_type =
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
/* Prepare the buffer, 2 entries */
filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
filter_replace_buf.data[0] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
- filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ return status;
+}
+
+/**
+ * i40e_replace_gtp_l1_filter - repurpose two admin-queue L1 filter slots
+ * for GTP tunnel matching.
+ *
+ * Issues two "replace cloud filters" admin commands:
+ *  - new L1 filter type 0x12 (used for GTP-C): replaces the inner-MAC
+ *    field vector with the two TEID words;
+ *  - new L1 filter type 0x13 (used for GTP-U): replaces the tunnel-key
+ *    field vector with the two TEID words.
+ * Returns I40E_SUCCESS or the (negative) status of the failing command.
+ */
+static enum i40e_status_code
+i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* For GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* presumably 0xFF bytes fully unmask the TEID word — TODO confirm
+	 * against the AQ "replace cloud filters" buffer layout.
+	 */
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum
+i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ /* for GTP-C */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (status < 0)
+ return status;
+
+ /* for GTP-U */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.old_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
filter_replace_buf.data[4] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
0x40;
big_buffer = 1;
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
break;
case I40E_TUNNEL_TYPE_MPLSoGRE:
if (!pf->mpls_replace_flag) {
pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
0x0;
big_buffer = 1;
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
+ break;
+ case I40E_TUNNEL_TYPE_GTPC:
+ if (!pf->gtp_replace_flag) {
+ i40e_replace_gtp_l1_filter(pf);
+ i40e_replace_gtp_cloud_filter(pf);
+ pf->gtp_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
+ (teid_le >> 16) & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
+ teid_le & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
+ 0x0;
+ big_buffer = 1;
+ break;
+ case I40E_TUNNEL_TYPE_GTPU:
+ if (!pf->gtp_replace_flag) {
+ i40e_replace_gtp_l1_filter(pf);
+ i40e_replace_gtp_cloud_filter(pf);
+ pf->gtp_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
+ (teid_le >> 16) & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
+ teid_le & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
+ 0x0;
+ big_buffer = 1;
break;
case I40E_TUNNEL_TYPE_QINQ:
if (!pf->qinq_replace_flag) {
if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
pfilter->element.flags =
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
pfilter->element.flags =
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
pfilter->element.flags |=
- I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ I40E_AQC_ADD_CLOUD_FILTER_0X10;
else {
val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
&pfilter->element.flags);
/* For both X710 and XL710 */
#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x20000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x203F0200
#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
*
* Returns 0 on success, negative value on failure
*/
-static int
+int
i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
sizeof(f->input.general_fields));
if (((f->input.flags &
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+ I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
((f->input.flags &
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+ I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
((f->input.flags &
- I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
- I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+ I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X10))
big_buffer = 1;
if (big_buffer)
struct rte_pmd_i40e_proto_info *proto)
{
struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
- uint8_t port_id = dev->data->port_id;
+ uint16_t port_id = dev->data->port_id;
uint32_t ptype_num;
struct rte_pmd_i40e_ptype_info *ptype;
uint32_t buff_size;
uint8_t proto_id;
- char name[16];
+ char name[RTE_PMD_I40E_DDP_NAME_SIZE];
uint32_t i, j, n;
bool inner_ip;
int ret;
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
inner_ip = true;
- } else if (!strncmp(name, "IPV4", 4) &&
+ } else if (!strncmp(name, "IPV4FRAG", 8) &&
inner_ip) {
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
- } else if (!strncmp(name, "IPV6", 4) &&
- !inner_ip) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_FRAG;
+ } else if (!strncmp(name, "IPV4", 4) &&
+ inner_ip)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ else if (!strncmp(name, "IPV6", 4) &&
+ !inner_ip) {
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
inner_ip = true;
- } else if (!strncmp(name, "IPV6", 4) &&
+ } else if (!strncmp(name, "IPV6FRAG", 8) &&
inner_ip) {
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
- } else if (!strncmp(name, "IPV4FRAG", 8)) {
- ptype_mapping[i].sw_ptype |=
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L4_FRAG;
- } else if (!strncmp(name, "IPV6FRAG", 8)) {
+ } else if (!strncmp(name, "IPV6", 4) &&
+ inner_ip)
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
- ptype_mapping[i].sw_ptype |=
- RTE_PTYPE_INNER_L4_FRAG;
- } else if (!strncmp(name, "GTPC", 4))
+ else if (!strncmp(name, "GTPC", 4))
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_TUNNEL_GTPC;
else if (!strncmp(name, "GTPU", 4))
/* create L1 filter */
filter_replace.old_filter_type =
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
- filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
filter_replace.tr_bit = 0;
/* Prepare the buffer, 2 entries */
/* create L2 filter, input for L2 filter will be L1 filter */
filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
- filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
/* Prepare the buffer, 2 entries */
filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
filter_replace_buf.data[0] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
- filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
filter_replace_buf.data[4] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,