static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
-static int
-i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
+static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
RTE_FLOW_ITEM_TYPE_END,
};
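+/* FDIR patterns for the customized L2TPv3-over-IP and ESP pctypes,
+ * with and without UDP encapsulation for ESP.
+ */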
+static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter },
/* FDIR - support default flow type with flexible payload */
{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
/* QINQ */
{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
+ /* L2TPv3 over IP */
+ { pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
* Mask bits of destination MAC address must be full
* of 1 or full of 0.
*/
- if (!is_zero_ether_addr(ð_mask->src) ||
- (!is_zero_ether_addr(ð_mask->dst) &&
- !is_broadcast_ether_addr(ð_mask->dst))) {
+ if (!rte_is_zero_ether_addr(ð_mask->src) ||
+ (!rte_is_zero_ether_addr(ð_mask->dst) &&
+ !rte_is_broadcast_ether_addr(ð_mask->dst))) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
/* If mask bits of destination MAC address
* are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
*/
- if (is_broadcast_ether_addr(ð_mask->dst)) {
+ if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
filter->mac_addr = eth_spec->dst;
filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
} else {
}
filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6 ||
- filter->ether_type == ETHER_TYPE_LLDP ||
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
+ filter->ether_type == RTE_ETHER_TYPE_LLDP ||
filter->ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
if (num < 0)
return -EINVAL;
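+ /* With `support-multi-driver`, the global mask registers are
+ * shared across drivers and must not be rewritten; only verify
+ * that they already hold the required values.
+ */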
+ if (pf->support_multi_driver) {
+ for (i = 0; i < num; i++)
+ if (i40e_read_rx_ctl(hw,
+ I40E_GLQF_FD_MSK(i, pctype)) !=
+ mask_reg[i]) {
+ PMD_DRV_LOG(ERR, "Input set setting is not"
+ " supported with"
+ " `support-multi-driver`"
+ " enabled!");
+ return -EPERM;
+ }
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ if (i40e_read_rx_ctl(hw,
+ I40E_GLQF_FD_MSK(i, pctype)) != 0) {
+ PMD_DRV_LOG(ERR, "Input set setting is not"
+ " supported with"
+ " `support-multi-driver`"
+ " enabled!");
+ return -EPERM;
+ }
+
+ } else {
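+ /* Single driver owns the device: program the mask registers. */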
+ for (i = 0; i < num; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ /* Clear unused mask registers of the pctype. */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ i40e_check_write_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype), 0);
+ }
+
inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
- for (i = 0; i < num; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- mask_reg[i]);
-
- /*clear unused mask registers of the pctype */
- for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
I40E_WRITE_FLUSH(hw);
pf->fdir.input_set[pctype] = input_set;
cus_pctype = i40e_find_customized_pctype(pf,
I40E_CUSTOMIZED_GTPU_IPV6);
break;
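+ /* Select the L2TPv3 pctype from the outer IP type parsed earlier. */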
+ case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
+ if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4)
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_IPV4_L2TPV3);
+ else if (filter->input.flow_ext.oip_type ==
+ I40E_FDIR_IPTYPE_IPV6)
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_IPV6_L2TPV3);
+ break;
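+ /* ESP maps to one of four customized pctypes, chosen by
+ * outer IP type and UDP encapsulation.
+ */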
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ if (!filter->input.flow_ext.is_udp) {
+ if (filter->input.flow_ext.oip_type ==
+ I40E_FDIR_IPTYPE_IPV4)
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_ESP_IPV4);
+ else if (filter->input.flow_ext.oip_type ==
+ I40E_FDIR_IPTYPE_IPV6)
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_ESP_IPV6);
+ } else {
+ if (filter->input.flow_ext.oip_type ==
+ I40E_FDIR_IPTYPE_IPV4)
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_ESP_IPV4_UDP);
+ else if (filter->input.flow_ext.oip_type ==
+ I40E_FDIR_IPTYPE_IPV6)
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_ESP_IPV6_UDP);
+ filter->input.flow_ext.is_udp = false;
+ }
+ break;
default:
PMD_DRV_LOG(ERR, "Unsupported item type");
break;
return I40E_FILTER_PCTYPE_INVALID;
}
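+/* Store the ESP SPI in the flow field selected by the outer IP
+ * type and UDP encapsulation recorded during pattern parsing.
+ */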
+static void
+i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter,
+ const struct rte_flow_item_esp *esp_spec)
+{
+ if (filter->input.flow_ext.oip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {
+ if (filter->input.flow_ext.is_udp)
+ filter->input.flow.esp_ipv4_udp_flow.spi =
+ esp_spec->hdr.spi;
+ else
+ filter->input.flow.esp_ipv4_flow.spi =
+ esp_spec->hdr.spi;
+ } else if (filter->input.flow_ext.oip_type ==
+ I40E_FDIR_IPTYPE_IPV6) {
+ if (filter->input.flow_ext.is_udp)
+ filter->input.flow.esp_ipv6_udp_flow.spi =
+ esp_spec->hdr.spi;
+ else
+ filter->input.flow.esp_ipv6_flow.spi =
+ esp_spec->hdr.spi;
+ }
+}
+
/* 1. Last in item should be NULL as range is not supported.
* 2. Supported patterns: refer to array i40e_supported_patterns.
* 3. Default supported flow type and input set: refer to array
const struct rte_flow_item_udp *udp_spec, *udp_mask;
const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+ const struct rte_flow_item_esp *esp_spec, *esp_mask;
const struct rte_flow_item_raw *raw_spec, *raw_mask;
const struct rte_flow_item_vf *vf_spec;
+ const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
uint8_t pctype = 0;
uint64_t input_set = I40E_INSET_NONE;
uint16_t frag_off;
enum rte_flow_item_type item_type;
+ enum rte_flow_item_type next_type;
enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
uint32_t i, j;
case RTE_FLOW_ITEM_TYPE_ETH:
eth_spec = item->spec;
eth_mask = item->mask;
+ next_type = (item + 1)->type;
+
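+ /* A pattern consisting of a single ETH item must carry spec and mask. */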
+ if (next_type == RTE_FLOW_ITEM_TYPE_END &&
+ (!eth_spec || !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL eth spec/mask.");
+ return -rte_errno;
+ }
if (eth_spec && eth_mask) {
- if (!is_zero_ether_addr(ð_mask->src) ||
- !is_zero_ether_addr(ð_mask->dst)) {
+ if (!rte_is_zero_ether_addr(ð_mask->src) ||
+ !rte_is_zero_ether_addr(ð_mask->dst)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
}
}
if (eth_spec && eth_mask && eth_mask->type) {
- enum rte_flow_item_type next = (item + 1)->type;
-
if (eth_mask->type != RTE_BE16(0xffff)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
ether_type = rte_be_to_cpu_16(eth_spec->type);
- if (next == RTE_FLOW_ITEM_TYPE_VLAN ||
- ether_type == ETHER_TYPE_IPv4 ||
- ether_type == ETHER_TYPE_IPv6 ||
- ether_type == ETHER_TYPE_ARP ||
+ if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
+ ether_type == RTE_ETHER_TYPE_IPV4 ||
+ ether_type == RTE_ETHER_TYPE_IPV6 ||
+ ether_type == RTE_ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
ether_type =
rte_be_to_cpu_16(vlan_spec->inner_type);
- if (ether_type == ETHER_TYPE_IPv4 ||
- ether_type == ETHER_TYPE_IPv6 ||
- ether_type == ETHER_TYPE_ARP ||
+ if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+ ether_type == RTE_ETHER_TYPE_IPV6 ||
+ ether_type == RTE_ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
/* Check if it is fragment. */
frag_off = ipv4_spec->hdr.fragment_offset;
frag_off = rte_be_to_cpu_16(frag_off);
- if (frag_off & IPV4_HDR_OFFSET_MASK ||
- frag_off & IPV4_HDR_MF_FLAG)
+ if (frag_off & RTE_IPV4_HDR_OFFSET_MASK ||
+ frag_off & RTE_IPV4_HDR_MF_FLAG)
pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
/* Get the filter info */
ipv4_spec->hdr.src_addr;
filter->input.flow.ip4_flow.dst_ip =
ipv4_spec->hdr.dst_addr;
+
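+ /* Record the outer IP type for later pctype selection. */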
+ filter->input.flow_ext.inner_ip = false;
+ filter->input.flow_ext.oip_type =
+ I40E_FDIR_IPTYPE_IPV4;
} else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
filter->input.flow_ext.inner_ip = true;
filter->input.flow_ext.iip_type =
I40E_FDIR_IPTYPE_IPV4;
+ } else if (!ipv4_spec && !ipv4_mask && outer_ip) {
+ filter->input.flow_ext.inner_ip = false;
+ filter->input.flow_ext.oip_type =
+ I40E_FDIR_IPTYPE_IPV4;
} else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
filter->input.flow.ipv6_flow.hop_limits =
ipv6_spec->hdr.hop_limits;
+ filter->input.flow_ext.inner_ip = false;
+ filter->input.flow_ext.oip_type =
+ I40E_FDIR_IPTYPE_IPV6;
+
rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
ipv6_spec->hdr.src_addr, 16);
rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
filter->input.flow_ext.inner_ip = true;
filter->input.flow_ext.iip_type =
I40E_FDIR_IPTYPE_IPV6;
+ } else if (!ipv6_spec && !ipv6_mask && outer_ip) {
+ filter->input.flow_ext.inner_ip = false;
+ filter->input.flow_ext.oip_type =
+ I40E_FDIR_IPTYPE_IPV6;
} else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
udp_spec->hdr.dst_port;
}
}
-
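+ /* Mark UDP so a following ESP item selects a UDP-encapsulated pctype. */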
+ filter->input.flow_ext.is_udp = true;
layer_idx = I40E_FLXPLD_L4_IDX;
break;
cus_proto = item_type;
}
break;
+ case RTE_FLOW_ITEM_TYPE_ESP:
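+ /* Reject ESP items when the device lacks ESP support. */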
+ if (!pf->esp_support) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ESP protocol");
+ return -rte_errno;
+ }
+
+ esp_spec = item->spec;
+ esp_mask = item->mask;
+
+ if (!esp_spec || !esp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ESP item");
+ return -rte_errno;
+ }
+
+ if (esp_spec && esp_mask) {
+ if (esp_mask->hdr.spi != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ESP mask");
+ return -rte_errno;
+ }
+ i40e_flow_set_filter_spi(filter, esp_spec);
+ filter->input.flow_ext.customized_pctype = true;
+ cus_proto = item_type;
+ }
+ break;
case RTE_FLOW_ITEM_TYPE_SCTP:
sctp_spec = item->spec;
sctp_mask = item->mask;
return -rte_errno;
}
break;
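+ /* L2TPv3 over IP: match on the 32-bit session ID. */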
+ case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
+ l2tpv3oip_spec = item->spec;
+ l2tpv3oip_mask = item->mask;
+
+ if (!l2tpv3oip_spec || !l2tpv3oip_mask)
+ break;
+
+ if (l2tpv3oip_mask->session_id != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid L2TPv3 mask");
+ return -rte_errno;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.ip4_l2tpv3oip_flow.session_id =
+ l2tpv3oip_spec->session_id;
+ filter->input.flow_ext.oip_type =
+ I40E_FDIR_IPTYPE_IPV4;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.ip6_l2tpv3oip_flow.session_id =
+ l2tpv3oip_spec->session_id;
+ filter->input.flow_ext.oip_type =
+ I40E_FDIR_IPTYPE_IPV6;
+ }
+
+ filter->input.flow_ext.customized_pctype = true;
+ cus_proto = item_type;
+ break;
default:
break;
}
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
- const struct rte_flow_action_mark *mark_spec;
+ const struct rte_flow_action_mark *mark_spec = NULL;
uint32_t index = 0;
- /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
+ /* Check if the first non-void action is QUEUE, DROP, PASSTHRU or MARK. */
case RTE_FLOW_ACTION_TYPE_PASSTHRU:
filter->action.behavior = I40E_FDIR_PASSTHRU;
break;
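+ /* A leading MARK action implies PASSTHRU and reports the mark ID. */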
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ filter->action.behavior = I40E_FDIR_PASSTHRU;
+ mark_spec = act->conf;
+ filter->action.report_status = I40E_FDIR_REPORT_ID;
+ filter->soft_id = mark_spec->id;
+ break;
default:
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, act,
NEXT_ITEM_OF_ACTION(act, actions, index);
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_MARK:
+ if (mark_spec) {
+ /* Only one MARK action is allowed per flow */
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid action.");
+ return -rte_errno;
+ }
mark_spec = act->conf;
filter->action.report_status = I40E_FDIR_REPORT_ID;
filter->soft_id = mark_spec->id;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
+ if (mark_spec) {
+ /* MARK + FLAG not supported */
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid action.");
+ return -rte_errno;
+ }
filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (filter->action.behavior != I40E_FDIR_PASSTHRU) {
+ /* RSS is only valid when the preceding FDIR action is PASSTHRU */
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid action.");
+ return -rte_errno;
+ }
+ break;
case RTE_FLOW_ACTION_TYPE_END:
return 0;
default:
cons_filter_type = RTE_ETH_FILTER_FDIR;
- if (dev->data->dev_conf.fdir_conf.mode !=
- RTE_FDIR_MODE_PERFECT) {
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT ||
+ pf->fdir.fdir_vsi == NULL) {
/* Enable fdir when fdir flow is added at first time. */
ret = i40e_fdir_setup(pf);
if (ret != I40E_SUCCESS) {
/* DST address of inner MAC shouldn't be masked.
* SRC address of Inner MAC should be masked.
*/
- if (!is_broadcast_ether_addr(ð_mask->dst) ||
- !is_zero_ether_addr(ð_mask->src) ||
+ if (!rte_is_broadcast_ether_addr(ð_mask->dst) ||
+ !rte_is_zero_ether_addr(ð_mask->src) ||
eth_mask->type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
if (!vxlan_flag) {
rte_memcpy(&filter->outer_mac,
ð_spec->dst,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
filter_type |= ETH_TUNNEL_FILTER_OMAC;
} else {
rte_memcpy(&filter->inner_mac,
ð_spec->dst,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
filter_type |= ETH_TUNNEL_FILTER_IMAC;
}
}
/* DST address of inner MAC shouldn't be masked.
* SRC address of Inner MAC should be masked.
*/
- if (!is_broadcast_ether_addr(ð_mask->dst) ||
- !is_zero_ether_addr(ð_mask->src) ||
+ if (!rte_is_broadcast_ether_addr(ð_mask->dst) ||
+ !rte_is_zero_ether_addr(ð_mask->src) ||
eth_mask->type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
if (!nvgre_flag) {
rte_memcpy(&filter->outer_mac,
ð_spec->dst,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
filter_type |= ETH_TUNNEL_FILTER_OMAC;
} else {
rte_memcpy(&filter->inner_mac,
ð_spec->dst,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
filter_type |= ETH_TUNNEL_FILTER_IMAC;
}
}
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
/* If the last flow is destroyed, disable fdir. */
- if (!ret && !TAILQ_EMPTY(&pf->fdir.fdir_list)) {
+ if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
i40e_fdir_teardown(pf);
dev->data->dev_conf.fdir_conf.mode =
RTE_FDIR_MODE_NONE;
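+ /* Likewise disable FDIR processing, as no rules remain. */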
+ i40e_fdir_rx_proc_enable(dev, 0);
}
break;
case RTE_ETH_FILTER_HASH:
int ret = 0;
memset(&cld_filter, 0, sizeof(cld_filter));
- ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
+ rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
(struct rte_ether_addr *)&cld_filter.element.outer_mac);
- ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
+ rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
(struct rte_ether_addr *)&cld_filter.element.inner_mac);
cld_filter.element.inner_vlan = filter->input.inner_vlan;
cld_filter.element.flags = filter->input.flags;
return -rte_errno;
}
+ /* Disable FDIR processing as all FDIR rules are now flushed */
+ i40e_fdir_rx_proc_enable(dev, 0);
+
return ret;
}