#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
-static int ixgbe_flow_flush(struct rte_eth_dev *dev,
- struct rte_flow_error *error);
-static int
-cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_eth_ntuple_filter *filter,
- struct rte_flow_error *error);
-static int
-ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_eth_ntuple_filter *filter,
- struct rte_flow_error *error);
-static int
-cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
- const struct rte_flow_item *pattern,
- const struct rte_flow_action *actions,
- struct rte_eth_ethertype_filter *filter,
- struct rte_flow_error *error);
-static int
-ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_eth_ethertype_filter *filter,
- struct rte_flow_error *error);
-static int
-cons_parse_syn_filter(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_eth_syn_filter *filter,
- struct rte_flow_error *error);
-static int
-ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_eth_syn_filter *filter,
- struct rte_flow_error *error);
-static int
-cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_eth_l2_tunnel_conf *filter,
- struct rte_flow_error *error);
-static int
-ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_eth_l2_tunnel_conf *rule,
- struct rte_flow_error *error);
-static int
-ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct ixgbe_fdir_rule *rule,
- struct rte_flow_error *error);
-static int
-ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct ixgbe_fdir_rule *rule,
- struct rte_flow_error *error);
-static int
-ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct ixgbe_fdir_rule *rule,
- struct rte_flow_error *error);
-static int
-ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct ixgbe_fdir_rule *rule,
- struct rte_flow_error *error);
-static int
-ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error);
-static struct rte_flow *ixgbe_flow_create(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error);
-static int ixgbe_flow_destroy(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error);
-
-const struct rte_flow_ops ixgbe_flow_ops = {
- ixgbe_flow_validate,
- ixgbe_flow_create,
- ixgbe_flow_destroy,
- ixgbe_flow_flush,
- NULL,
-};
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
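+/* The HW reads the two flex bytes at 2-byte granularity, so the source
+ * offset must be even; 62 is the largest byte offset that can be
+ * programmed.
+ */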
+#define IXGBE_MAX_FLX_SOURCE_OFF 62
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
do { \
item = pattern + index;\
* IPV4 src_addr 192.168.1.20 0xFFFFFFFF
* dst_addr 192.167.3.50 0xFFFFFFFF
* next_proto_id 17 0xFF
- * UDP/TCP src_port 80 0xFFFF
- * dst_port 80 0xFFFF
+ * UDP/TCP/ src_port 80 0xFFFF
+ * SCTP dst_port 80 0xFFFF
* END
 * other members in mask and spec should be set to 0x00.
* item->last should be NULL.
const struct rte_flow_item_tcp *tcp_mask;
const struct rte_flow_item_udp *udp_spec;
const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
uint32_t index;
if (!pattern) {
index++;
NEXT_ITEM_OF_PATTERN(item, pattern, index);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
- item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
filter->dst_port = tcp_spec->hdr.dst_port;
filter->src_port = tcp_spec->hdr.src_port;
filter->tcp_flags = tcp_spec->hdr.tcp_flags;
- } else {
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
udp_mask = (const struct rte_flow_item_udp *)item->mask;
/**
udp_spec = (const struct rte_flow_item_udp *)item->spec;
filter->dst_port = udp_spec->hdr.dst_port;
filter->src_port = udp_spec->hdr.src_port;
+ } else {
+ sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
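+	/* Ports are kept in network byte order, as carried in the item. */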
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
}
/* check if the next not void item is END */
/* a specific function for ixgbe because the flags are specific */
static int
-ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_eth_ntuple_filter *filter,
struct rte_flow_error *error)
{
int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
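+	/* Returns -ENOTSUP when the MAC type has no n-tuple filter support. */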
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
}
static int
-ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_eth_ethertype_filter *filter,
struct rte_flow_error *error)
{
int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
ret = cons_parse_ethertype_filter(attr, pattern,
actions, filter, error);
}
static int
-ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
+ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_eth_syn_filter *filter,
struct rte_flow_error *error)
{
int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
ret = cons_parse_syn_filter(attr, pattern,
actions, filter, error);
}
static int
-ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
+ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
* UDP/TCP/SCTP PATTERN:
* The first not void item can be ETH or IPV4.
* The second not void item must be IPV4 if the first one is ETH.
- * The third not void item must be UDP or TCP or SCTP.
+ * The next not void item could be UDP or TCP or SCTP (optional).
+ * The next not void item could be RAW (for flexbyte, optional).
* The next not void item must be END.
* MAC VLAN PATTERN:
* The first not void item must be ETH.
* dst_addr 192.167.3.50 0xFFFFFFFF
* UDP/TCP/SCTP src_port 80 0xFFFF
* dst_port 80 0xFFFF
+ * FLEX relative 0 0x1
+ * search 0 0x1
+ * reserved 0 0
+ * offset 12 0xFFFFFFFF
+ * limit 0 0xFFFF
+ * length 2 0xFFFF
+ * pattern[0] 0x86 0xFF
+ * pattern[1] 0xDD 0xFF
* END
* MAC VLAN pattern example:
* ITEM Spec Mask
{0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
* MAC VLAN tci 0x2016 0xEFFF
- * tpid 0x8100 0xFFFF
* END
 * Other members in mask and spec should be set to 0x00.
* Item->last should be NULL.
const struct rte_flow_item_sctp *sctp_mask;
const struct rte_flow_item_vlan *vlan_spec;
const struct rte_flow_item_vlan *vlan_mask;
+ const struct rte_flow_item_raw *raw_mask;
+ const struct rte_flow_item_raw *raw_spec;
uint32_t index, j;
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
rule->mask.vlan_tci_mask = 0;
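+	/* The flex byte mask stays cleared unless the pattern carries a RAW item. */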
+ rule->mask.flex_bytes_mask = 0;
/* parse pattern */
index = 0;
vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
- if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
- memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- }
-
rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
- if (vlan_mask->tpid != (uint16_t)~0U) {
- memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- }
rule->mask.vlan_tci_mask = vlan_mask->tci;
rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
/* More than one VLAN tag is not supported. */
- /**
- * Check if the next not void item is not vlan.
- */
+ /* Next not void item must be END */
index++;
NEXT_ITEM_OF_PATTERN(item, pattern, index);
- if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
- item->type != RTE_FLOW_ITEM_TYPE_END) {
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
rule->ixgbe_fdir.formatted.dst_port =
tcp_spec->hdr.dst_port;
}
+
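+		/* Only a RAW (flex byte) item or END may follow the TCP item. */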
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
}
/* Get the UDP info */
rule->ixgbe_fdir.formatted.dst_port =
udp_spec->hdr.dst_port;
}
+
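+		/* Only a RAW (flex byte) item or END may follow the UDP item. */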
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
}
/* Get the SCTP info */
rule->ixgbe_fdir.formatted.dst_port =
sctp_spec->hdr.dst_port;
}
+
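+		/* Only a RAW (flex byte) item or END may follow the SCTP item. */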
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the flex byte info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+ /* Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+		/* spec and mask should not be null */
+ if (!item->mask || !item->spec) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ raw_mask = (const struct rte_flow_item_raw *)item->mask;
+
+		/* check mask: every RAW field must be exactly matched */
+ if (raw_mask->relative != 0x1 ||
+ raw_mask->search != 0x1 ||
+ raw_mask->reserved != 0x0 ||
+ (uint32_t)raw_mask->offset != 0xffffffff ||
+ raw_mask->limit != 0xffff ||
+ raw_mask->length != 0xffff) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ raw_spec = (const struct rte_flow_item_raw *)item->spec;
+
+		/* check spec: match exactly 2 bytes at an even absolute offset */
+ if (raw_spec->relative != 0 ||
+ raw_spec->search != 0 ||
+ raw_spec->reserved != 0 ||
+ raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
+ raw_spec->offset % 2 ||
+ raw_spec->limit != 0 ||
+ raw_spec->length != 2 ||
+ /* pattern can't be 0xffff */
+ (raw_spec->pattern[0] == 0xff &&
+ raw_spec->pattern[1] == 0xff)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check pattern mask */
+ if (raw_mask->pattern[0] != 0xff ||
+ raw_mask->pattern[1] != 0xff) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mask.flex_bytes_mask = 0xffff;
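+		/* pattern[0] holds the first flex byte on the wire; the pair
+		 * is packed into the 16-bit flex_bytes field.
+		 */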
+ rule->ixgbe_fdir.formatted.flex_bytes =
+ (((uint16_t)raw_spec->pattern[1]) << 8) |
+ raw_spec->pattern[0];
+ rule->flex_bytes_offset = raw_spec->offset;
}
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
* IPV4/IPV6 NULL NULL
* UDP NULL NULL
* VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
* END
 * NVGRE pattern example:
* ITEM Spec Mask
* IPV4/IPV6 NULL NULL
* NVGRE protocol 0x6558 0xFFFF
* tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
* END
 * other members in mask and spec should be set to 0x00.
* item->last should be NULL.
item, "Not supported by fdir filter");
return -rte_errno;
}
- /*Not supported last point for range*/
+ /* Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
index++;
NEXT_ITEM_OF_PATTERN(item, pattern, index);
if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
- (item->type != RTE_FLOW_ITEM_TYPE_VLAN)) {
+ (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
- if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
- memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- }
-
rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
- if (vlan_mask->tpid != (uint16_t)~0U) {
- memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- }
rule->mask.vlan_tci_mask = vlan_mask->tci;
rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
/* More than one VLAN tag is not supported. */
- /**
- * Check if the next not void item is not vlan.
- */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
- if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
- memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- }
/* check if the next not void item is END */
index++;
NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
}
static int
-ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
+ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct ixgbe_fdir_rule *rule,
struct rte_flow_error *error)
{
- int ret = 0;
-
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
- ixgbe_parse_fdir_filter(attr, pattern, actions,
- rule, error);
-
-
- if (fdir_mode == RTE_FDIR_MODE_NONE ||
- fdir_mode != rule->mode)
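+	/* Flow director exists only on the 82599/X540/X550 MAC families. */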
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a)
return -ENOTSUP;
- return ret;
-}
-
-static int
-ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct ixgbe_fdir_rule *rule,
- struct rte_flow_error *error)
-{
- int ret;
-
ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
actions, rule, error);
if (!ret)
- return 0;
+ goto step_next;
ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
actions, rule, error);
+step_next:
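+	/* The parsed rule must match the fdir mode the port is configured with. */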
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
+ return -ENOTSUP;
return ret;
}
ixgbe_flow_mem_ptr, entries);
memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
- ret = ixgbe_parse_ntuple_filter(attr, pattern,
+ ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
if (!ret) {
ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
}
memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
- ret = ixgbe_parse_ethertype_filter(attr, pattern,
+ ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
actions, ðertype_filter, error);
if (!ret) {
ret = ixgbe_add_del_ethertype_filter(dev,
}
memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
- ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter, error);
+ ret = ixgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
if (!ret) {
ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
if (!ret) {
}
memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
- ret = ixgbe_parse_fdir_filter(attr, pattern,
+ ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
actions, &fdir_rule, error);
if (!ret) {
/* A mask cannot be deleted. */
rte_memcpy(&fdir_info->mask,
&fdir_rule.mask,
sizeof(struct ixgbe_hw_fdir_mask));
+ fdir_info->flex_bytes_offset =
+ fdir_rule.flex_bytes_offset;
+
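+			/* Program the flex byte offset only when the rule
+			 * actually uses flex bytes.
+			 */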
+ if (fdir_rule.mask.flex_bytes_mask)
+ ixgbe_fdir_set_flexbytes_offset(dev,
+ fdir_rule.flex_bytes_offset);
+
ret = ixgbe_fdir_set_input_mask(dev);
if (ret)
goto out;
sizeof(struct ixgbe_hw_fdir_mask));
if (ret)
goto out;
+
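+			/* Only one flex byte offset can be active per port;
+			 * a rule that needs a different one is rejected.
+			 */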
+ if (fdir_info->flex_bytes_offset !=
+ fdir_rule.flex_bytes_offset)
+ goto out;
}
}
}
memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
- ret = cons_parse_l2_tn_filter(attr, pattern,
+ ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
actions, &l2_tn_filter, error);
if (!ret) {
ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
out:
TAILQ_REMOVE(&ixgbe_flow_list,
ixgbe_flow_mem_ptr, entries);
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
rte_free(ixgbe_flow_mem_ptr);
rte_free(flow);
return NULL;
 * the HW, because there may not be enough room for the rule.
*/
static int
-ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ixgbe_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
int ret;
memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
- ret = ixgbe_parse_ntuple_filter(attr, pattern,
+ ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
if (!ret)
return 0;
memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
- ret = ixgbe_parse_ethertype_filter(attr, pattern,
+ ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
actions, ðertype_filter, error);
if (!ret)
return 0;
memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
- ret = ixgbe_parse_syn_filter(attr, pattern,
+ ret = ixgbe_parse_syn_filter(dev, attr, pattern,
actions, &syn_filter, error);
if (!ret)
return 0;
memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
- ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
+ ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
actions, &fdir_rule, error);
if (!ret)
return 0;
memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
- ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
+ ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
actions, &l2_tn_filter, error);
return ret;
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
TAILQ_REMOVE(&filter_fdir_list,
fdir_rule_ptr, entries);
rte_free(fdir_rule_ptr);
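+		/* With no fdir rules left, a new global mask may be accepted again. */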
+ if (TAILQ_EMPTY(&filter_fdir_list))
+ fdir_info->mask_added = false;
}
break;
case RTE_ETH_FILTER_L2_TUNNEL:
return 0;
}
+
+const struct rte_flow_ops ixgbe_flow_ops = {
+ ixgbe_flow_validate,
+ ixgbe_flow_create,
+ ixgbe_flow_destroy,
+ ixgbe_flow_flush,
+ NULL,
+};