#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62
+/* ntuple filter list structure */
+struct ixgbe_ntuple_filter_ele {
+ TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct ixgbe_ethertype_filter_ele {
+ TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct ixgbe_eth_syn_filter_ele {
+ TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct ixgbe_fdir_rule_ele {
+ TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
+ struct ixgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct ixgbe_eth_l2_tunnel_conf_ele {
+ TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
+ struct rte_eth_l2_tunnel_conf filter_info;
+};
+/* ixgbe_flow memory list structure */
+struct ixgbe_flow_mem {
+ TAILQ_ENTRY(ixgbe_flow_mem) entries;
+ struct rte_flow *flow;
+};
+
+TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
+TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
+TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
+TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
+TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
+
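+/* Global lists of installed filters; set up once by ixgbe_filterlist_init(). */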
+static struct ixgbe_ntuple_filter_list filter_ntuple_list;
+static struct ixgbe_ethertype_filter_list filter_ethertype_list;
+static struct ixgbe_syn_filter_list filter_syn_list;
+static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
+static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct ixgbe_flow_mem_list ixgbe_flow_list;
+
/**
 * An endless loop can never happen with the assumptions below:
 * 1. there is at least one no-void item (END)
 * END
 * other members in mask and spec should be set to 0x00.
* item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
*/
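+/**
+ * For illustration only (addresses, ports and the queue index are made
+ * up), a rule the ntuple parser below accepts could look like this in
+ * testpmd flow syntax:
+ *   flow create 0 ingress pattern eth / ipv4 src is 1.2.3.4 /
+ *     tcp src is 32 dst is 33 / end actions queue index 2 / end
+ */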
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
+#ifdef RTE_LIBRTE_SECURITY
+ /**
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
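+ /* For a SECURITY action, act->conf carries the security session;
+ * it is handed to the ingress SA setup below.
+ */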
+ const void *conf = act->conf;
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* get the IP pattern */
+ item = next_no_void_pattern(pattern, NULL);
+ while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ if (item->last ||
+ item->type == RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "IP pattern missing.");
+ return -rte_errno;
+ }
+ item = next_no_void_pattern(pattern, item);
+ }
+
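+ /* Mark the rule as ESP so callers can tell it was handled as an
+ * inline-crypto SA rather than as a regular ntuple filter.
+ */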
+ filter->proto = IPPROTO_ESP;
+ return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+ }
+#endif
+
/* the first not void item can be MAC or IPv4 */
item = next_no_void_pattern(pattern, NULL);
item = next_no_void_pattern(pattern, item);
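+ /* END is accepted here too, so rules that match only on IP
+ * addresses (no L4 item) can be created.
+ */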
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
- item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
}
/* get the TCP/UDP/SCTP info */
- if (!item->spec || !item->mask) {
+ if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+ (!item->spec || !item->mask)) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
udp_spec = (const struct rte_flow_item_udp *)item->spec;
filter->dst_port = udp_spec->hdr.dst_port;
filter->src_port = udp_spec->hdr.src_port;
- } else {
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
/**
sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
filter->dst_port = sctp_spec->hdr.dst_port;
filter->src_port = sctp_spec->hdr.src_port;
+ } else {
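+ /* No L4 item to parse; jump straight to the action checks. */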
+ goto action;
}
/* check if the next not void item is END */
return -rte_errno;
}
+action:
+
/**
* n-tuple only supports forwarding,
* check if the first not void action is QUEUE.
if (ret)
return ret;
+#ifdef RTE_LIBRTE_SECURITY
+ /* An ESP flow is not really an ntuple flow; the ingress SA has
+ * already been programmed during parsing, so nothing is left to check.
+ */
+ if (filter->proto == IPPROTO_ESP)
+ return 0;
+#endif
+
/* Ixgbe doesn't support tcp flags. */
if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
return -rte_errno;
}
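+ /* Bound the queue by the number of Rx queues actually configured,
+ * not by the device maximum.
+ */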
- if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
- filter->priority > IXGBE_5TUPLE_MAX_PRI ||
- filter->priority < IXGBE_5TUPLE_MIN_PRI)
+ if (filter->queue >= dev->data->nb_rx_queues)
return -rte_errno;
/* fixed value for ixgbe */
return -rte_errno;
}
- if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
+ if (filter->queue >= dev->data->nb_rx_queues) {
memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
ret = cons_parse_syn_filter(attr, pattern,
actions, filter, error);
+ if (filter->queue >= dev->data->nb_rx_queues)
+ return -rte_errno;
+
if (ret)
return ret;
return -rte_errno;
}
+ if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
+ return -rte_errno;
+
return ret;
}
* Item->last should be NULL.
*/
static int
-ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
+ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct ixgbe_fdir_rule *rule,
const struct rte_flow_item_vlan *vlan_mask;
const struct rte_flow_item_raw *raw_mask;
const struct rte_flow_item_raw *raw_spec;
-
uint8_t j;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
if (!pattern) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_NUM,
item, "Not supported last point for range");
return -rte_errno;
}
- /**
- * Only care about src & dst ports,
- * others should be masked.
- */
- if (!item->mask) {
- memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- }
- rule->b_mask = TRUE;
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
- if (sctp_mask->hdr.tag ||
- sctp_mask->hdr.cksum) {
- memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- }
- rule->mask.src_port_mask = sctp_mask->hdr.src_port;
- rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
- if (item->spec) {
- rule->b_spec = TRUE;
- sctp_spec =
+ /* Only the x550 family supports matching the SCTP ports. */
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a) {
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = sctp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ sctp_spec =
(const struct rte_flow_item_sctp *)item->spec;
- rule->ixgbe_fdir.formatted.src_port =
- sctp_spec->hdr.src_port;
- rule->ixgbe_fdir.formatted.dst_port =
- sctp_spec->hdr.dst_port;
+ rule->ixgbe_fdir.formatted.src_port =
+ sctp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ sctp_spec->hdr.dst_port;
+ }
+ /* On other devices, not even the SCTP ports can be matched. */
+ } else {
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (sctp_mask &&
+ (sctp_mask->hdr.src_port ||
+ sctp_mask->hdr.dst_port ||
+ sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
}
item = next_no_fuzzy_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
- item->type != RTE_FLOW_ITEM_TYPE_END) {
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
hw->mac.type != ixgbe_mac_X550EM_a)
return -ENOTSUP;
- ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
+ ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
actions, rule, error);
if (!ret)
ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
actions, rule, error);
+ if (ret)
+ return ret;
+
step_next:
+
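+ /* 82599 does not support a drop rule that also matches on the L4
+ * source/destination ports, so reject that combination.
+ */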
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ rule->fdirflags == IXGBE_FDIRCMD_DROP &&
+ (rule->ixgbe_fdir.formatted.src_port != 0 ||
+ rule->ixgbe_fdir.formatted.dst_port != 0))
+ return -ENOTSUP;
+
if (fdir_mode == RTE_FDIR_MODE_NONE ||
fdir_mode != rule->mode)
return -ENOTSUP;
+
+ if (rule->queue >= dev->data->nb_rx_queues)
+ return -ENOTSUP;
+
return ret;
}
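+/* Expected to be called once during device initialization,
+ * before any flow is created.
+ */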
+void
+ixgbe_filterlist_init(void)
+{
+ TAILQ_INIT(&filter_ntuple_list);
+ TAILQ_INIT(&filter_ethertype_list);
+ TAILQ_INIT(&filter_syn_list);
+ TAILQ_INIT(&filter_fdir_list);
+ TAILQ_INIT(&filter_l2_tunnel_list);
+ TAILQ_INIT(&ixgbe_flow_list);
+}
+
void
ixgbe_filterlist_flush(void)
{
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
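+ /* Set when this call installs the global FDIR mask, so a failed
+ * rule programming can roll the flag back.
+ */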
+ uint8_t first_mask = FALSE;
flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
if (!flow) {
memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
+
+#ifdef RTE_LIBRTE_SECURITY
+ /* An ESP flow is not really a flow: the ingress SA was already
+ * programmed while parsing, so return the flow handle as is.
+ */
+ if (ntuple_filter.proto == IPPROTO_ESP)
+ return flow;
+#endif
+
if (!ret) {
ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
if (!ret) {
ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
sizeof(struct ixgbe_ntuple_filter_ele), 0);
- (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+ if (!ntuple_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&ntuple_filter_ptr->filter_info,
&ntuple_filter,
sizeof(struct rte_eth_ntuple_filter));
TAILQ_INSERT_TAIL(&filter_ntuple_list,
ethertype_filter_ptr = rte_zmalloc(
"ixgbe_ethertype_filter",
sizeof(struct ixgbe_ethertype_filter_ele), 0);
- (void)rte_memcpy(ðertype_filter_ptr->filter_info,
+ if (!ethertype_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(ðertype_filter_ptr->filter_info,
ðertype_filter,
sizeof(struct rte_eth_ethertype_filter));
TAILQ_INSERT_TAIL(&filter_ethertype_list,
if (!ret) {
syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
sizeof(struct ixgbe_eth_syn_filter_ele), 0);
- (void)rte_memcpy(&syn_filter_ptr->filter_info,
+ if (!syn_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&syn_filter_ptr->filter_info,
&syn_filter,
sizeof(struct rte_eth_syn_filter));
TAILQ_INSERT_TAIL(&filter_syn_list,
goto out;
fdir_info->mask_added = TRUE;
+ first_mask = TRUE;
} else {
/**
* Only support one global mask,
if (!ret) {
fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
sizeof(struct ixgbe_fdir_rule_ele), 0);
- (void)rte_memcpy(&fdir_rule_ptr->filter_info,
+ if (!fdir_rule_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&fdir_rule_ptr->filter_info,
&fdir_rule,
sizeof(struct ixgbe_fdir_rule));
TAILQ_INSERT_TAIL(&filter_fdir_list,
return flow;
}
- if (ret)
+ if (ret) {
+ /**
+ * Clear the mask_added flag if programming the rule fails.
+ */
+ if (first_mask)
+ fdir_info->mask_added = FALSE;
goto out;
+ }
}
goto out;
if (!ret) {
l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
- (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
+ if (!l2_tn_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&l2_tn_filter_ptr->filter_info,
&l2_tn_filter,
sizeof(struct rte_eth_l2_tunnel_conf));
TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
case RTE_ETH_FILTER_NTUPLE:
ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
pmd_flow->rule;
- (void)rte_memcpy(&ntuple_filter,
+ rte_memcpy(&ntuple_filter,
&ntuple_filter_ptr->filter_info,
sizeof(struct rte_eth_ntuple_filter));
ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
case RTE_ETH_FILTER_ETHERTYPE:
ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
pmd_flow->rule;
- (void)rte_memcpy(ðertype_filter,
+ rte_memcpy(ðertype_filter,
ðertype_filter_ptr->filter_info,
sizeof(struct rte_eth_ethertype_filter));
ret = ixgbe_add_del_ethertype_filter(dev,
case RTE_ETH_FILTER_SYN:
syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
pmd_flow->rule;
- (void)rte_memcpy(&syn_filter,
+ rte_memcpy(&syn_filter,
&syn_filter_ptr->filter_info,
sizeof(struct rte_eth_syn_filter));
ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
break;
case RTE_ETH_FILTER_FDIR:
fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
- (void)rte_memcpy(&fdir_rule,
+ rte_memcpy(&fdir_rule,
&fdir_rule_ptr->filter_info,
sizeof(struct ixgbe_fdir_rule));
ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
case RTE_ETH_FILTER_L2_TUNNEL:
l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
pmd_flow->rule;
- (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+ rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
sizeof(struct rte_eth_l2_tunnel_conf));
ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
if (!ret) {