X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_flow.c;h=82fc7051aaffd12b97c8ac84580b85227f8847ed;hb=71864623ba38e3db83501c24315c2e9b3f366b84;hp=bbe06e898778724013d2ae60ec40c569d2cb0272;hpb=d9347d254008f54c372fe067ee4e7cc1a3ac9047;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index bbe06e8987..82fc7051aa 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -79,6 +79,51 @@
 #define IXGBE_MAX_N_TUPLE_PRIO 7
 #define IXGBE_MAX_FLX_SOURCE_OFF 62
 
+/* ntuple filter list structure */
+struct ixgbe_ntuple_filter_ele {
+	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
+	struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct ixgbe_ethertype_filter_ele {
+	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
+	struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct ixgbe_eth_syn_filter_ele {
+	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
+	struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct ixgbe_fdir_rule_ele {
+	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
+	struct ixgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct ixgbe_eth_l2_tunnel_conf_ele {
+	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
+	struct rte_eth_l2_tunnel_conf filter_info;
+};
+/* ixgbe_flow memory list structure */
+struct ixgbe_flow_mem {
+	TAILQ_ENTRY(ixgbe_flow_mem) entries;
+	struct rte_flow *flow;
+};
+
+TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
+TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
+TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
+TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
+TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
+
+static struct ixgbe_ntuple_filter_list filter_ntuple_list;
+static struct ixgbe_ethertype_filter_list filter_ethertype_list;
+static struct ixgbe_syn_filter_list filter_syn_list;
+static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
+static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct ixgbe_flow_mem_list ixgbe_flow_list;
+
 /**
  * Endless loop will never happen with below assumption
  * 1. there is at least one no-void item(END)
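For readers less familiar with <sys/queue.h>, the lists added above follow the standard BSD TAILQ pattern: each element embeds a TAILQ_ENTRY, TAILQ_HEAD declares the list type, and the static heads record everything the flow API has programmed into hardware. A minimal, self-contained sketch of the same pattern, using a made-up demo_ele type rather than the driver's structures:

	#include <sys/queue.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* hypothetical element type mirroring ixgbe_ntuple_filter_ele */
	struct demo_ele {
		TAILQ_ENTRY(demo_ele) entries;
		int filter_id;
	};
	TAILQ_HEAD(demo_list, demo_ele);

	static struct demo_list demo_filter_list;

	int main(void)
	{
		struct demo_ele *ele, *next;

		TAILQ_INIT(&demo_filter_list);	/* what ixgbe_filterlist_init() does */

		ele = calloc(1, sizeof(*ele));	/* stands in for rte_zmalloc() */
		if (!ele)
			return 1;
		ele->filter_id = 42;
		TAILQ_INSERT_TAIL(&demo_filter_list, ele, entries);

		/* safe walk-and-free, as in ixgbe_filterlist_flush() */
		for (ele = TAILQ_FIRST(&demo_filter_list); ele; ele = next) {
			next = TAILQ_NEXT(ele, entries);
			TAILQ_REMOVE(&demo_filter_list, ele, entries);
			printf("removed filter %d\n", ele->filter_id);
			free(ele);
		}
		return 0;
	}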
@@ -142,6 +187,9 @@ const struct rte_flow_action *next_no_void_action(
  * END
  * other members in mask and spec should set to 0x00.
  * item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
  */
 static int
 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
@@ -181,6 +229,43 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+#ifdef RTE_LIBRTE_SECURITY
+	/**
+	 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+	 */
+	act = next_no_void_action(actions, NULL);
+	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		const void *conf = act->conf;
+		/* check if the next not void item is END */
+		act = next_no_void_action(actions, act);
+		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+			return -rte_errno;
+		}
+
+		/* get the IP pattern */
+		item = next_no_void_pattern(pattern, NULL);
+		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			if (item->last ||
+					item->type == RTE_FLOW_ITEM_TYPE_END) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "IP pattern missing.");
+				return -rte_errno;
+			}
+			item = next_no_void_pattern(pattern, item);
+		}
+
+		filter->proto = IPPROTO_ESP;
+		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+				item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+	}
+#endif
+
 	/* the first not void item can be MAC or IPv4 */
 	item = next_no_void_pattern(pattern, NULL);
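On the application side, the new branch is reached by passing RTE_FLOW_ACTION_TYPE_SECURITY as the only action, together with a pattern containing the IP item that ixgbe_crypto_add_ingress_sa_from_flow() reads. A hedged sketch of such a request; the helper name, addresses, and SPI are placeholders, and the security session would come from the rte_security API, none of which appears in the patch itself:

	#include <rte_byteorder.h>
	#include <rte_flow.h>

	/* hypothetical helper: build the kind of flow the SECURITY
	 * branch above accepts (IP item consumed by the crypto code,
	 * SECURITY as the sole action followed by END)
	 */
	static struct rte_flow *
	create_esp_inline_flow(uint16_t port_id, void *security_session,
			       struct rte_flow_error *err)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_item_ipv4 ipv4_spec = {
			.hdr = {
				.src_addr = rte_cpu_to_be_32(0xC0A80001), /* 192.168.0.1 */
				.dst_addr = rte_cpu_to_be_32(0xC0A80002), /* 192.168.0.2 */
			},
		};
		struct rte_flow_item_esp esp_spec = {
			.hdr = { .spi = rte_cpu_to_be_32(1) }, /* placeholder SPI */
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
			{ .type = RTE_FLOW_ITEM_TYPE_ESP, .spec = &esp_spec },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_SECURITY,
			  .conf = security_session },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		return rte_flow_create(port_id, &attr, pattern, actions, err);
	}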
@@ -266,7 +351,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	item = next_no_void_pattern(pattern, item);
 	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 		item->type != RTE_FLOW_ITEM_TYPE_UDP &&
-		item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+		item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+		item->type != RTE_FLOW_ITEM_TYPE_END) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -275,7 +361,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 
 	/* get the TCP/UDP info */
-	if (!item->spec || !item->mask) {
+	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+		(!item->spec || !item->mask)) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -355,7 +442,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		udp_spec = (const struct rte_flow_item_udp *)item->spec;
 		filter->dst_port = udp_spec->hdr.dst_port;
 		filter->src_port = udp_spec->hdr.src_port;
-	} else {
+	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
 		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
 
 		/**
@@ -378,6 +465,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
 		filter->dst_port = sctp_spec->hdr.dst_port;
 		filter->src_port = sctp_spec->hdr.src_port;
+	} else {
+		goto action;
 	}
 
 	/* check if the next not void item is END */
@@ -390,6 +479,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+action:
+
 	/**
 	 * n-tuple only supports forwarding,
 	 * check if the first not void action is QUEUE.
@@ -468,6 +559,12 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
+#ifdef RTE_LIBRTE_SECURITY
+	/* an ESP flow is not really a flow */
+	if (filter->proto == IPPROTO_ESP)
+		return 0;
+#endif
+
 	/* Ixgbe doesn't support tcp flags. */
 	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
@@ -487,9 +584,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
-		filter->priority > IXGBE_5TUPLE_MAX_PRI ||
-		filter->priority < IXGBE_5TUPLE_MIN_PRI)
+	if (filter->queue >= dev->data->nb_rx_queues)
 		return -rte_errno;
 
 	/* fixed value for ixgbe */
@@ -706,7 +801,7 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
+	if (filter->queue >= dev->data->nb_rx_queues) {
 		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -983,6 +1078,9 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
 	ret = cons_parse_syn_filter(attr, pattern,
 					actions, filter, error);
 
 	if (ret)
 		return ret;
 
+	if (filter->queue >= dev->data->nb_rx_queues)
+		return -rte_errno;
+
@@ -1176,6 +1274,9 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
+	if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
+		return -rte_errno;
+
 	return ret;
 }
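The hunks above share one idea: a rule's target queue (or pool) must be bounded by the number of Rx queues the application actually configured, not by the hardware maximum (IXGBE_MAX_RX_QUEUE_NUM). A rule steering to queue 10 on a port configured with 4 queues would pass the old check yet point at an inactive queue. An illustrative helper, not part of the patch, that condenses the new check:

	#include <rte_errno.h>
	#include <rte_ethdev.h>

	/* hypothetical helper: validate a filter's target queue against
	 * the configured Rx queue count, as the hunks above now do inline
	 */
	static int
	check_filter_queue(const struct rte_eth_dev *dev, uint16_t queue)
	{
		if (queue >= dev->data->nb_rx_queues) {
			rte_errno = EINVAL;
			return -rte_errno;
		}
		return 0;
	}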
+ */ + if (!item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } rule->b_mask = TRUE; sctp_mask = (const struct rte_flow_item_sctp *)item->mask; @@ -1920,21 +2037,36 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, } rule->mask.src_port_mask = sctp_mask->hdr.src_port; rule->mask.dst_port_mask = sctp_mask->hdr.dst_port; - } - if (item->spec) { - rule->b_spec = TRUE; - sctp_spec = + if (item->spec) { + rule->b_spec = TRUE; + sctp_spec = (const struct rte_flow_item_sctp *)item->spec; - rule->ixgbe_fdir.formatted.src_port = - sctp_spec->hdr.src_port; - rule->ixgbe_fdir.formatted.dst_port = - sctp_spec->hdr.dst_port; + rule->ixgbe_fdir.formatted.src_port = + sctp_spec->hdr.src_port; + rule->ixgbe_fdir.formatted.dst_port = + sctp_spec->hdr.dst_port; + } + /* others even sctp port is not supported */ + } else { + sctp_mask = + (const struct rte_flow_item_sctp *)item->mask; + if (sctp_mask && + (sctp_mask->hdr.src_port || + sctp_mask->hdr.dst_port || + sctp_mask->hdr.tag || + sctp_mask->hdr.cksum)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } } item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_RAW && - item->type != RTE_FLOW_ITEM_TYPE_END) { + item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -2528,7 +2660,7 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, hw->mac.type != ixgbe_mac_X550EM_a) return -ENOTSUP; - ret = ixgbe_parse_fdir_filter_normal(attr, pattern, + ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern, actions, rule, error); if (!ret) @@ -2544,16 +2676,31 @@ step_next: if (hw->mac.type == ixgbe_mac_82599EB && rule->fdirflags == IXGBE_FDIRCMD_DROP && - (rule->mask.src_port_mask != 0 || - rule->mask.dst_port_mask != 0)) + (rule->ixgbe_fdir.formatted.src_port != 0 || + rule->ixgbe_fdir.formatted.dst_port != 0)) return -ENOTSUP; if (fdir_mode == RTE_FDIR_MODE_NONE || fdir_mode != rule->mode) return -ENOTSUP; + + if (rule->queue >= dev->data->nb_rx_queues) + return -ENOTSUP; + return ret; } +void +ixgbe_filterlist_init(void) +{ + TAILQ_INIT(&filter_ntuple_list); + TAILQ_INIT(&filter_ethertype_list); + TAILQ_INIT(&filter_syn_list); + TAILQ_INIT(&filter_fdir_list); + TAILQ_INIT(&filter_l2_tunnel_list); + TAILQ_INIT(&ixgbe_flow_list); +} + void ixgbe_filterlist_flush(void) { @@ -2636,6 +2783,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev, struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; struct ixgbe_fdir_rule_ele *fdir_rule_ptr; struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; + uint8_t first_mask = FALSE; flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0); if (!flow) { @@ -2656,12 +2804,23 @@ ixgbe_flow_create(struct rte_eth_dev *dev, memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); ret = ixgbe_parse_ntuple_filter(dev, attr, pattern, actions, &ntuple_filter, error); + +#ifdef RTE_LIBRTE_SECURITY + /* ESP flow not really a flow*/ + if (ntuple_filter.proto == IPPROTO_ESP) + return flow; +#endif + if (!ret) { ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE); if (!ret) { ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter", sizeof(struct ixgbe_ntuple_filter_ele), 0); - (void)rte_memcpy(&ntuple_filter_ptr->filter_info, + 
@@ -2528,7 +2660,7 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
 		hw->mac.type != ixgbe_mac_X550EM_a)
 		return -ENOTSUP;
 
-	ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
+	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
 					actions, rule, error);
 
 	if (!ret)
@@ -2544,16 +2676,31 @@ step_next:
 
 	if (hw->mac.type == ixgbe_mac_82599EB &&
 		rule->fdirflags == IXGBE_FDIRCMD_DROP &&
-		(rule->mask.src_port_mask != 0 ||
-		rule->mask.dst_port_mask != 0))
+		(rule->ixgbe_fdir.formatted.src_port != 0 ||
+		rule->ixgbe_fdir.formatted.dst_port != 0))
 		return -ENOTSUP;
 
 	if (fdir_mode == RTE_FDIR_MODE_NONE ||
 		fdir_mode != rule->mode)
 		return -ENOTSUP;
+
+	if (rule->queue >= dev->data->nb_rx_queues)
+		return -ENOTSUP;
+
 	return ret;
 }
 
+void
+ixgbe_filterlist_init(void)
+{
+	TAILQ_INIT(&filter_ntuple_list);
+	TAILQ_INIT(&filter_ethertype_list);
+	TAILQ_INIT(&filter_syn_list);
+	TAILQ_INIT(&filter_fdir_list);
+	TAILQ_INIT(&filter_l2_tunnel_list);
+	TAILQ_INIT(&ixgbe_flow_list);
+}
+
 void
 ixgbe_filterlist_flush(void)
 {
@@ -2636,6 +2783,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
 	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
 	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+	uint8_t first_mask = FALSE;
 
 	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
 	if (!flow) {
@@ -2656,12 +2804,23 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
 	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
 			actions, &ntuple_filter, error);
+
+#ifdef RTE_LIBRTE_SECURITY
+	/* an ESP flow is not really a flow */
+	if (ntuple_filter.proto == IPPROTO_ESP)
+		return flow;
+#endif
+
 	if (!ret) {
 		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
 		if (!ret) {
 			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
 				sizeof(struct ixgbe_ntuple_filter_ele), 0);
-			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+			if (!ntuple_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&ntuple_filter_ptr->filter_info,
 				&ntuple_filter,
 				sizeof(struct rte_eth_ntuple_filter));
 			TAILQ_INSERT_TAIL(&filter_ntuple_list,
@@ -2683,7 +2842,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 			ethertype_filter_ptr = rte_zmalloc(
 				"ixgbe_ethertype_filter",
 				sizeof(struct ixgbe_ethertype_filter_ele), 0);
-			(void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+			if (!ethertype_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&ethertype_filter_ptr->filter_info,
 				&ethertype_filter,
 				sizeof(struct rte_eth_ethertype_filter));
 			TAILQ_INSERT_TAIL(&filter_ethertype_list,
@@ -2703,7 +2866,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 		if (!ret) {
 			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
 				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
-			(void)rte_memcpy(&syn_filter_ptr->filter_info,
+			if (!syn_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&syn_filter_ptr->filter_info,
 				&syn_filter,
 				sizeof(struct rte_eth_syn_filter));
 			TAILQ_INSERT_TAIL(&filter_syn_list,
@@ -2739,6 +2906,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 				goto out;
 
 			fdir_info->mask_added = TRUE;
+			first_mask = TRUE;
 		} else {
 			/**
 			 * Only support one global mask,
@@ -2762,7 +2930,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 		if (!ret) {
 			fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
 				sizeof(struct ixgbe_fdir_rule_ele), 0);
-			(void)rte_memcpy(&fdir_rule_ptr->filter_info,
+			if (!fdir_rule_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&fdir_rule_ptr->filter_info,
 				&fdir_rule,
 				sizeof(struct ixgbe_fdir_rule));
 			TAILQ_INSERT_TAIL(&filter_fdir_list,
@@ -2773,8 +2945,15 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 			return flow;
 		}
 
-		if (ret)
+		if (ret) {
+			/**
+			 * clear the mask_added flag if the rule
+			 * fails to program
+			 */
+			if (first_mask)
+				fdir_info->mask_added = FALSE;
 			goto out;
+		}
 	}
 
 	goto out;
@@ -2788,7 +2967,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 	if (!ret) {
 		l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
 			sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
-		(void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
+		if (!l2_tn_filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			goto out;
+		}
+		rte_memcpy(&l2_tn_filter_ptr->filter_info,
 			&l2_tn_filter,
 			sizeof(struct rte_eth_l2_tunnel_conf));
 		TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
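With creation covered above, the remaining hunks adjust ixgbe_flow_destroy(). From the application's point of view the whole lifecycle is the generic rte_flow sequence; a sketch with placeholder arguments, using only public API calls (the helper name is made up):

	#include <rte_flow.h>

	/* hypothetical round trip: validate, create, then destroy a rule */
	static int
	flow_roundtrip(uint16_t port_id,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[])
	{
		struct rte_flow_error err;
		struct rte_flow *flow;

		/* dry run through the same parse path ixgbe_flow_create() uses */
		if (rte_flow_validate(port_id, attr, pattern, actions, &err))
			return -1;

		flow = rte_flow_create(port_id, attr, pattern, actions, &err);
		if (!flow)
			return -1;	/* err.message explains the failure */

		/* ... traffic runs ... */

		/* releases the hardware entry and the list element added above */
		return rte_flow_destroy(port_id, flow, &err);
	}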
@@ -2887,7 +3070,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_NTUPLE:
 		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
 					pmd_flow->rule;
-		(void)rte_memcpy(&ntuple_filter,
+		rte_memcpy(&ntuple_filter,
 			&ntuple_filter_ptr->filter_info,
 			sizeof(struct rte_eth_ntuple_filter));
 		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
@@ -2900,7 +3083,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_ETHERTYPE:
 		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
 					pmd_flow->rule;
-		(void)rte_memcpy(&ethertype_filter,
+		rte_memcpy(&ethertype_filter,
 			&ethertype_filter_ptr->filter_info,
 			sizeof(struct rte_eth_ethertype_filter));
 		ret = ixgbe_add_del_ethertype_filter(dev,
@@ -2914,7 +3097,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_SYN:
 		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
 					pmd_flow->rule;
-		(void)rte_memcpy(&syn_filter,
+		rte_memcpy(&syn_filter,
 			&syn_filter_ptr->filter_info,
 			sizeof(struct rte_eth_syn_filter));
 		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
@@ -2926,7 +3109,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 		break;
 	case RTE_ETH_FILTER_FDIR:
 		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
-		(void)rte_memcpy(&fdir_rule,
+		rte_memcpy(&fdir_rule,
 			&fdir_rule_ptr->filter_info,
 			sizeof(struct ixgbe_fdir_rule));
 		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
@@ -2941,7 +3124,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_L2_TUNNEL:
 		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
 					pmd_flow->rule;
-		(void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+		rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
 			sizeof(struct rte_eth_l2_tunnel_conf));
 		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
 		if (!ret) {