X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_generic_flow.c;h=3e6ed7005ba86c768f00df25aa8f62f5544c4c96;hb=e495e930a9b65f70ba810a8d6b5e2fb0fd0f483b;hp=261f0b78e4a052662cf4ad9f3f5cfa6ba23c250a;hpb=2321e34c23b386c46e4a644682e40214cf59ee4f;p=dpdk.git

diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 261f0b78e4..3e6ed7005b 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -212,6 +212,27 @@ enum rte_flow_item_type pattern_eth_qinq_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_END,
 };
+enum rte_flow_item_type pattern_eth_ipv6_frag_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_vlan_ipv6_frag_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_qinq_ipv6_frag_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+	RTE_FLOW_ITEM_TYPE_END,
+};
 enum rte_flow_item_type pattern_eth_ipv6_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
@@ -1799,7 +1820,7 @@ ice_flow_init(struct ice_adapter *ad)
 	TAILQ_INIT(&pf->dist_parser_list);
 	rte_spinlock_init(&pf->flow_ops_lock);
 
-	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
 		if (engine->init == NULL) {
 			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
 					engine->type);
@@ -1825,7 +1846,7 @@ ice_flow_uninit(struct ice_adapter *ad)
 	struct ice_flow_parser_node *p_parser;
 	void *temp;
 
-	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
 		if (engine->uninit)
 			engine->uninit(ad);
 	}
@@ -1902,9 +1923,9 @@ ice_register_parser(struct ice_flow_parser *parser,
 	} else {
 		if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH ||
 				parser->engine->type == ICE_FLOW_ENGINE_HASH)
-			TAILQ_INSERT_TAIL(list, parser_node, node);
-		else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
 			TAILQ_INSERT_HEAD(list, parser_node, node);
+		else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
+			TAILQ_INSERT_TAIL(list, parser_node, node);
 		else if (parser->engine->type == ICE_FLOW_ENGINE_ACL)
 			TAILQ_INSERT_HEAD(list, parser_node, node);
 		else
@@ -1925,7 +1946,7 @@ ice_unregister_parser(struct ice_flow_parser *parser,
 	if (list == NULL)
 		return;
 
-	TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
+	RTE_TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
 		if (p_parser->parser->engine->type == parser->engine->type) {
 			TAILQ_REMOVE(list, p_parser, node);
 			rte_free(p_parser);
@@ -2087,6 +2108,7 @@ static struct ice_ptype_match ice_ptype_map[] = {
 	{pattern_eth_ipv6_udp,				ICE_PTYPE_IPV6_UDP_PAY},
 	{pattern_eth_ipv6_tcp,				ICE_PTYPE_IPV6_TCP_PAY},
 	{pattern_eth_ipv6_sctp,				ICE_PTYPE_IPV6_SCTP_PAY},
+	{pattern_eth_ipv6_frag_ext,			ICE_PTYPE_IPV6FRAG_PAY},
 	{pattern_eth_ipv6_gtpu,				ICE_MAC_IPV6_GTPU},
 	{pattern_eth_ipv6_gtpu_eh,			ICE_MAC_IPV6_GTPU},
 	{pattern_eth_ipv6_gtpu_ipv4,			ICE_MAC_IPV6_GTPU_IPV4_PAY},
@@ -2112,11 +2134,15 @@ static struct ice_ptype_match ice_ptype_map[] = {
 	{pattern_eth_arp,				ICE_PTYPE_MAC_PAY},
 	{pattern_eth_vlan_ipv4,				ICE_PTYPE_IPV4_PAY},
 	{pattern_eth_qinq_ipv4,				ICE_PTYPE_IPV4_PAY},
+	{pattern_eth_qinq_ipv4_udp,			ICE_PTYPE_IPV4_UDP_PAY},
+	{pattern_eth_qinq_ipv4_tcp,			ICE_PTYPE_IPV4_TCP_PAY},
 	{pattern_eth_vlan_ipv4_udp,			ICE_PTYPE_IPV4_UDP_PAY},
 	{pattern_eth_vlan_ipv4_tcp,			ICE_PTYPE_IPV4_TCP_PAY},
 	{pattern_eth_vlan_ipv4_sctp,			ICE_PTYPE_IPV4_SCTP_PAY},
 	{pattern_eth_vlan_ipv6,				ICE_PTYPE_IPV6_PAY},
 	{pattern_eth_qinq_ipv6,				ICE_PTYPE_IPV6_PAY},
+	{pattern_eth_qinq_ipv6_udp,			ICE_PTYPE_IPV6_UDP_PAY},
+	{pattern_eth_qinq_ipv6_tcp,			ICE_PTYPE_IPV6_TCP_PAY},
 	{pattern_eth_vlan_ipv6_udp,			ICE_PTYPE_IPV6_UDP_PAY},
 	{pattern_eth_vlan_ipv6_tcp,			ICE_PTYPE_IPV6_TCP_PAY},
 	{pattern_eth_vlan_ipv6_sctp,			ICE_PTYPE_IPV6_SCTP_PAY},
@@ -2250,7 +2276,7 @@ ice_parse_engine_create(struct ice_adapter *ad,
 	void *meta = NULL;
 	void *temp;
 
-	TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
+	RTE_TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
 		int ret;
 
 		if (parser_node->parser->parse_pattern_action(ad,
@@ -2283,7 +2309,7 @@ ice_parse_engine_validate(struct ice_adapter *ad,
 	struct ice_flow_parser_node *parser_node;
 	void *temp;
 
-	TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
+	RTE_TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
@@ -2455,7 +2481,7 @@ ice_flow_flush(struct rte_eth_dev *dev,
 	void *temp;
 	int ret = 0;
 
-	TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
 		ret = ice_flow_destroy(dev, p_flow, error);
 		if (ret) {
 			PMD_DRV_LOG(ERR, "Failed to flush flows");
@@ -2496,15 +2522,16 @@ ice_flow_query(struct rte_eth_dev *dev,
 			ret = flow->engine->query_count(ad, flow, count, error);
 			break;
 		default:
-			return rte_flow_error_set(error, ENOTSUP,
+			ret = rte_flow_error_set(error, ENOTSUP,
 					RTE_FLOW_ERROR_TYPE_ACTION,
 					actions,
 					"action not supported");
+			goto out;
 		}
 	}
 
+out:
 	rte_spinlock_unlock(&pf->flow_ops_lock);
-
 	return ret;
 }
 
@@ -2519,7 +2546,7 @@ ice_flow_redirect(struct ice_adapter *ad,
 
 	rte_spinlock_lock(&pf->flow_ops_lock);
 
-	TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
 		if (!p_flow->engine->redirect)
 			continue;
 		ret = p_flow->engine->redirect(ad, p_flow, rd);
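
Note (not part of the patch): with pattern_eth_ipv6_frag_ext now mapped to
ICE_PTYPE_IPV6FRAG_PAY in ice_ptype_map, a rule matching
ETH / IPV6 / IPV6_FRAG_EXT can be expressed through the generic rte_flow
API. The sketch below shows one way an application might exercise it;
create_ipv6_frag_rule(), port_id and queue_id are illustrative names, not
part of the driver, and the spec/mask fields are left empty so the rule
matches any IPv6 fragment.

/*
 * Minimal sketch, assuming a started port whose PMD accepts this pattern:
 * steer all IPv6 fragments to a given Rx queue.
 */
#include <stdint.h>
#include <rte_flow.h>

static struct rte_flow *
create_ipv6_frag_rule(uint16_t port_id, uint16_t queue_id,
		      struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	/* Same item sequence as pattern_eth_ipv6_frag_ext above. */
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = queue_id };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Validate first so an unsupported pattern fails without side effects. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err) != 0)
		return NULL;

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}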