#include <stdarg.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
};
+/* Pattern: ETH / IPv6 / IPv6 fragment extension header. */
+enum rte_flow_item_type pattern_eth_ipv6_frag_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+/* Pattern: ETH / VLAN / IPv6 / IPv6 fragment extension header. */
+enum rte_flow_item_type pattern_eth_vlan_ipv6_frag_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+/* Pattern: ETH / double VLAN (QinQ) / IPv6 / IPv6 fragment extension header. */
+enum rte_flow_item_type pattern_eth_qinq_ipv6_frag_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_ipv6_udp[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
typedef struct ice_flow_engine * (*parse_engine_t)(struct ice_adapter *ad,
struct rte_flow *flow,
struct ice_parser_list *parser_list,
+ /* rte_flow attr priority forwarded to each parser; per the attr
+  * check added in this patch, only values 0 and 1 are accepted. */
+ uint32_t priority,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
} else {
*ice_pipeline_stage =
ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
- /* Not supported */
- if (attr->priority) {
+ if (attr->priority > 1) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- attr, "Not support priority.");
+ attr, "Only support priority 0 and 1.");
return -rte_errno;
}
}
{pattern_eth_ipv6_udp, ICE_PTYPE_IPV6_UDP_PAY},
{pattern_eth_ipv6_tcp, ICE_PTYPE_IPV6_TCP_PAY},
{pattern_eth_ipv6_sctp, ICE_PTYPE_IPV6_SCTP_PAY},
+ {pattern_eth_ipv6_frag_ext, ICE_PTYPE_IPV6FRAG_PAY},
{pattern_eth_ipv6_gtpu, ICE_MAC_IPV6_GTPU},
{pattern_eth_ipv6_gtpu_eh, ICE_MAC_IPV6_GTPU},
{pattern_eth_ipv6_gtpu_ipv4, ICE_MAC_IPV6_GTPU_IPV4_PAY},
{pattern_eth_ipv4_nvgre_eth_ipv4, ICE_MAC_IPV4_TUN_IPV4_PAY},
{pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_MAC_IPV4_TUN_IPV4_UDP_PAY},
{pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_MAC_IPV4_TUN_IPV4_TCP},
+ {pattern_empty, 0},
};
static bool
for (i = 0; i < array_len; i++) {
if (ice_match_pattern(array[i].pattern_list,
items)) {
- pattern_match_item->input_set_mask =
- array[i].input_set_mask;
+ pattern_match_item->input_set_mask_o =
+ array[i].input_set_mask_o;
+ pattern_match_item->input_set_mask_i =
+ array[i].input_set_mask_i;
pattern_match_item->pattern_list =
array[i].pattern_list;
pattern_match_item->meta = array[i].meta;
ice_parse_engine_create(struct ice_adapter *ad,
struct rte_flow *flow,
struct ice_parser_list *parser_list,
+ uint32_t priority,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
if (parser_node->parser->parse_pattern_action(ad,
parser_node->parser->array,
parser_node->parser->array_len,
- pattern, actions, &meta, error) < 0)
+ pattern, actions, priority, &meta, error) < 0)
continue;
engine = parser_node->parser->engine;
ice_parse_engine_validate(struct ice_adapter *ad,
struct rte_flow *flow __rte_unused,
struct ice_parser_list *parser_list,
+ uint32_t priority,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
if (parser_node->parser->parse_pattern_action(ad,
parser_node->parser->array,
parser_node->parser->array_len,
- pattern, actions, NULL, error) < 0)
+ pattern, actions, priority, NULL, error) < 0)
continue;
engine = parser_node->parser->engine;
return ret;
*engine = ice_parse_engine(ad, flow, &pf->rss_parser_list,
- pattern, actions, error);
+ attr->priority, pattern, actions, error);
if (*engine != NULL)
return 0;
case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY:
case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR:
*engine = ice_parse_engine(ad, flow, &pf->dist_parser_list,
- pattern, actions, error);
+ attr->priority, pattern, actions, error);
break;
case ICE_FLOW_CLASSIFY_STAGE_PERMISSION:
*engine = ice_parse_engine(ad, flow, &pf->perm_parser_list,
- pattern, actions, error);
+ attr->priority, pattern, actions, error);
break;
default:
return -EINVAL;