RTE_FLOW_ITEM_TYPE_END,
};
+/* any */
+enum rte_flow_item_type pattern_any[] = {
+ RTE_FLOW_ITEM_TYPE_ANY,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* raw — NOTE(review): raw flows appear to rely on the DDP parser (ad->psr);
+ * the init path warns that raw packet filtering is unsupported when
+ * ice_parser_create() fails — confirm against the parse path.
+ */
+enum rte_flow_item_type pattern_raw[] = {
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
/* L2 */
enum rte_flow_item_type pattern_ethertype[] = {
RTE_FLOW_ITEM_TYPE_ETH,
TAILQ_INIT(&pf->dist_parser_list);
rte_spinlock_init(&pf->flow_ops_lock);
- TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+ if (ice_parser_create(&ad->hw, &ad->psr) != ICE_SUCCESS)
+ PMD_INIT_LOG(WARNING, "Failed to initialize DDP parser, raw packet filter will not be supported");
+
+ RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
if (engine->init == NULL) {
PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
engine->type);
struct ice_flow_parser_node *p_parser;
void *temp;
- TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+ RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
if (engine->uninit)
engine->uninit(ad);
}
TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
rte_free(p_parser);
}
+
+ if (ad->psr != NULL) {
+ ice_parser_destroy(ad->psr);
+ ad->psr = NULL;
+ }
}
static struct ice_parser_list *
{
struct ice_parser_list *list;
struct ice_flow_parser_node *parser_node;
+ struct ice_flow_parser_node *existing_node;
+ void *temp;
parser_node = rte_zmalloc("ice_parser", sizeof(*parser_node), 0);
if (parser_node == NULL) {
if (ad->devargs.pipe_mode_support) {
TAILQ_INSERT_TAIL(list, parser_node, node);
} else {
- if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH ||
- parser->engine->type == ICE_FLOW_ENGINE_HASH)
+ if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH) {
+ RTE_TAILQ_FOREACH_SAFE(existing_node, list,
+ node, temp) {
+ if (existing_node->parser->engine->type ==
+ ICE_FLOW_ENGINE_ACL) {
+ TAILQ_INSERT_AFTER(list, existing_node,
+ parser_node, node);
+ goto DONE;
+ }
+ }
+ TAILQ_INSERT_HEAD(list, parser_node, node);
+ } else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR) {
+ RTE_TAILQ_FOREACH_SAFE(existing_node, list,
+ node, temp) {
+ if (existing_node->parser->engine->type ==
+ ICE_FLOW_ENGINE_SWITCH) {
+ TAILQ_INSERT_AFTER(list, existing_node,
+ parser_node, node);
+ goto DONE;
+ }
+ }
TAILQ_INSERT_HEAD(list, parser_node, node);
- else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
+ } else if (parser->engine->type == ICE_FLOW_ENGINE_HASH) {
TAILQ_INSERT_TAIL(list, parser_node, node);
- else if (parser->engine->type == ICE_FLOW_ENGINE_ACL)
+ } else if (parser->engine->type == ICE_FLOW_ENGINE_ACL) {
TAILQ_INSERT_HEAD(list, parser_node, node);
- else
+ } else {
return -EINVAL;
+ }
}
+DONE:
return 0;
}
if (list == NULL)
return;
- TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
+ RTE_TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
if (p_parser->parser->engine->type == parser->engine->type) {
TAILQ_REMOVE(list, p_parser, node);
rte_free(p_parser);
};
static struct ice_ptype_match ice_ptype_map[] = {
+ {pattern_raw, ICE_PTYPE_IPV4_PAY},
+ {pattern_any, ICE_PTYPE_IPV4_PAY},
{pattern_eth_ipv4, ICE_PTYPE_IPV4_PAY},
{pattern_eth_ipv4_udp, ICE_PTYPE_IPV4_UDP_PAY},
{pattern_eth_ipv4_tcp, ICE_PTYPE_IPV4_TCP_PAY},
void *meta = NULL;
void *temp;
- TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
+ RTE_TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
int ret;
if (parser_node->parser->parse_pattern_action(ad,
struct ice_flow_parser_node *parser_node;
void *temp;
- TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
+ RTE_TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
if (parser_node->parser->parse_pattern_action(ad,
parser_node->parser->array,
parser_node->parser->array_len,
void *temp;
int ret = 0;
- TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
+ RTE_TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
ret = ice_flow_destroy(dev, p_flow, error);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to flush flows");
- return -EINVAL;
+ if (ret != -EAGAIN)
+ ret = -EINVAL;
+ return ret;
}
}
struct ice_pf *pf = &ad->pf;
struct rte_flow *p_flow;
void *temp;
- int ret;
+ int ret = 0;
rte_spinlock_lock(&pf->flow_ops_lock);
- TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
+ RTE_TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
if (!p_flow->engine->redirect)
continue;
ret = p_flow->engine->redirect(ad, p_flow, rd);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to redirect flows");
- return ret;
+ break;
}
}
rte_spinlock_unlock(&pf->flow_ops_lock);
- return 0;
+ return ret;
}