X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Ftf_ulp%2Fbnxt_ulp_flow.c;h=413e4c3b26770db01774d7a730c5dda7f3c30017;hb=92ef4b8f1688ded571fb2085727e5e82f2afe5d6;hp=9326401b4d98d404c7c756b3ca7a8d4d0a79cbb4;hpb=aa0fa4d9f4ce3144c9ca31044deafc16f54c5604;p=dpdk.git diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c index 9326401b4d..413e4c3b26 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2014-2020 Broadcom + * Copyright(c) 2014-2021 Broadcom * All rights reserved. */ @@ -9,7 +9,14 @@ #include "ulp_matcher.h" #include "ulp_flow_db.h" #include "ulp_mapper.h" +#include "ulp_fc_mgr.h" +#include "ulp_port_db.h" +#include "ulp_ha_mgr.h" +#include "ulp_tun.h" #include +#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG +#include "ulp_template_debug_proto.h" +#endif static int32_t bnxt_ulp_flow_validate_args(const struct rte_flow_attr *attr, @@ -59,90 +66,205 @@ bnxt_ulp_flow_validate_args(const struct rte_flow_attr *attr, return BNXT_TF_RC_SUCCESS; } +static inline void +bnxt_ulp_set_dir_attributes(struct ulp_rte_parser_params *params, + const struct rte_flow_attr *attr) +{ + /* Set the flow attributes */ + if (attr->egress) + params->dir_attr |= BNXT_ULP_FLOW_ATTR_EGRESS; + if (attr->ingress) + params->dir_attr |= BNXT_ULP_FLOW_ATTR_INGRESS; +#if RTE_VERSION_NUM(17, 11, 10, 16) < RTE_VERSION + if (attr->transfer) + params->dir_attr |= BNXT_ULP_FLOW_ATTR_TRANSFER; +#endif +} + +void +bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_create_parms *mapper_cparms, + struct ulp_rte_parser_params *params, + enum bnxt_ulp_fdb_type flow_type) +{ + uint32_t ulp_flags = 0; + + memset(mapper_cparms, 0, sizeof(*mapper_cparms)); + mapper_cparms->flow_type = flow_type; + mapper_cparms->app_priority = params->priority; + mapper_cparms->dir_attr = params->dir_attr; + mapper_cparms->class_tid = params->class_id; + mapper_cparms->act_tid = params->act_tmpl; + mapper_cparms->func_id = params->func_id; + mapper_cparms->hdr_bitmap = ¶ms->hdr_bitmap; + mapper_cparms->enc_hdr_bitmap = ¶ms->enc_hdr_bitmap; + mapper_cparms->hdr_field = params->hdr_field; + mapper_cparms->enc_field = params->enc_field; + mapper_cparms->comp_fld = params->comp_fld; + mapper_cparms->act = ¶ms->act_bitmap; + mapper_cparms->act_prop = ¶ms->act_prop; + mapper_cparms->flow_id = params->fid; + mapper_cparms->parent_flow = params->parent_flow; + mapper_cparms->child_flow = params->child_flow; + mapper_cparms->fld_bitmap = ¶ms->fld_bitmap; + mapper_cparms->flow_pattern_id = params->flow_pattern_id; + mapper_cparms->act_pattern_id = params->act_pattern_id; + mapper_cparms->app_id = params->app_id; + mapper_cparms->port_id = params->port_id; + mapper_cparms->tun_idx = params->tun_idx; + + /* update the signature fields into the computed field list */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_HDR_SIG_ID, + params->hdr_sig_id); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FLOW_SIG_ID, + params->flow_sig_id); + + /* update the WC Priority flag */ + if (!bnxt_ulp_cntxt_ptr2_ulp_flags_get(params->ulp_ctx, &ulp_flags) && + ULP_HIGH_AVAIL_IS_ENABLED(ulp_flags)) { + enum ulp_ha_mgr_region region = ULP_HA_REGION_LOW; + int32_t rc; + + rc = ulp_ha_mgr_region_get(params->ulp_ctx, ®ion); + if (rc) + BNXT_TF_DBG(ERR, "Unable to get WC region\n"); + if (region == ULP_HA_REGION_HI) + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_WC_IS_HA_HIGH_REG, + 1); + } +} + /* Function 
to create the rte flow. */ static struct rte_flow * -bnxt_ulp_flow_create(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +bnxt_ulp_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { struct bnxt_ulp_mapper_create_parms mapper_cparms = { 0 }; struct ulp_rte_parser_params params; - struct bnxt_ulp_context *ulp_ctx = NULL; - uint32_t class_id, act_tmpl; + struct bnxt_ulp_context *ulp_ctx; + int rc, ret = BNXT_TF_RC_ERROR; struct rte_flow *flow_id; + uint16_t func_id; uint32_t fid; - uint8_t *buffer; - uint32_t vnic; - int ret; if (bnxt_ulp_flow_validate_args(attr, pattern, actions, error) == BNXT_TF_RC_ERROR) { BNXT_TF_DBG(ERR, "Invalid arguments being passed\n"); - return NULL; + goto flow_error; } ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev); if (!ulp_ctx) { BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); - return NULL; + goto flow_error; } /* Initialize the parser params */ memset(¶ms, 0, sizeof(struct ulp_rte_parser_params)); + params.ulp_ctx = ulp_ctx; - if (attr->egress) - params.dir = ULP_DIR_EGRESS; + if (bnxt_ulp_cntxt_app_id_get(params.ulp_ctx, ¶ms.app_id)) { + BNXT_TF_DBG(ERR, "failed to get the app id\n"); + goto flow_error; + } + + /* Set the flow attributes */ + bnxt_ulp_set_dir_attributes(¶ms, attr); /* copy the device port id and direction for further processing */ - buffer = params.hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; - rte_memcpy(buffer, &dev->data->port_id, sizeof(uint16_t)); + ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_INCOMING_IF, + dev->data->port_id); + ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_DEV_PORT_ID, + dev->data->port_id); + ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_SVIF_FLAG, + BNXT_ULP_INVALID_SVIF_VAL); + + /* Get the function id */ + if (ulp_port_db_port_func_id_get(ulp_ctx, + dev->data->port_id, + &func_id)) { + BNXT_TF_DBG(ERR, "conversion of port to func id failed\n"); + goto flow_error; + } - /* Set the implicit vnic in the action property */ - vnic = (uint32_t)bnxt_get_vnic_id(dev->data->port_id); - vnic = htonl(vnic); - rte_memcpy(¶ms.act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], - &vnic, BNXT_ULP_ACT_PROP_SZ_VNIC); + /* Protect flow creation */ + if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx)) { + BNXT_TF_DBG(ERR, "Flow db lock acquire failed\n"); + goto flow_error; + } + + /* Allocate a Flow ID for attaching all resources for the flow to. + * Once allocated, all errors have to walk the list of resources and + * free each of them. 
+ */ + rc = ulp_flow_db_fid_alloc(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, + func_id, &fid); + if (rc) { + BNXT_TF_DBG(ERR, "Unable to allocate flow table entry\n"); + goto release_lock; + } /* Parse the rte flow pattern */ ret = bnxt_ulp_rte_parser_hdr_parse(pattern, ¶ms); if (ret != BNXT_TF_RC_SUCCESS) - goto parse_error; + goto free_fid; /* Parse the rte flow action */ ret = bnxt_ulp_rte_parser_act_parse(actions, ¶ms); if (ret != BNXT_TF_RC_SUCCESS) - goto parse_error; - - ret = ulp_matcher_pattern_match(¶ms, &class_id); - + goto free_fid; + + params.fid = fid; + params.func_id = func_id; + params.priority = attr->priority; + params.port_id = dev->data->port_id; + + /* Perform the rte flow post process */ + bnxt_ulp_rte_parser_post_process(¶ms); + + /* do the tunnel offload process if any */ + ret = ulp_tunnel_offload_process(¶ms); + if (ret == BNXT_TF_RC_ERROR) + goto free_fid; + +#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG +#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG_PARSER + /* Dump the rte flow pattern */ + ulp_parser_hdr_info_dump(¶ms); + /* Dump the rte flow action */ + ulp_parser_act_info_dump(¶ms); +#endif +#endif + + ret = ulp_matcher_pattern_match(¶ms, ¶ms.class_id); if (ret != BNXT_TF_RC_SUCCESS) - goto parse_error; + goto free_fid; - ret = ulp_matcher_action_match(¶ms, &act_tmpl); + ret = ulp_matcher_action_match(¶ms, ¶ms.act_tmpl); if (ret != BNXT_TF_RC_SUCCESS) - goto parse_error; + goto free_fid; - mapper_cparms.app_priority = attr->priority; - mapper_cparms.hdr_bitmap = ¶ms.hdr_bitmap; - mapper_cparms.hdr_field = params.hdr_field; - mapper_cparms.act = ¶ms.act_bitmap; - mapper_cparms.act_prop = ¶ms.act_prop; - mapper_cparms.class_tid = class_id; - mapper_cparms.act_tid = act_tmpl; + bnxt_ulp_init_mapper_params(&mapper_cparms, ¶ms, + BNXT_ULP_FDB_TYPE_REGULAR); + /* Call the ulp mapper to create the flow in the hardware. */ + ret = ulp_mapper_flow_create(ulp_ctx, &mapper_cparms); + if (ret) + goto free_fid; - /* call the ulp mapper to create the flow in the hardware */ - ret = ulp_mapper_flow_create(ulp_ctx, - &mapper_cparms, - &fid); - if (!ret) { - flow_id = (struct rte_flow *)((uintptr_t)fid); - return flow_id; - } + bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx); -parse_error: + flow_id = (struct rte_flow *)((uintptr_t)fid); + return flow_id; + +free_fid: + ulp_flow_db_fid_free(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, fid); +release_lock: + bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx); +flow_error: rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Failed to create flow."); return NULL; @@ -150,28 +272,41 @@ parse_error: /* Function to validate the rte flow. 
*/ static int -bnxt_ulp_flow_validate(struct rte_eth_dev *dev __rte_unused, +bnxt_ulp_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct ulp_rte_parser_params params; + struct ulp_rte_parser_params params; + struct bnxt_ulp_context *ulp_ctx; uint32_t class_id, act_tmpl; - int ret; + int ret = BNXT_TF_RC_ERROR; if (bnxt_ulp_flow_validate_args(attr, pattern, actions, error) == BNXT_TF_RC_ERROR) { BNXT_TF_DBG(ERR, "Invalid arguments being passed\n"); - return -EINVAL; + goto parse_error; + } + + ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev); + if (!ulp_ctx) { + BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); + goto parse_error; } /* Initialize the parser params */ memset(¶ms, 0, sizeof(struct ulp_rte_parser_params)); + params.ulp_ctx = ulp_ctx; - if (attr->egress) - params.dir = ULP_DIR_EGRESS; + if (bnxt_ulp_cntxt_app_id_get(params.ulp_ctx, ¶ms.app_id)) { + BNXT_TF_DBG(ERR, "failed to get the app id\n"); + goto parse_error; + } + + /* Set the flow attributes */ + bnxt_ulp_set_dir_attributes(¶ms, attr); /* Parse the rte flow pattern */ ret = bnxt_ulp_rte_parser_hdr_parse(pattern, ¶ms); @@ -183,6 +318,14 @@ bnxt_ulp_flow_validate(struct rte_eth_dev *dev __rte_unused, if (ret != BNXT_TF_RC_SUCCESS) goto parse_error; + /* Perform the rte flow post process */ + bnxt_ulp_rte_parser_post_process(¶ms); + + /* do the tunnel offload process if any */ + ret = ulp_tunnel_offload_process(¶ms); + if (ret == BNXT_TF_RC_ERROR) + goto parse_error; + ret = ulp_matcher_pattern_match(¶ms, &class_id); if (ret != BNXT_TF_RC_SUCCESS) @@ -202,31 +345,63 @@ parse_error: } /* Function to destroy the rte flow. */ -static int +int bnxt_ulp_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error) { - int ret = 0; struct bnxt_ulp_context *ulp_ctx; - uint32_t fid; + uint32_t flow_id; + uint16_t func_id; + int ret; ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev); if (!ulp_ctx) { BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to destroy flow."); + if (error) + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); return -EINVAL; } - fid = (uint32_t)(uintptr_t)flow; + flow_id = (uint32_t)(uintptr_t)flow; - ret = ulp_mapper_flow_destroy(ulp_ctx, fid); - if (ret) - rte_flow_error_set(error, -ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to destroy flow."); + if (ulp_port_db_port_func_id_get(ulp_ctx, + dev->data->port_id, + &func_id)) { + BNXT_TF_DBG(ERR, "conversion of port to func id failed\n"); + if (error) + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); + return -EINVAL; + } + + if (ulp_flow_db_validate_flow_func(ulp_ctx, flow_id, func_id) == + false) { + BNXT_TF_DBG(ERR, "Incorrect device params\n"); + if (error) + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); + return -EINVAL; + } + + if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx)) { + BNXT_TF_DBG(ERR, "Flow db lock acquire failed\n"); + return -EINVAL; + } + ret = ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, + flow_id); + if (ret) { + BNXT_TF_DBG(ERR, "Failed to destroy flow.\n"); + if (error) + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); + } + 
bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx); return ret; } @@ -237,24 +412,26 @@ bnxt_ulp_flow_flush(struct rte_eth_dev *eth_dev, struct rte_flow_error *error) { struct bnxt_ulp_context *ulp_ctx; - int32_t ret; - struct bnxt *bp; + int32_t ret = 0; + uint16_t func_id; ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev); if (!ulp_ctx) { - BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to flush flow."); - return -EINVAL; + return ret; } - bp = eth_dev->data->dev_private; /* Free the resources for the last device */ - if (!ulp_ctx_deinit_allowed(bp)) - return 0; - - ret = ulp_flow_db_flush_flows(ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE); + if (ulp_ctx_deinit_allowed(ulp_ctx)) { + ret = ulp_flow_db_session_flow_flush(ulp_ctx); + } else if (bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx)) { + ret = ulp_port_db_port_func_id_get(ulp_ctx, + eth_dev->data->port_id, + &func_id); + if (!ret) + ret = ulp_flow_db_function_flow_flush(ulp_ctx, func_id); + else + BNXT_TF_DBG(ERR, "convert port to func id failed\n"); + } if (ret) rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -262,11 +439,243 @@ bnxt_ulp_flow_flush(struct rte_eth_dev *eth_dev, return ret; } +/* Function to query the rte flows. */ +static int32_t +bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev, + struct rte_flow *flow, + const struct rte_flow_action *action, + void *data, + struct rte_flow_error *error) +{ + int rc = 0; + struct bnxt_ulp_context *ulp_ctx; + struct rte_flow_query_count *count; + uint32_t flow_id; + + ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev); + if (!ulp_ctx) { + BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to query flow."); + return -EINVAL; + } + + flow_id = (uint32_t)(uintptr_t)flow; + + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_COUNT: + count = data; + rc = ulp_fc_mgr_query_count_get(ulp_ctx, flow_id, count); + if (rc) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to query flow."); + } + break; + default: + rte_flow_error_set(error, -rc, RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "Unsupported action item"); + } + + return rc; +} + +/* Tunnel offload Apis */ +#define BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS 1 + +static int +bnxt_ulp_tunnel_decap_set(struct rte_eth_dev *eth_dev, + struct rte_flow_tunnel *tunnel, + struct rte_flow_action **pmd_actions, + uint32_t *num_of_actions, + struct rte_flow_error *error) +{ + struct bnxt_ulp_context *ulp_ctx; + struct bnxt_flow_app_tun_ent *tun_entry; + int32_t rc = 0; + + ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev); + if (ulp_ctx == NULL) { + BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "ULP context uninitialized"); + return -EINVAL; + } + + if (tunnel == NULL) { + BNXT_TF_DBG(ERR, "No tunnel specified\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "no tunnel specified"); + return -EINVAL; + } + + if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN) { + BNXT_TF_DBG(ERR, "Tunnel type unsupported\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "tunnel type unsupported"); + return -EINVAL; + } + + rc = ulp_app_tun_search_entry(ulp_ctx, tunnel, &tun_entry); + if (rc < 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "tunnel decap set failed"); + return -EINVAL; + } + + rc = 
ulp_app_tun_entry_set_decap_action(tun_entry); + if (rc < 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "tunnel decap set failed"); + return -EINVAL; + } + + *pmd_actions = &tun_entry->action; + *num_of_actions = BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS; + return 0; +} + +static int +bnxt_ulp_tunnel_match(struct rte_eth_dev *eth_dev, + struct rte_flow_tunnel *tunnel, + struct rte_flow_item **pmd_items, + uint32_t *num_of_items, + struct rte_flow_error *error) +{ + struct bnxt_ulp_context *ulp_ctx; + struct bnxt_flow_app_tun_ent *tun_entry; + int32_t rc = 0; + + ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev); + if (ulp_ctx == NULL) { + BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "ULP context uninitialized"); + return -EINVAL; + } + + if (tunnel == NULL) { + BNXT_TF_DBG(ERR, "No tunnel specified\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "no tunnel specified"); + return -EINVAL; + } + + if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN) { + BNXT_TF_DBG(ERR, "Tunnel type unsupported\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "tunnel type unsupported"); + return -EINVAL; + } + + rc = ulp_app_tun_search_entry(ulp_ctx, tunnel, &tun_entry); + if (rc < 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "tunnel match set failed"); + return -EINVAL; + } + + rc = ulp_app_tun_entry_set_decap_item(tun_entry); + if (rc < 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "tunnel match set failed"); + return -EINVAL; + } + + *pmd_items = &tun_entry->item; + *num_of_items = BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS; + return 0; +} + +static int +bnxt_ulp_tunnel_decap_release(struct rte_eth_dev *eth_dev, + struct rte_flow_action *pmd_actions, + uint32_t num_actions, + struct rte_flow_error *error) +{ + struct bnxt_ulp_context *ulp_ctx; + struct bnxt_flow_app_tun_ent *tun_entry; + const struct rte_flow_action *action_item = pmd_actions; + + ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev); + if (ulp_ctx == NULL) { + BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "ULP context uninitialized"); + return -EINVAL; + } + if (num_actions != BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS) { + BNXT_TF_DBG(ERR, "num actions is invalid\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "num actions is invalid"); + return -EINVAL; + } + while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) { + if (action_item->type == (typeof(tun_entry->action.type)) + BNXT_RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) { + tun_entry = ulp_app_tun_match_entry(ulp_ctx, + action_item->conf); + ulp_app_tun_entry_delete(tun_entry); + } + action_item++; + } + return 0; +} + +static int +bnxt_ulp_tunnel_item_release(struct rte_eth_dev *eth_dev, + struct rte_flow_item *pmd_items, + uint32_t num_items, + struct rte_flow_error *error) +{ + struct bnxt_ulp_context *ulp_ctx; + struct bnxt_flow_app_tun_ent *tun_entry; + + ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev); + if (ulp_ctx == NULL) { + BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "ULP context uninitialized"); + return -EINVAL; + } + if (num_items != BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS) { + BNXT_TF_DBG(ERR, "num items is invalid\n"); + rte_flow_error_set(error, EINVAL, + 
RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "num items is invalid"); + return -EINVAL; + } + + tun_entry = ulp_app_tun_match_entry(ulp_ctx, pmd_items->spec); + ulp_app_tun_entry_delete(tun_entry); + return 0; +} + const struct rte_flow_ops bnxt_ulp_rte_flow_ops = { .validate = bnxt_ulp_flow_validate, .create = bnxt_ulp_flow_create, .destroy = bnxt_ulp_flow_destroy, .flush = bnxt_ulp_flow_flush, - .query = NULL, - .isolate = NULL + .query = bnxt_ulp_flow_query, + .isolate = NULL, + /* Tunnel offload callbacks */ + .tunnel_decap_set = bnxt_ulp_tunnel_decap_set, + .tunnel_match = bnxt_ulp_tunnel_match, + .tunnel_action_decap_release = bnxt_ulp_tunnel_decap_release, + .tunnel_item_release = bnxt_ulp_tunnel_item_release, + .get_restore_info = NULL };
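
Not part of the diff above: a minimal usage sketch, for context, of the two rte_flow entry points this patch wires up in bnxt_ulp_rte_flow_ops (flow query via the COUNT action and the experimental tunnel-offload decap helpers). It uses only the public rte_flow API; the VXLAN-only tunnel type and the assumption that the queried flow was created with a COUNT action mirror what the bnxt callbacks above expect, while the port id, function names and the elided flow-creation step are illustrative placeholders, not part of this patch.

#include <stdio.h>
#include <inttypes.h>
#include <rte_flow.h>

/* Ask the PMD for its decap action for a VXLAN tunnel; this call reaches
 * bnxt_ulp_tunnel_decap_set() above, and the matching release call reaches
 * bnxt_ulp_tunnel_decap_release().
 */
static int
example_tunnel_decap(uint16_t port_id)
{
	struct rte_flow_tunnel tunnel = {
		.type = RTE_FLOW_ITEM_TYPE_VXLAN, /* only type bnxt accepts here */
	};
	struct rte_flow_action *pmd_actions = NULL;
	uint32_t num_actions = 0;
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
					&num_actions, &error);
	if (ret)
		return ret;

	/* ... prepend pmd_actions to the action list of the decap flow,
	 * create the flow with rte_flow_create(), etc. ...
	 */

	return rte_flow_tunnel_action_decap_release(port_id, pmd_actions,
						    num_actions, &error);
}

/* Read the counters of a flow created with RTE_FLOW_ACTION_TYPE_COUNT;
 * rte_flow_query() lands in bnxt_ulp_flow_query() above, which forwards
 * the request to ulp_fc_mgr_query_count_get().
 */
static int
example_query_count(uint16_t port_id, struct rte_flow *flow)
{
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count count = { .reset = 0 };
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_query(port_id, flow, &count_action, &count, &error);
	if (ret == 0 && count.hits_set)
		printf("hits: %" PRIu64 " bytes: %" PRIu64 "\n",
		       count.hits, count.bytes);
	return ret;
}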