/* Function to create the rte flow. */
static struct rte_flow *
-bnxt_ulp_flow_create(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+bnxt_ulp_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
- struct ulp_rte_hdr_bitmap hdr_bitmap;
- struct ulp_rte_hdr_field hdr_field[BNXT_ULP_PROTO_HDR_MAX];
- struct ulp_rte_act_bitmap act_bitmap;
- struct ulp_rte_act_prop act_prop;
- enum ulp_direction_type dir = ULP_DIR_INGRESS;
- uint32_t class_id, act_tmpl;
- uint32_t app_priority;
- int ret;
+ struct bnxt_ulp_mapper_create_parms mapper_cparms = { 0 };
+ struct ulp_rte_parser_params params;
struct bnxt_ulp_context *ulp_ctx = NULL;
- uint32_t vnic;
- uint8_t svif;
+ uint32_t class_id, act_tmpl;
struct rte_flow *flow_id;
uint32_t fid;
+ int ret;
if (bnxt_ulp_flow_validate_args(attr,
pattern, actions,
return NULL;
}
- /* clear the header bitmap and field structure */
- memset(&hdr_bitmap, 0, sizeof(struct ulp_rte_hdr_bitmap));
- memset(hdr_field, 0, sizeof(hdr_field));
- memset(&act_bitmap, 0, sizeof(act_bitmap));
- memset(&act_prop, 0, sizeof(act_prop));
-
- svif = bnxt_get_svif(dev->data->port_id, false);
- BNXT_TF_DBG(ERR, "SVIF for port[%d][port]=0x%08x\n",
- dev->data->port_id, svif);
-
- hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif);
- hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec[0] = svif;
- hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask[0] = -1;
- ULP_BITMAP_SET(hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF);
-
- /*
- * VNIC is being pushed as 32bit and the pop will take care of
- * proper size
- */
- vnic = (uint32_t)bnxt_get_vnic_id(dev->data->port_id);
- vnic = htonl(vnic);
- rte_memcpy(&act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
- &vnic, BNXT_ULP_ACT_PROP_SZ_VNIC);
+ /* Initialize the parser params */
+ memset(&params, 0, sizeof(struct ulp_rte_parser_params));
+
+ if (attr->egress)
+ params.dir = ULP_DIR_EGRESS;
+
+ /* copy the device port id and direction for further processing */
+ ULP_UTIL_CHF_IDX_WR(&params, BNXT_ULP_CHF_IDX_INCOMING_IF,
+ dev->data->port_id);
+ ULP_UTIL_CHF_IDX_WR(&params, BNXT_ULP_CHF_IDX_DIRECTION, params.dir);
/* Parse the rte flow pattern */
- ret = bnxt_ulp_rte_parser_hdr_parse(pattern,
- &hdr_bitmap,
- hdr_field);
+ ret = bnxt_ulp_rte_parser_hdr_parse(pattern, &params);
if (ret != BNXT_TF_RC_SUCCESS)
goto parse_error;
/* Parse the rte flow action */
- ret = bnxt_ulp_rte_parser_act_parse(actions,
- &act_bitmap,
- &act_prop);
+ ret = bnxt_ulp_rte_parser_act_parse(actions, &params);
if (ret != BNXT_TF_RC_SUCCESS)
goto parse_error;
- if (attr->egress)
- dir = ULP_DIR_EGRESS;
-
- ret = ulp_matcher_pattern_match(dir, &hdr_bitmap, hdr_field,
- &act_bitmap, &class_id);
-
+ ret = ulp_matcher_pattern_match(&params, &class_id);
if (ret != BNXT_TF_RC_SUCCESS)
goto parse_error;
- ret = ulp_matcher_action_match(dir, &act_bitmap, &act_tmpl);
+ ret = ulp_matcher_action_match(&params, &act_tmpl);
if (ret != BNXT_TF_RC_SUCCESS)
goto parse_error;
- app_priority = attr->priority;
- /* call the ulp mapper to create the flow in the hardware */
- ret = ulp_mapper_flow_create(ulp_ctx,
- app_priority,
- &hdr_bitmap,
- hdr_field,
- &act_bitmap,
- &act_prop,
- class_id,
- act_tmpl,
- &fid);
+ mapper_cparms.app_priority = attr->priority;
+ mapper_cparms.hdr_bitmap = &params.hdr_bitmap;
+ mapper_cparms.hdr_field = params.hdr_field;
+ mapper_cparms.act = &params.act_bitmap;
+ mapper_cparms.act_prop = &params.act_prop;
+ mapper_cparms.class_tid = class_id;
+ mapper_cparms.act_tid = act_tmpl;
+ mapper_cparms.func_id = bnxt_get_fw_func_id(dev->data->port_id);
+ mapper_cparms.dir = params.dir;
+
+ /* Call the ulp mapper to create the flow in the hardware. */
+ ret = ulp_mapper_flow_create(ulp_ctx, &mapper_cparms, &fid);
if (!ret) {
flow_id = (struct rte_flow *)((uintptr_t)fid);
return flow_id;
return NULL;
}
+/* Function to validate the rte flow. */
+static int
+bnxt_ulp_flow_validate(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct ulp_rte_parser_params params;
+ uint32_t class_id, act_tmpl;
+ int ret;
+
+ if (bnxt_ulp_flow_validate_args(attr,
+ pattern, actions,
+ error) == BNXT_TF_RC_ERROR) {
+ BNXT_TF_DBG(ERR, "Invalid arguments being passed\n");
+ return -EINVAL;
+ }
+
+ /* Initialize the parser params */
+ memset(&params, 0, sizeof(struct ulp_rte_parser_params));
+
+ if (attr->egress)
+ params.dir = ULP_DIR_EGRESS;
+
+ /* Parse the rte flow pattern */
+ ret = bnxt_ulp_rte_parser_hdr_parse(pattern, &params);
+ if (ret != BNXT_TF_RC_SUCCESS)
+ goto parse_error;
+
+ /* Parse the rte flow action */
+ ret = bnxt_ulp_rte_parser_act_parse(actions, &params);
+ if (ret != BNXT_TF_RC_SUCCESS)
+ goto parse_error;
+
+ ret = ulp_matcher_pattern_match(&params, &class_id);
+
+ if (ret != BNXT_TF_RC_SUCCESS)
+ goto parse_error;
+
+ ret = ulp_matcher_action_match(&params, &act_tmpl);
+ if (ret != BNXT_TF_RC_SUCCESS)
+ goto parse_error;
+
+ /* all good return success */
+ return ret;
+
+parse_error:
+ rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to validate flow.");
+ return -EINVAL;
+}
+
+/* Function to destroy the rte flow. */
+static int
+bnxt_ulp_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct bnxt_ulp_context *ulp_ctx;
+ uint32_t flow_id;
+ uint16_t func_id;
+
+ ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
+ if (!ulp_ctx) {
+ BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+ return -EINVAL;
+ }
+
+ flow_id = (uint32_t)(uintptr_t)flow;
+ func_id = bnxt_get_fw_func_id(dev->data->port_id);
+
+ if (ulp_flow_db_validate_flow_func(ulp_ctx, flow_id, func_id) ==
+ false) {
+ BNXT_TF_DBG(ERR, "Incorrect device params\n");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+ return -EINVAL;
+ }
+
+ ret = ulp_mapper_flow_destroy(ulp_ctx, flow_id);
+ if (ret)
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+
+ return ret;
+}
+
+/* Function to destroy the rte flows. */
+static int32_t
+bnxt_ulp_flow_flush(struct rte_eth_dev *eth_dev,
+ struct rte_flow_error *error)
+{
+ struct bnxt_ulp_context *ulp_ctx;
+ int32_t ret = 0;
+ struct bnxt *bp;
+ uint16_t func_id;
+
+ ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
+ if (!ulp_ctx) {
+ BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush flow.");
+ return -EINVAL;
+ }
+ bp = eth_dev->data->dev_private;
+
+ /* Free the resources for the last device */
+ if (ulp_ctx_deinit_allowed(bp)) {
+ ret = ulp_flow_db_session_flow_flush(ulp_ctx);
+ } else if (bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx)) {
+ func_id = bnxt_get_fw_func_id(eth_dev->data->port_id);
+ ret = ulp_flow_db_function_flow_flush(ulp_ctx, func_id);
+ }
+ if (ret)
+ rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush flow.");
+ return ret;
+}
+
const struct rte_flow_ops bnxt_ulp_rte_flow_ops = {
- .validate = NULL,
+ .validate = bnxt_ulp_flow_validate,
.create = bnxt_ulp_flow_create,
- .destroy = NULL,
- .flush = NULL,
+ .destroy = bnxt_ulp_flow_destroy,
+ .flush = bnxt_ulp_flow_flush,
.query = NULL,
.isolate = NULL
};