1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "bnxt_tf_common.h"
8 #include "ulp_rte_parser.h"
9 #include "ulp_matcher.h"
10 #include "ulp_flow_db.h"
11 #include "ulp_mapper.h"
12 #include "ulp_fc_mgr.h"
13 #include "ulp_port_db.h"
14 #include "ulp_ha_mgr.h"
16 #include <rte_malloc.h>
17 #ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
18 #include "ulp_template_debug_proto.h"
/*
 * Sanity-check the rte_flow create/validate arguments before parsing.
 *
 * On any bad argument this sets *error via rte_flow_error_set() with the
 * matching RTE_FLOW_ERROR_TYPE_* and returns BNXT_TF_RC_ERROR; returns
 * BNXT_TF_RC_SUCCESS when the arguments are usable.
 *
 * NOTE(review): this chunk is a lossy extraction — the return type, braces
 * and the NULL-check "if (...)" guard lines that select each error branch
 * are missing; restore them from the upstream bnxt TruFlow driver before
 * compiling.
 */
22 bnxt_ulp_flow_validate_args(const struct rte_flow_attr *attr,
23 const struct rte_flow_item pattern[],
24 const struct rte_flow_action actions[],
25 struct rte_flow_error *error)
27 /* Perform the validation of the arguments for null */
29 return BNXT_TF_RC_ERROR;
/* Missing/NULL pattern array. */
32 rte_flow_error_set(error,
34 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
37 return BNXT_TF_RC_ERROR;
/* Missing/NULL action array. */
41 rte_flow_error_set(error,
43 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
46 return BNXT_TF_RC_ERROR;
/* Missing/NULL attr. */
50 rte_flow_error_set(error,
52 RTE_FLOW_ERROR_TYPE_ATTR,
55 return BNXT_TF_RC_ERROR;
/* A flow may be egress or ingress, never both at once. */
58 if (attr->egress && attr->ingress) {
59 rte_flow_error_set(error,
61 RTE_FLOW_ERROR_TYPE_ATTR,
63 "EGRESS AND INGRESS UNSUPPORTED");
64 return BNXT_TF_RC_ERROR;
66 return BNXT_TF_RC_SUCCESS;
/*
 * Translate the rte_flow_attr direction bits into BNXT_ULP_FLOW_ATTR_*
 * flags OR'ed into params->dir_attr.  The RTE_VERSION guard keeps the
 * transfer attribute out of builds older than DPDK 17.11.10.16.
 *
 * NOTE(review): the per-direction "if (attr->egress)" style guards and the
 * closing "#endif" are missing from this extraction — each |= below is
 * presumably conditional on the corresponding attr bit; confirm upstream.
 */
70 bnxt_ulp_set_dir_attributes(struct ulp_rte_parser_params *params,
71 const struct rte_flow_attr *attr)
73 /* Set the flow attributes */
75 params->dir_attr |= BNXT_ULP_FLOW_ATTR_EGRESS;
77 params->dir_attr |= BNXT_ULP_FLOW_ATTR_INGRESS;
78 #if RTE_VERSION_NUM(17, 11, 10, 16) < RTE_VERSION
80 params->dir_attr |= BNXT_ULP_FLOW_ATTR_TRANSFER;
/*
 * Populate the mapper create-parms structure from the parsed flow state so
 * the ulp mapper can program the flow: zero it, then copy the template ids,
 * function/port ids, header/action bitmaps and field lists out of *params,
 * write the header/flow signature ids into the computed field list, and —
 * when high availability is enabled in the ulp context flags — record
 * whether the WC region currently in use is the HIGH region.
 *
 * NOTE(review): "¶ms->" below is mojibake for "&params->" (HTML
 * "&para;" entity corruption), and "®ion" for "&region"; several guard
 * lines (the rc check after ulp_ha_mgr_region_get, braces) are missing
 * from this extraction — repair against the upstream driver.
 */
85 bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_create_parms *mapper_cparms,
86 struct ulp_rte_parser_params *params,
87 enum bnxt_ulp_fdb_type flow_type)
89 uint32_t ulp_flags = 0;
91 memset(mapper_cparms, 0, sizeof(*mapper_cparms));
92 mapper_cparms->flow_type = flow_type;
93 mapper_cparms->app_priority = params->priority;
94 mapper_cparms->dir_attr = params->dir_attr;
95 mapper_cparms->class_tid = params->class_id;
96 mapper_cparms->act_tid = params->act_tmpl;
97 mapper_cparms->func_id = params->func_id;
98 mapper_cparms->hdr_bitmap = ¶ms->hdr_bitmap;
99 mapper_cparms->hdr_field = params->hdr_field;
100 mapper_cparms->comp_fld = params->comp_fld;
101 mapper_cparms->act = ¶ms->act_bitmap;
102 mapper_cparms->act_prop = ¶ms->act_prop;
103 mapper_cparms->flow_id = params->fid;
104 mapper_cparms->parent_flow = params->parent_flow;
105 mapper_cparms->child_flow = params->child_flow;
106 mapper_cparms->fld_bitmap = ¶ms->fld_bitmap;
107 mapper_cparms->flow_pattern_id = params->flow_pattern_id;
108 mapper_cparms->act_pattern_id = params->act_pattern_id;
109 mapper_cparms->app_id = params->app_id;
110 mapper_cparms->port_id = params->port_id;
111 mapper_cparms->tun_idx = params->tun_idx;
113 /* update the signature fields into the computed field list */
114 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_HDR_SIG_ID,
116 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FLOW_SIG_ID,
117 params->flow_sig_id);
119 /* update the WC Priority flag */
120 if (!bnxt_ulp_cntxt_ptr2_ulp_flags_get(params->ulp_ctx, &ulp_flags) &&
121 ULP_HIGH_AVAIL_IS_ENABLED(ulp_flags)) {
122 enum ulp_ha_mgr_region region = ULP_HA_REGION_LOW;
/* Region lookup failure is logged but presumably non-fatal — confirm. */
125 rc = ulp_ha_mgr_region_get(params->ulp_ctx, ®ion);
127 BNXT_TF_DBG(ERR, "Unable to get WC region\n");
128 if (region == ULP_HA_REGION_HI)
129 ULP_COMP_FLD_IDX_WR(params,
130 BNXT_ULP_CF_IDX_WC_IS_HA_HIGH_REG,
135 /* Function to create the rte flow. */
/*
 * rte_flow_ops .create handler.
 *
 * Pipeline: validate args -> fetch ulp context -> init parser params and
 * app id -> record direction/port computed fields -> resolve func_id from
 * the port db -> take the flow-db lock -> allocate a flow id (fid) ->
 * parse pattern and actions -> post-process -> tunnel offload -> match
 * class and action templates -> build mapper parms -> program the flow via
 * ulp_mapper_flow_create().  On success the allocated fid is returned to
 * the caller cast as an opaque struct rte_flow pointer; on failure the fid
 * is freed, the lock released, and *error is set.
 *
 * NOTE(review): lossy extraction — error-path gotos/labels, several NULL
 * checks, and the func_id/fid local declarations are missing; "¶ms" is
 * mojibake for "&params" throughout.  Restore from upstream before use.
 */
136 static struct rte_flow *
137 bnxt_ulp_flow_create(struct rte_eth_dev *dev,
138 const struct rte_flow_attr *attr,
139 const struct rte_flow_item pattern[],
140 const struct rte_flow_action actions[],
141 struct rte_flow_error *error)
143 struct bnxt_ulp_mapper_create_parms mapper_cparms = { 0 };
144 struct ulp_rte_parser_params params;
145 struct bnxt_ulp_context *ulp_ctx;
146 int rc, ret = BNXT_TF_RC_ERROR;
147 struct rte_flow *flow_id;
151 if (bnxt_ulp_flow_validate_args(attr,
153 error) == BNXT_TF_RC_ERROR) {
154 BNXT_TF_DBG(ERR, "Invalid arguments being passed\n");
158 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
160 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
164 /* Initialize the parser params */
165 memset(¶ms, 0, sizeof(struct ulp_rte_parser_params));
166 params.ulp_ctx = ulp_ctx;
168 if (bnxt_ulp_cntxt_app_id_get(params.ulp_ctx, ¶ms.app_id)) {
169 BNXT_TF_DBG(ERR, "failed to get the app id\n");
173 /* Set the flow attributes */
174 bnxt_ulp_set_dir_attributes(¶ms, attr);
176 /* copy the device port id and direction for further processing */
177 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_INCOMING_IF,
179 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_DEV_PORT_ID,
181 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_SVIF_FLAG,
182 BNXT_ULP_INVALID_SVIF_VAL);
184 /* Get the function id */
185 if (ulp_port_db_port_func_id_get(ulp_ctx,
188 BNXT_TF_DBG(ERR, "conversion of port to func id failed\n");
192 /* Protect flow creation */
193 if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx)) {
194 BNXT_TF_DBG(ERR, "Flow db lock acquire failed\n");
198 /* Allocate a Flow ID for attaching all resources for the flow to.
199 * Once allocated, all errors have to walk the list of resources and
202 rc = ulp_flow_db_fid_alloc(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR,
205 BNXT_TF_DBG(ERR, "Unable to allocate flow table entry\n");
209 /* Parse the rte flow pattern */
210 ret = bnxt_ulp_rte_parser_hdr_parse(pattern, ¶ms);
211 if (ret != BNXT_TF_RC_SUCCESS)
214 /* Parse the rte flow action */
215 ret = bnxt_ulp_rte_parser_act_parse(actions, ¶ms);
216 if (ret != BNXT_TF_RC_SUCCESS)
220 params.func_id = func_id;
221 params.priority = attr->priority;
222 params.port_id = dev->data->port_id;
224 /* Perform the rte flow post process */
225 bnxt_ulp_rte_parser_post_process(¶ms);
227 /* do the tunnel offload process if any */
228 ret = ulp_tunnel_offload_process(¶ms);
229 if (ret == BNXT_TF_RC_ERROR)
/* Debug-only parser dumps; compiled out of production builds. */
232 #ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
233 #ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG_PARSER
234 /* Dump the rte flow pattern */
235 ulp_parser_hdr_info_dump(¶ms);
236 /* Dump the rte flow action */
237 ulp_parser_act_info_dump(¶ms);
/* Resolve the class and action template ids from the parsed bitmaps. */
241 ret = ulp_matcher_pattern_match(¶ms, ¶ms.class_id);
242 if (ret != BNXT_TF_RC_SUCCESS)
245 ret = ulp_matcher_action_match(¶ms, ¶ms.act_tmpl);
246 if (ret != BNXT_TF_RC_SUCCESS)
249 bnxt_ulp_init_mapper_params(&mapper_cparms, ¶ms,
250 BNXT_ULP_FDB_TYPE_REGULAR);
251 /* Call the ulp mapper to create the flow in the hardware. */
252 ret = ulp_mapper_flow_create(ulp_ctx, &mapper_cparms);
256 bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
/* The fid doubles as the opaque rte_flow handle handed to the app. */
258 flow_id = (struct rte_flow *)((uintptr_t)fid);
/* Error path: release the fid and lock, then report the failure. */
262 ulp_flow_db_fid_free(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, fid);
264 bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
266 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
267 "Failed to create flow.");
271 /* Function to validate the rte flow. */
/*
 * rte_flow_ops .validate handler.
 *
 * Runs the same front half of the create pipeline as bnxt_ulp_flow_create
 * — arg validation, parser init, direction attributes, pattern/action
 * parse, post-process, tunnel offload, class and action template match —
 * but programs nothing: matched template ids land in locals and are
 * discarded.  Returns 0-equivalent success or sets *error and returns the
 * error code.
 *
 * NOTE(review): lossy extraction — return type, braces, error labels and
 * several guard lines are missing; "¶ms" is mojibake for "&params".
 */
273 bnxt_ulp_flow_validate(struct rte_eth_dev *dev,
274 const struct rte_flow_attr *attr,
275 const struct rte_flow_item pattern[],
276 const struct rte_flow_action actions[],
277 struct rte_flow_error *error)
279 struct ulp_rte_parser_params params;
280 struct bnxt_ulp_context *ulp_ctx;
281 uint32_t class_id, act_tmpl;
282 int ret = BNXT_TF_RC_ERROR;
284 if (bnxt_ulp_flow_validate_args(attr,
286 error) == BNXT_TF_RC_ERROR) {
287 BNXT_TF_DBG(ERR, "Invalid arguments being passed\n");
291 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
293 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
297 /* Initialize the parser params */
298 memset(¶ms, 0, sizeof(struct ulp_rte_parser_params));
299 params.ulp_ctx = ulp_ctx;
301 if (bnxt_ulp_cntxt_app_id_get(params.ulp_ctx, ¶ms.app_id)) {
302 BNXT_TF_DBG(ERR, "failed to get the app id\n");
306 /* Set the flow attributes */
307 bnxt_ulp_set_dir_attributes(¶ms, attr);
309 /* Parse the rte flow pattern */
310 ret = bnxt_ulp_rte_parser_hdr_parse(pattern, ¶ms);
311 if (ret != BNXT_TF_RC_SUCCESS)
314 /* Parse the rte flow action */
315 ret = bnxt_ulp_rte_parser_act_parse(actions, ¶ms);
316 if (ret != BNXT_TF_RC_SUCCESS)
319 /* Perform the rte flow post process */
320 bnxt_ulp_rte_parser_post_process(¶ms);
322 /* do the tunnel offload process if any */
323 ret = ulp_tunnel_offload_process(¶ms);
324 if (ret == BNXT_TF_RC_ERROR)
327 ret = ulp_matcher_pattern_match(¶ms, &class_id);
329 if (ret != BNXT_TF_RC_SUCCESS)
332 ret = ulp_matcher_action_match(¶ms, &act_tmpl);
333 if (ret != BNXT_TF_RC_SUCCESS)
336 /* all good return success */
340 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
341 "Failed to validate flow.");
345 /* Function to destroy the rte flow. */
/*
 * rte_flow_ops .destroy handler.
 *
 * Recovers the flow id from the opaque rte_flow handle, verifies via the
 * port and flow databases that the flow belongs to this port's function
 * id, then destroys it through the mapper under the flow-db lock.  Every
 * failure path sets *error with "Failed to destroy flow.".
 *
 * NOTE(review): lossy extraction — return type, flow_id/func_id/ret local
 * declarations, braces and some guard lines are missing.
 */
347 bnxt_ulp_flow_destroy(struct rte_eth_dev *dev,
348 struct rte_flow *flow,
349 struct rte_flow_error *error)
351 struct bnxt_ulp_context *ulp_ctx;
356 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
358 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
360 rte_flow_error_set(error, EINVAL,
361 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
362 "Failed to destroy flow.");
/* Opaque handle -> fid (inverse of the cast done at create time). */
366 flow_id = (uint32_t)(uintptr_t)flow;
368 if (ulp_port_db_port_func_id_get(ulp_ctx,
371 BNXT_TF_DBG(ERR, "conversion of port to func id failed\n");
373 rte_flow_error_set(error, EINVAL,
374 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
375 "Failed to destroy flow.");
/* Reject handles that do not belong to this function/port. */
379 if (ulp_flow_db_validate_flow_func(ulp_ctx, flow_id, func_id) ==
381 BNXT_TF_DBG(ERR, "Incorrect device params\n");
383 rte_flow_error_set(error, EINVAL,
384 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
385 "Failed to destroy flow.");
389 if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx)) {
390 BNXT_TF_DBG(ERR, "Flow db lock acquire failed\n");
393 ret = ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR,
396 BNXT_TF_DBG(ERR, "Failed to destroy flow.\n");
398 rte_flow_error_set(error, -ret,
399 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
400 "Failed to destroy flow.");
402 bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
407 /* Function to destroy the rte flows. */
/*
 * rte_flow_ops .flush handler.
 *
 * If this is the last device in the session, flush every flow in the
 * session; otherwise (flow db still present) flush only the flows owned
 * by this port's function id.  A failure sets *error with
 * "Failed to flush flow.".
 *
 * NOTE(review): lossy extraction — return type, func_id/ret declarations,
 * braces and the NULL-context early return are missing.
 */
409 bnxt_ulp_flow_flush(struct rte_eth_dev *eth_dev,
410 struct rte_flow_error *error)
412 struct bnxt_ulp_context *ulp_ctx;
416 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
421 /* Free the resources for the last device */
422 if (ulp_ctx_deinit_allowed(ulp_ctx)) {
423 ret = ulp_flow_db_session_flow_flush(ulp_ctx);
424 } else if (bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx)) {
425 ret = ulp_port_db_port_func_id_get(ulp_ctx,
426 eth_dev->data->port_id,
429 ret = ulp_flow_db_function_flow_flush(ulp_ctx, func_id);
431 BNXT_TF_DBG(ERR, "convert port to func id failed\n");
434 rte_flow_error_set(error, ret,
435 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
436 "Failed to flush flow.");
440 /* Function to query the rte flows. */
/*
 * rte_flow_ops .query handler.
 *
 * Currently only RTE_FLOW_ACTION_TYPE_COUNT is supported: the flow id is
 * recovered from the opaque handle and the hit/byte counters are fetched
 * from the flow-counter manager into the caller's rte_flow_query_count.
 * Any other action type sets *error with "Unsupported action item".
 *
 * NOTE(review): lossy extraction — return type, flow_id/rc declarations,
 * the assignment of `count` from the query `data` pointer, braces and the
 * default switch arm are missing.
 */
442 bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
443 struct rte_flow *flow,
444 const struct rte_flow_action *action,
446 struct rte_flow_error *error)
449 struct bnxt_ulp_context *ulp_ctx;
450 struct rte_flow_query_count *count;
453 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
455 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
456 rte_flow_error_set(error, EINVAL,
457 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
458 "Failed to query flow.");
462 flow_id = (uint32_t)(uintptr_t)flow;
464 switch (action->type) {
465 case RTE_FLOW_ACTION_TYPE_COUNT:
467 rc = ulp_fc_mgr_query_count_get(ulp_ctx, flow_id, count);
469 rte_flow_error_set(error, EINVAL,
470 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
471 "Failed to query flow.");
/* Presumably the default arm — any non-COUNT action is rejected. */
475 rte_flow_error_set(error, -rc, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
476 NULL, "Unsupported action item");
482 /* Tunnel offload Apis */
483 #define BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS 1
/*
 * rte_flow_ops .tunnel_decap_set handler (tunnel offload API).
 *
 * Only VXLAN tunnels are supported.  Looks up (or creates) the app tunnel
 * entry for this tunnel, installs its decap action, and hands the PMD's
 * single action back to the application via *pmd_actions/*num_of_actions.
 * All failure paths set *error and presumably return an -EINVAL-style
 * code (return statements missing from this extraction).
 *
 * NOTE(review): lossy extraction — return type, `int rc` declaration,
 * return statements and some closing braces are missing.
 */
486 bnxt_ulp_tunnel_decap_set(struct rte_eth_dev *eth_dev,
487 struct rte_flow_tunnel *tunnel,
488 struct rte_flow_action **pmd_actions,
489 uint32_t *num_of_actions,
490 struct rte_flow_error *error)
492 struct bnxt_ulp_context *ulp_ctx;
493 struct bnxt_flow_app_tun_ent *tun_entry;
496 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
497 if (ulp_ctx == NULL) {
498 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
499 rte_flow_error_set(error, EINVAL,
500 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
501 "ULP context uninitialized");
505 if (tunnel == NULL) {
506 BNXT_TF_DBG(ERR, "No tunnel specified\n");
507 rte_flow_error_set(error, EINVAL,
508 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
509 "no tunnel specified");
513 if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
514 BNXT_TF_DBG(ERR, "Tunnel type unsupported\n");
515 rte_flow_error_set(error, EINVAL,
516 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
517 "tunnel type unsupported");
521 rc = ulp_app_tun_search_entry(ulp_ctx, tunnel, &tun_entry);
523 rte_flow_error_set(error, EINVAL,
524 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
525 "tunnel decap set failed");
529 rc = ulp_app_tun_entry_set_decap_action(tun_entry);
531 rte_flow_error_set(error, EINVAL,
532 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
533 "tunnel decap set failed");
/* Hand the single PMD-private decap action back to the application. */
537 *pmd_actions = &tun_entry->action;
538 *num_of_actions = BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS;
/*
 * rte_flow_ops .tunnel_match handler (tunnel offload API).
 *
 * Mirror of bnxt_ulp_tunnel_decap_set for the pattern side: VXLAN only,
 * finds the app tunnel entry, installs its decap item, and returns the
 * PMD's single match item via *pmd_items/*num_of_items.
 *
 * NOTE(review): lossy extraction — return type, `int rc` declaration,
 * return statements and some closing braces are missing.
 */
543 bnxt_ulp_tunnel_match(struct rte_eth_dev *eth_dev,
544 struct rte_flow_tunnel *tunnel,
545 struct rte_flow_item **pmd_items,
546 uint32_t *num_of_items,
547 struct rte_flow_error *error)
549 struct bnxt_ulp_context *ulp_ctx;
550 struct bnxt_flow_app_tun_ent *tun_entry;
553 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
554 if (ulp_ctx == NULL) {
555 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
556 rte_flow_error_set(error, EINVAL,
557 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
558 "ULP context uninitialized");
562 if (tunnel == NULL) {
563 BNXT_TF_DBG(ERR, "No tunnel specified\n");
564 rte_flow_error_set(error, EINVAL,
565 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
566 "no tunnel specified");
570 if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
571 BNXT_TF_DBG(ERR, "Tunnel type unsupported\n");
572 rte_flow_error_set(error, EINVAL,
573 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
574 "tunnel type unsupported");
578 rc = ulp_app_tun_search_entry(ulp_ctx, tunnel, &tun_entry);
580 rte_flow_error_set(error, EINVAL,
581 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
582 "tunnel match set failed");
586 rc = ulp_app_tun_entry_set_decap_item(tun_entry);
588 rte_flow_error_set(error, EINVAL,
589 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
590 "tunnel match set failed");
/* Hand the single PMD-private match item back to the application. */
594 *pmd_items = &tun_entry->item;
595 *num_of_items = BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS;
/*
 * rte_flow_ops .tunnel_action_decap_release handler.
 *
 * Releases the PMD-private decap action previously returned by
 * bnxt_ulp_tunnel_decap_set: validates the context and the action count,
 * then walks the END-terminated action list and deletes the app tunnel
 * entry for each VXLAN-decap action found.
 *
 * NOTE(review): lossy extraction — return type, return statements, the
 * action_item increment and some braces are missing.
 */
600 bnxt_ulp_tunnel_decap_release(struct rte_eth_dev *eth_dev,
601 struct rte_flow_action *pmd_actions,
602 uint32_t num_actions,
603 struct rte_flow_error *error)
605 struct bnxt_ulp_context *ulp_ctx;
606 struct bnxt_flow_app_tun_ent *tun_entry;
607 const struct rte_flow_action *action_item = pmd_actions;
609 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
610 if (ulp_ctx == NULL) {
611 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
612 rte_flow_error_set(error, EINVAL,
613 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
614 "ULP context uninitialized");
/* Exactly one action was handed out, so exactly one must come back. */
617 if (num_actions != BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS) {
618 BNXT_TF_DBG(ERR, "num actions is invalid\n");
619 rte_flow_error_set(error, EINVAL,
620 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
621 "num actions is invalid");
624 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
/* The decap action type is PMD-private, hence the cast for comparison. */
625 if (action_item->type == (typeof(tun_entry->action.type))
626 BNXT_RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
627 tun_entry = ulp_app_tun_match_entry(ulp_ctx,
629 ulp_app_tun_entry_delete(tun_entry);
/*
 * rte_flow_ops .tunnel_item_release handler.
 *
 * Releases the PMD-private match item previously returned by
 * bnxt_ulp_tunnel_match: validates the context and item count, then finds
 * the app tunnel entry via the item's spec and deletes it.
 *
 * NOTE(review): lossy extraction — return type, the `num_items` parameter
 * line, return statements and some braces are missing.
 */
637 bnxt_ulp_tunnel_item_release(struct rte_eth_dev *eth_dev,
638 struct rte_flow_item *pmd_items,
640 struct rte_flow_error *error)
642 struct bnxt_ulp_context *ulp_ctx;
643 struct bnxt_flow_app_tun_ent *tun_entry;
645 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
646 if (ulp_ctx == NULL) {
647 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
648 rte_flow_error_set(error, EINVAL,
649 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
650 "ULP context uninitialized");
/* Exactly one item was handed out, so exactly one must come back. */
653 if (num_items != BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS) {
654 BNXT_TF_DBG(ERR, "num items is invalid\n");
655 rte_flow_error_set(error, EINVAL,
656 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
657 "num items is invalid");
661 tun_entry = ulp_app_tun_match_entry(ulp_ctx, pmd_items->spec);
662 ulp_app_tun_entry_delete(tun_entry);
666 const struct rte_flow_ops bnxt_ulp_rte_flow_ops = {
667 .validate = bnxt_ulp_flow_validate,
668 .create = bnxt_ulp_flow_create,
669 .destroy = bnxt_ulp_flow_destroy,
670 .flush = bnxt_ulp_flow_flush,
671 .query = bnxt_ulp_flow_query,
673 /* Tunnel offload callbacks */
674 .tunnel_decap_set = bnxt_ulp_tunnel_decap_set,
675 .tunnel_match = bnxt_ulp_tunnel_match,
676 .tunnel_action_decap_release = bnxt_ulp_tunnel_decap_release,
677 .tunnel_item_release = bnxt_ulp_tunnel_item_release,
678 .get_restore_info = NULL