/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */
7 #include "bnxt_tf_common.h"
8 #include "ulp_rte_parser.h"
9 #include "ulp_matcher.h"
10 #include "ulp_flow_db.h"
11 #include "ulp_mapper.h"
12 #include "ulp_fc_mgr.h"
13 #include "ulp_port_db.h"
14 #include "ulp_ha_mgr.h"
16 #include <rte_malloc.h>
19 bnxt_ulp_flow_validate_args(const struct rte_flow_attr *attr,
20 const struct rte_flow_item pattern[],
21 const struct rte_flow_action actions[],
22 struct rte_flow_error *error)
24 /* Perform the validation of the arguments for null */
26 return BNXT_TF_RC_ERROR;
29 rte_flow_error_set(error,
31 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
34 return BNXT_TF_RC_ERROR;
38 rte_flow_error_set(error,
40 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
43 return BNXT_TF_RC_ERROR;
47 rte_flow_error_set(error,
49 RTE_FLOW_ERROR_TYPE_ATTR,
52 return BNXT_TF_RC_ERROR;
55 if (attr->egress && attr->ingress) {
56 rte_flow_error_set(error,
58 RTE_FLOW_ERROR_TYPE_ATTR,
60 "EGRESS AND INGRESS UNSUPPORTED");
61 return BNXT_TF_RC_ERROR;
63 return BNXT_TF_RC_SUCCESS;
67 bnxt_ulp_set_dir_attributes(struct ulp_rte_parser_params *params,
68 const struct rte_flow_attr *attr)
70 /* Set the flow attributes */
72 params->dir_attr |= BNXT_ULP_FLOW_ATTR_EGRESS;
74 params->dir_attr |= BNXT_ULP_FLOW_ATTR_INGRESS;
75 #if RTE_VERSION_NUM(17, 11, 10, 16) < RTE_VERSION
77 params->dir_attr |= BNXT_ULP_FLOW_ATTR_TRANSFER;
82 bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_create_parms *mapper_cparms,
83 struct ulp_rte_parser_params *params,
84 enum bnxt_ulp_fdb_type flow_type)
86 uint32_t ulp_flags = 0;
88 memset(mapper_cparms, 0, sizeof(*mapper_cparms));
89 mapper_cparms->flow_type = flow_type;
90 mapper_cparms->app_priority = params->priority;
91 mapper_cparms->dir_attr = params->dir_attr;
92 mapper_cparms->class_tid = params->class_id;
93 mapper_cparms->act_tid = params->act_tmpl;
94 mapper_cparms->func_id = params->func_id;
95 mapper_cparms->hdr_bitmap = ¶ms->hdr_bitmap;
96 mapper_cparms->enc_hdr_bitmap = ¶ms->enc_hdr_bitmap;
97 mapper_cparms->hdr_field = params->hdr_field;
98 mapper_cparms->enc_field = params->enc_field;
99 mapper_cparms->comp_fld = params->comp_fld;
100 mapper_cparms->act = ¶ms->act_bitmap;
101 mapper_cparms->act_prop = ¶ms->act_prop;
102 mapper_cparms->flow_id = params->fid;
103 mapper_cparms->parent_flow = params->parent_flow;
104 mapper_cparms->child_flow = params->child_flow;
105 mapper_cparms->fld_bitmap = ¶ms->fld_bitmap;
106 mapper_cparms->flow_pattern_id = params->flow_pattern_id;
107 mapper_cparms->act_pattern_id = params->act_pattern_id;
108 mapper_cparms->app_id = params->app_id;
109 mapper_cparms->port_id = params->port_id;
110 mapper_cparms->tun_idx = params->tun_idx;
112 /* update the signature fields into the computed field list */
113 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_HDR_SIG_ID,
115 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FLOW_SIG_ID,
116 params->flow_sig_id);
118 /* update the WC Priority flag */
119 if (!bnxt_ulp_cntxt_ptr2_ulp_flags_get(params->ulp_ctx, &ulp_flags) &&
120 ULP_HIGH_AVAIL_IS_ENABLED(ulp_flags)) {
121 enum ulp_ha_mgr_region region = ULP_HA_REGION_LOW;
124 rc = ulp_ha_mgr_region_get(params->ulp_ctx, ®ion);
126 BNXT_TF_DBG(ERR, "Unable to get WC region\n");
127 if (region == ULP_HA_REGION_HI)
128 ULP_COMP_FLD_IDX_WR(params,
129 BNXT_ULP_CF_IDX_WC_IS_HA_HIGH_REG,
134 /* Function to create the rte flow. */
135 static struct rte_flow *
136 bnxt_ulp_flow_create(struct rte_eth_dev *dev,
137 const struct rte_flow_attr *attr,
138 const struct rte_flow_item pattern[],
139 const struct rte_flow_action actions[],
140 struct rte_flow_error *error)
142 struct bnxt_ulp_mapper_create_parms mapper_cparms = { 0 };
143 struct ulp_rte_parser_params params;
144 struct bnxt_ulp_context *ulp_ctx;
145 int rc, ret = BNXT_TF_RC_ERROR;
146 struct rte_flow *flow_id;
150 if (bnxt_ulp_flow_validate_args(attr,
152 error) == BNXT_TF_RC_ERROR) {
153 BNXT_TF_DBG(ERR, "Invalid arguments being passed\n");
157 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
159 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
163 /* Initialize the parser params */
164 memset(¶ms, 0, sizeof(struct ulp_rte_parser_params));
165 params.ulp_ctx = ulp_ctx;
167 if (bnxt_ulp_cntxt_app_id_get(params.ulp_ctx, ¶ms.app_id)) {
168 BNXT_TF_DBG(ERR, "failed to get the app id\n");
172 /* Set the flow attributes */
173 bnxt_ulp_set_dir_attributes(¶ms, attr);
175 /* copy the device port id and direction for further processing */
176 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_INCOMING_IF,
178 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_DEV_PORT_ID,
180 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_SVIF_FLAG,
181 BNXT_ULP_INVALID_SVIF_VAL);
183 /* Get the function id */
184 if (ulp_port_db_port_func_id_get(ulp_ctx,
187 BNXT_TF_DBG(ERR, "conversion of port to func id failed\n");
191 /* Protect flow creation */
192 if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx)) {
193 BNXT_TF_DBG(ERR, "Flow db lock acquire failed\n");
197 /* Allocate a Flow ID for attaching all resources for the flow to.
198 * Once allocated, all errors have to walk the list of resources and
201 rc = ulp_flow_db_fid_alloc(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR,
204 BNXT_TF_DBG(ERR, "Unable to allocate flow table entry\n");
208 /* Parse the rte flow pattern */
209 ret = bnxt_ulp_rte_parser_hdr_parse(pattern, ¶ms);
210 if (ret != BNXT_TF_RC_SUCCESS)
213 /* Parse the rte flow action */
214 ret = bnxt_ulp_rte_parser_act_parse(actions, ¶ms);
215 if (ret != BNXT_TF_RC_SUCCESS)
219 params.func_id = func_id;
220 params.priority = attr->priority;
221 params.port_id = dev->data->port_id;
223 /* Perform the rte flow post process */
224 bnxt_ulp_rte_parser_post_process(¶ms);
226 /* do the tunnel offload process if any */
227 ret = ulp_tunnel_offload_process(¶ms);
228 if (ret == BNXT_TF_RC_ERROR)
231 ret = ulp_matcher_pattern_match(¶ms, ¶ms.class_id);
232 if (ret != BNXT_TF_RC_SUCCESS)
235 ret = ulp_matcher_action_match(¶ms, ¶ms.act_tmpl);
236 if (ret != BNXT_TF_RC_SUCCESS)
239 bnxt_ulp_init_mapper_params(&mapper_cparms, ¶ms,
240 BNXT_ULP_FDB_TYPE_REGULAR);
241 /* Call the ulp mapper to create the flow in the hardware. */
242 ret = ulp_mapper_flow_create(ulp_ctx, &mapper_cparms);
246 bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
248 flow_id = (struct rte_flow *)((uintptr_t)fid);
252 ulp_flow_db_fid_free(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, fid);
254 bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
256 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
257 "Failed to create flow.");
261 /* Function to validate the rte flow. */
263 bnxt_ulp_flow_validate(struct rte_eth_dev *dev,
264 const struct rte_flow_attr *attr,
265 const struct rte_flow_item pattern[],
266 const struct rte_flow_action actions[],
267 struct rte_flow_error *error)
269 struct ulp_rte_parser_params params;
270 struct bnxt_ulp_context *ulp_ctx;
271 uint32_t class_id, act_tmpl;
272 int ret = BNXT_TF_RC_ERROR;
274 if (bnxt_ulp_flow_validate_args(attr,
276 error) == BNXT_TF_RC_ERROR) {
277 BNXT_TF_DBG(ERR, "Invalid arguments being passed\n");
281 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
283 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
287 /* Initialize the parser params */
288 memset(¶ms, 0, sizeof(struct ulp_rte_parser_params));
289 params.ulp_ctx = ulp_ctx;
291 if (bnxt_ulp_cntxt_app_id_get(params.ulp_ctx, ¶ms.app_id)) {
292 BNXT_TF_DBG(ERR, "failed to get the app id\n");
296 /* Set the flow attributes */
297 bnxt_ulp_set_dir_attributes(¶ms, attr);
299 /* Parse the rte flow pattern */
300 ret = bnxt_ulp_rte_parser_hdr_parse(pattern, ¶ms);
301 if (ret != BNXT_TF_RC_SUCCESS)
304 /* Parse the rte flow action */
305 ret = bnxt_ulp_rte_parser_act_parse(actions, ¶ms);
306 if (ret != BNXT_TF_RC_SUCCESS)
309 /* Perform the rte flow post process */
310 bnxt_ulp_rte_parser_post_process(¶ms);
312 /* do the tunnel offload process if any */
313 ret = ulp_tunnel_offload_process(¶ms);
314 if (ret == BNXT_TF_RC_ERROR)
317 ret = ulp_matcher_pattern_match(¶ms, &class_id);
319 if (ret != BNXT_TF_RC_SUCCESS)
322 ret = ulp_matcher_action_match(¶ms, &act_tmpl);
323 if (ret != BNXT_TF_RC_SUCCESS)
326 /* all good return success */
330 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
331 "Failed to validate flow.");
335 /* Function to destroy the rte flow. */
337 bnxt_ulp_flow_destroy(struct rte_eth_dev *dev,
338 struct rte_flow *flow,
339 struct rte_flow_error *error)
341 struct bnxt_ulp_context *ulp_ctx;
346 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
348 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
350 rte_flow_error_set(error, EINVAL,
351 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
352 "Failed to destroy flow.");
356 flow_id = (uint32_t)(uintptr_t)flow;
358 if (ulp_port_db_port_func_id_get(ulp_ctx,
361 BNXT_TF_DBG(ERR, "conversion of port to func id failed\n");
363 rte_flow_error_set(error, EINVAL,
364 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
365 "Failed to destroy flow.");
369 if (ulp_flow_db_validate_flow_func(ulp_ctx, flow_id, func_id) ==
371 BNXT_TF_DBG(ERR, "Incorrect device params\n");
373 rte_flow_error_set(error, EINVAL,
374 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
375 "Failed to destroy flow.");
379 if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx)) {
380 BNXT_TF_DBG(ERR, "Flow db lock acquire failed\n");
383 ret = ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR,
386 BNXT_TF_DBG(ERR, "Failed to destroy flow.\n");
388 rte_flow_error_set(error, -ret,
389 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
390 "Failed to destroy flow.");
392 bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
397 /* Function to destroy the rte flows. */
399 bnxt_ulp_flow_flush(struct rte_eth_dev *eth_dev,
400 struct rte_flow_error *error)
402 struct bnxt_ulp_context *ulp_ctx;
406 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
411 /* Free the resources for the last device */
412 if (ulp_ctx_deinit_allowed(ulp_ctx)) {
413 ret = ulp_flow_db_session_flow_flush(ulp_ctx);
414 } else if (bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx)) {
415 ret = ulp_port_db_port_func_id_get(ulp_ctx,
416 eth_dev->data->port_id,
419 ret = ulp_flow_db_function_flow_flush(ulp_ctx, func_id);
421 BNXT_TF_DBG(ERR, "convert port to func id failed\n");
424 rte_flow_error_set(error, ret,
425 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
426 "Failed to flush flow.");
430 /* Function to query the rte flows. */
432 bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
433 struct rte_flow *flow,
434 const struct rte_flow_action *action,
436 struct rte_flow_error *error)
439 struct bnxt_ulp_context *ulp_ctx;
440 struct rte_flow_query_count *count;
443 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
445 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
446 rte_flow_error_set(error, EINVAL,
447 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
448 "Failed to query flow.");
452 flow_id = (uint32_t)(uintptr_t)flow;
454 switch (action->type) {
455 case RTE_FLOW_ACTION_TYPE_COUNT:
457 rc = ulp_fc_mgr_query_count_get(ulp_ctx, flow_id, count);
459 rte_flow_error_set(error, EINVAL,
460 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
461 "Failed to query flow.");
465 rte_flow_error_set(error, -rc, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
466 NULL, "Unsupported action item");
472 /* Tunnel offload Apis */
473 #define BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS 1
476 bnxt_ulp_tunnel_decap_set(struct rte_eth_dev *eth_dev,
477 struct rte_flow_tunnel *tunnel,
478 struct rte_flow_action **pmd_actions,
479 uint32_t *num_of_actions,
480 struct rte_flow_error *error)
482 struct bnxt_ulp_context *ulp_ctx;
483 struct bnxt_flow_app_tun_ent *tun_entry;
486 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
487 if (ulp_ctx == NULL) {
488 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
489 rte_flow_error_set(error, EINVAL,
490 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
491 "ULP context uninitialized");
495 if (tunnel == NULL) {
496 BNXT_TF_DBG(ERR, "No tunnel specified\n");
497 rte_flow_error_set(error, EINVAL,
498 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
499 "no tunnel specified");
503 if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
504 BNXT_TF_DBG(ERR, "Tunnel type unsupported\n");
505 rte_flow_error_set(error, EINVAL,
506 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
507 "tunnel type unsupported");
511 rc = ulp_app_tun_search_entry(ulp_ctx, tunnel, &tun_entry);
513 rte_flow_error_set(error, EINVAL,
514 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
515 "tunnel decap set failed");
519 rc = ulp_app_tun_entry_set_decap_action(tun_entry);
521 rte_flow_error_set(error, EINVAL,
522 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
523 "tunnel decap set failed");
527 *pmd_actions = &tun_entry->action;
528 *num_of_actions = BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS;
533 bnxt_ulp_tunnel_match(struct rte_eth_dev *eth_dev,
534 struct rte_flow_tunnel *tunnel,
535 struct rte_flow_item **pmd_items,
536 uint32_t *num_of_items,
537 struct rte_flow_error *error)
539 struct bnxt_ulp_context *ulp_ctx;
540 struct bnxt_flow_app_tun_ent *tun_entry;
543 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
544 if (ulp_ctx == NULL) {
545 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
546 rte_flow_error_set(error, EINVAL,
547 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
548 "ULP context uninitialized");
552 if (tunnel == NULL) {
553 BNXT_TF_DBG(ERR, "No tunnel specified\n");
554 rte_flow_error_set(error, EINVAL,
555 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
556 "no tunnel specified");
560 if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
561 BNXT_TF_DBG(ERR, "Tunnel type unsupported\n");
562 rte_flow_error_set(error, EINVAL,
563 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
564 "tunnel type unsupported");
568 rc = ulp_app_tun_search_entry(ulp_ctx, tunnel, &tun_entry);
570 rte_flow_error_set(error, EINVAL,
571 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
572 "tunnel match set failed");
576 rc = ulp_app_tun_entry_set_decap_item(tun_entry);
578 rte_flow_error_set(error, EINVAL,
579 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
580 "tunnel match set failed");
584 *pmd_items = &tun_entry->item;
585 *num_of_items = BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS;
590 bnxt_ulp_tunnel_decap_release(struct rte_eth_dev *eth_dev,
591 struct rte_flow_action *pmd_actions,
592 uint32_t num_actions,
593 struct rte_flow_error *error)
595 struct bnxt_ulp_context *ulp_ctx;
596 struct bnxt_flow_app_tun_ent *tun_entry;
597 const struct rte_flow_action *action_item = pmd_actions;
599 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
600 if (ulp_ctx == NULL) {
601 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
602 rte_flow_error_set(error, EINVAL,
603 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
604 "ULP context uninitialized");
607 if (num_actions != BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS) {
608 BNXT_TF_DBG(ERR, "num actions is invalid\n");
609 rte_flow_error_set(error, EINVAL,
610 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
611 "num actions is invalid");
614 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
615 if (action_item->type == (typeof(tun_entry->action.type))
616 BNXT_RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
617 tun_entry = ulp_app_tun_match_entry(ulp_ctx,
619 ulp_app_tun_entry_delete(tun_entry);
627 bnxt_ulp_tunnel_item_release(struct rte_eth_dev *eth_dev,
628 struct rte_flow_item *pmd_items,
630 struct rte_flow_error *error)
632 struct bnxt_ulp_context *ulp_ctx;
633 struct bnxt_flow_app_tun_ent *tun_entry;
635 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
636 if (ulp_ctx == NULL) {
637 BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
638 rte_flow_error_set(error, EINVAL,
639 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
640 "ULP context uninitialized");
643 if (num_items != BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS) {
644 BNXT_TF_DBG(ERR, "num items is invalid\n");
645 rte_flow_error_set(error, EINVAL,
646 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
647 "num items is invalid");
651 tun_entry = ulp_app_tun_match_entry(ulp_ctx, pmd_items->spec);
652 ulp_app_tun_entry_delete(tun_entry);
656 const struct rte_flow_ops bnxt_ulp_rte_flow_ops = {
657 .validate = bnxt_ulp_flow_validate,
658 .create = bnxt_ulp_flow_create,
659 .destroy = bnxt_ulp_flow_destroy,
660 .flush = bnxt_ulp_flow_flush,
661 .query = bnxt_ulp_flow_query,
663 /* Tunnel offload callbacks */
664 .tunnel_decap_set = bnxt_ulp_tunnel_decap_set,
665 .tunnel_match = bnxt_ulp_tunnel_match,
666 .tunnel_action_decap_release = bnxt_ulp_tunnel_decap_release,
667 .tunnel_item_release = bnxt_ulp_tunnel_item_release,
668 .get_restore_info = NULL