1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
20 #include "ulp_template_db_tbl.h"
22 /* Local defines for the parsing functions */
/*
 * VLAN TCI layout helpers: PCP occupies the top 3 bits (shift 13),
 * VID the low 12 bits.  NOTE(review): ULP_VLAN_PRIORITY_MASK is 0x700,
 * the 3-bit PCP mask as it appears after the htons() conversion done in
 * ulp_rte_vlan_hdr_handler on a little-endian host — confirm against
 * upstream before changing.  4789 is the IANA-assigned VXLAN UDP port.
 */
23 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
24 #define ULP_VLAN_PRIORITY_MASK 0x700
25 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
26 #define ULP_UDP_PORT_VXLAN 4789
28 /* Utility function to skip the void items. */
/*
 * Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries.
 * NOTE(review): interior lines (return type, loop body/increment step,
 * braces) are missing from this excerpt; behavior beyond the visible
 * loop condition cannot be confirmed here.
 */
30 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
36 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
43 /* Utility function to copy field spec items */
/*
 * Copies 'size' bytes of caller data into field->spec.  Returns a
 * struct ulp_rte_hdr_field * per the visible signature — presumably the
 * next field slot; TODO confirm, the remaining parameters and the
 * return statement are missing from this excerpt.
 */
44 static struct ulp_rte_hdr_field *
45 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
50 memcpy(field->spec, buffer, field->size);
55 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] in the parser field bitmaps: when the field's
 * mask is non-zero the field is flagged in fld_bitmap (and in
 * fld_s_bitmap unless the action requests MATCH_IGNORE); a partial
 * (not-all-ones) mask additionally sets the wildcard-match computed
 * field.  Otherwise the fld_bitmap bit is cleared.
 * NOTE(review): "¶ms" below is mojibake of "&params" (HTML entity
 * damage) — code kept byte-identical; fix against upstream.  Interior
 * lines (idx parameter, braces) are missing from this excerpt.
 */
57 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
59 enum bnxt_ulp_prsr_action prsr_act)
61 struct ulp_rte_hdr_field *field;
63 field = ¶ms->hdr_field[idx];
64 if (ulp_bitmap_notzero(field->mask, field->size)) {
65 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
66 if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
67 ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
/* Partial mask => wildcard match for template selection */
69 if (!ulp_bitmap_is_ones(field->mask, field->size))
70 ULP_COMP_FLD_IDX_WR(params,
71 BNXT_ULP_CF_IDX_WC_MATCH, 1);
73 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
/*
 * Safe member-address helper: evaluates to &x->y, or NULL when x is
 * NULL.  Used to pass optional rte_flow spec/mask members to the field
 * copy helpers.  NOTE(review): "deference" is a misspelling of
 * "dereference"; name kept as-is since callers throughout use it.
 */
77 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
78 /* Utility function to copy field spec and masks items */
/*
 * Copies an item's mask (unless MASK_IGNORE) and spec bytes into
 * hdr_field[*idx], updating the field bitmaps via
 * ulp_rte_parser_field_bitmap_update.  The spec is copied only when a
 * non-zero mask accompanies it.  NOTE(review): "¶ms" is mojibake of
 * "&params"; the idx/size parameters, braces and the index-increment
 * statement are missing from this excerpt.
 */
80 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
83 const void *spec_buff,
84 const void *mask_buff,
85 enum bnxt_ulp_prsr_action prsr_act)
87 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
89 /* update the field size */
92 /* copy the mask specifications only if mask is not null */
93 if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
94 memcpy(field->mask, mask_buff, size);
95 ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
98 /* copy the protocol specifications only if mask is not null*/
99 if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
100 memcpy(field->spec, spec_buff, size);
102 /* Increment the index */
106 /* Utility function to copy field spec and masks items */
/*
 * Reserves 'size' field slots: rejects the request if it would run
 * past BNXT_ULP_PROTO_HDR_MAX, otherwise returns the current
 * field_idx through *idx and bumps params->field_idx by size.
 * NOTE(review): the size parameter declaration and return statements
 * are missing from this excerpt.
 */
108 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
112 if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
113 BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
116 *idx = params->field_idx;
117 params->field_idx += size;
122 * Function to handle the parsing of RTE Flows and placing
123 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow pattern array until TYPE_END, dispatching each
 * item to its registered proto_hdr_func from either ulp_hdr_info
 * (standard items) or ulp_vendor_hdr_info (Broadcom vendor items in the
 * BNXT_RTE_FLOW_ITEM_TYPE_END..LAST range).  Unsupported items jump to
 * hdr_parser_error.  On success falls through to set the implied SVIF.
 * NOTE(review): several interior lines (braces, else arms, the
 * hdr_parser_error label) are missing from this excerpt.
 */
126 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
127 struct ulp_rte_parser_params *params)
129 const struct rte_flow_item *item = pattern;
130 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* SVIF fields occupy the first slots; start past them */
132 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
134 /* Set the computed flags for no vlan tags before parsing */
135 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
136 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
138 /* Parse all the items in the pattern */
139 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
140 if (item->type >= (uint32_t)
141 BNXT_RTE_FLOW_ITEM_TYPE_END) {
143 (uint32_t)BNXT_RTE_FLOW_ITEM_TYPE_LAST)
144 goto hdr_parser_error;
145 /* get the header information */
146 hdr_info = &ulp_vendor_hdr_info[item->type -
147 BNXT_RTE_FLOW_ITEM_TYPE_END];
149 if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
150 goto hdr_parser_error;
151 hdr_info = &ulp_hdr_info[item->type];
153 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
154 goto hdr_parser_error;
155 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
156 /* call the registered callback handler */
157 if (hdr_info->proto_hdr_func) {
158 if (hdr_info->proto_hdr_func(item, params) !=
159 BNXT_TF_RC_SUCCESS) {
160 return BNXT_TF_RC_ERROR;
166 /* update the implied SVIF */
167 return ulp_rte_parser_implicit_match_port_process(params);
170 BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
172 return BNXT_TF_RC_PARSE_ERR;
176 * Function to handle the parsing of RTE Flows and placing
177 * the RTE flow actions into the ulp structures.
/*
 * Mirror of the header parser for actions: walks the action array
 * until ACTION_TYPE_END, dispatching each to its proto_act_func from
 * ulp_act_info or, for the vendor range, ulp_vendor_act_info.
 * On success applies the implicit action port and returns SUCCESS.
 * NOTE(review): interior lines (braces, else arms, the
 * act_parser_error label, action_item increment) are missing from this
 * excerpt.
 */
180 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
181 struct ulp_rte_parser_params *params)
183 const struct rte_flow_action *action_item = actions;
184 struct bnxt_ulp_rte_act_info *hdr_info;
186 /* Parse all the items in the pattern */
187 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
188 if (action_item->type >=
189 (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_END) {
190 if (action_item->type >=
191 (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_LAST)
192 goto act_parser_error;
193 /* get the header information from bnxt actinfo table */
194 hdr_info = &ulp_vendor_act_info[action_item->type -
195 BNXT_RTE_FLOW_ACTION_TYPE_END];
197 if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
198 goto act_parser_error;
199 /* get the header information from the act info table */
200 hdr_info = &ulp_act_info[action_item->type];
202 if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
203 goto act_parser_error;
204 } else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
205 /* call the registered callback handler */
206 if (hdr_info->proto_act_func) {
207 if (hdr_info->proto_act_func(action_item,
209 BNXT_TF_RC_SUCCESS) {
210 return BNXT_TF_RC_ERROR;
216 /* update the implied port details */
217 ulp_rte_parser_implicit_act_port_process(params);
218 return BNXT_TF_RC_SUCCESS;
221 BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
223 return BNXT_TF_RC_ERROR;
227 * Function to handle the post processing of the computed
228 * fields for the interface.
/*
 * Resolves the incoming dpdk port to a ulp ifindex and populates the
 * PARIF computed fields: PHY_PORT_PARIF for ingress, then for VF-rep
 * match ports the VF-func PARIF (plus MATCH_PORT_IS_VFREP), otherwise
 * the drv-func PARIF; also flags MATCH_PORT_IS_PF for PF match ports.
 * NOTE(review): error-return lines, braces and the ifindex/mtype
 * declarations are missing from this excerpt.
 */
231 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
234 uint16_t port_id, parif;
236 enum bnxt_ulp_direction_type dir;
238 /* get the direction details */
239 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
241 /* read the port id details */
242 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
243 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
246 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
250 if (dir == BNXT_ULP_DIR_INGRESS) {
/* Ingress flows match on the physical port PARIF */
252 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
253 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
254 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
257 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
260 /* Get the match port type */
261 mtype = ULP_COMP_FLD_IDX_RD(params,
262 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
263 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
264 ULP_COMP_FLD_IDX_WR(params,
265 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
267 /* Set VF func PARIF */
268 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
269 BNXT_ULP_VF_FUNC_PARIF,
272 "ParseErr:ifindex is not valid\n");
275 ULP_COMP_FLD_IDX_WR(params,
276 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
280 /* Set DRV func PARIF */
281 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
282 BNXT_ULP_DRV_FUNC_PARIF,
285 "ParseErr:ifindex is not valid\n");
288 ULP_COMP_FLD_IDX_WR(params,
289 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
292 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
293 ULP_COMP_FLD_IDX_WR(params,
294 BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
/*
 * Post-parse fixups for a normal (non-tunnel-offload) flow: stamps the
 * egress direction bit into both hdr and act bitmaps, derives the
 * VF-to-VF flag, translates ACT_BIT_DEC_TTL into the tunnel or plain
 * dec-ttl computed field (tunnel only for VXLAN without a decap
 * action), folds hdr_fp_bit into hdr_bitmap, records the flow fid and
 * updates the interface computed fields.
 * NOTE(review): braces and else arms are missing from this excerpt.
 */
301 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
303 enum bnxt_ulp_intf_type match_port_type, act_port_type;
304 enum bnxt_ulp_direction_type dir;
305 uint32_t act_port_set;
307 /* Get the computed details */
308 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
309 match_port_type = ULP_COMP_FLD_IDX_RD(params,
310 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
311 act_port_type = ULP_COMP_FLD_IDX_RD(params,
312 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
313 act_port_set = ULP_COMP_FLD_IDX_RD(params,
314 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
316 /* set the flow direction in the proto and action header */
317 if (dir == BNXT_ULP_DIR_EGRESS) {
318 ULP_BITMAP_SET(params->hdr_bitmap.bits,
319 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
320 ULP_BITMAP_SET(params->act_bitmap.bits,
321 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
324 /* calculate the VF to VF flag */
325 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
326 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
327 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
329 /* Update the decrement ttl computational fields */
330 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
331 BNXT_ULP_ACT_BIT_DEC_TTL)) {
333 * Check that vxlan proto is included and vxlan decap
334 * action is not set then decrement tunnel ttl.
335 * Similarly add GRE and NVGRE in future.
337 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
338 BNXT_ULP_HDR_BIT_T_VXLAN) &&
339 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
340 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
341 ULP_COMP_FLD_IDX_WR(params,
342 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
344 ULP_COMP_FLD_IDX_WR(params,
345 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
349 /* Merge the hdr_fp_bit into the proto header bit */
350 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
352 /* Update the comp fld fid */
353 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
355 /* Update the computed interface parameters */
356 bnxt_ulp_comp_fld_intf_update(params);
358 /* TBD: Handle the flow rejection scenarios */
363 * Function to handle the post processing of the parsing details
/*
 * Thin entry point that delegates to ulp_post_process_normal_flow();
 * the signature line and return are missing from this excerpt.
 */
366 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
368 ulp_post_process_normal_flow(params);
372 * Function to compute the flow direction based on the match port details
/*
 * Writes BNXT_ULP_CF_IDX_DIRECTION: an attr-ingress flow whose match
 * port is a VF representor is treated as egress (traffic from the VF);
 * otherwise the direction follows the flow attribute
 * (ingress/egress).  NOTE(review): braces/else lines are missing from
 * this excerpt.
 */
375 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
377 enum bnxt_ulp_intf_type match_port_type;
379 /* Get the match port type */
380 match_port_type = ULP_COMP_FLD_IDX_RD(params,
381 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
383 /* If ingress flow and matchport is vf rep then dir is egress*/
384 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
385 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
386 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
387 BNXT_ULP_DIR_EGRESS);
389 /* Assign the input direction */
390 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
391 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
392 BNXT_ULP_DIR_INGRESS);
394 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
395 BNXT_ULP_DIR_EGRESS);
399 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Records the SVIF match for the flow: rejects a second source item,
 * resolves the port type for ifindex, computes the direction, selects
 * the SVIF kind (phy-port SVIF on ingress; VF-func or drv-func SVIF on
 * egress) and writes the big-endian SVIF spec/mask into the reserved
 * SVIF hdr_field slot plus the SVIF_FLAG computed field.
 * NOTE(review): "¶ms" is mojibake of "&params"; the ifindex/mask
 * parameters, svif declaration and several braces are missing from
 * this excerpt.  The header comment above ("PF Header") appears
 * stale for this helper — confirm against upstream.
 */
401 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
406 enum bnxt_ulp_direction_type dir;
407 struct ulp_rte_hdr_field *hdr_field;
408 enum bnxt_ulp_svif_type svif_type;
409 enum bnxt_ulp_intf_type port_type;
411 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
412 BNXT_ULP_INVALID_SVIF_VAL) {
414 "SVIF already set,multiple source not support'd\n");
415 return BNXT_TF_RC_ERROR;
418 /* Get port type details */
419 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
420 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
421 BNXT_TF_DBG(ERR, "Invalid port type\n");
422 return BNXT_TF_RC_ERROR;
425 /* Update the match port type */
426 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
428 /* compute the direction */
429 bnxt_ulp_rte_parser_direction_compute(params);
431 /* Get the computed direction */
432 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
433 if (dir == BNXT_ULP_DIR_INGRESS) {
434 svif_type = BNXT_ULP_PHY_PORT_SVIF;
436 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
437 svif_type = BNXT_ULP_VF_FUNC_SVIF;
439 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
441 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* SVIF is stored big-endian in the match field */
443 svif = rte_cpu_to_be_16(svif);
444 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
445 memcpy(hdr_field->spec, &svif, sizeof(svif));
446 memcpy(hdr_field->mask, &mask, sizeof(mask));
447 hdr_field->size = sizeof(svif);
448 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
449 rte_be_to_cpu_16(svif));
450 return BNXT_TF_RC_SUCCESS;
453 /* Function to handle the parsing of the RTE port id */
/*
 * Applies the implicit source match: if no explicit SVIF item was seen
 * (SVIF_FLAG still invalid), derives ifindex from the incoming dpdk
 * port and sets a full-mask SVIF via ulp_rte_parser_svif_set.
 * NOTE(review): the ifindex declaration and final return are missing
 * from this excerpt.
 */
455 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
457 uint16_t port_id = 0;
458 uint16_t svif_mask = 0xFFFF;
460 int32_t rc = BNXT_TF_RC_ERROR;
462 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
463 BNXT_ULP_INVALID_SVIF_VAL)
464 return BNXT_TF_RC_SUCCESS;
466 /* SVIF not set. So get the port id */
467 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
469 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
472 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
476 /* Update the SVIF details */
477 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
481 /* Function to handle the implicit action port id */
/*
 * When no explicit port action was parsed, synthesizes a port_id
 * action pointing at the incoming interface and runs it through
 * ulp_rte_port_id_act_handler, then clears ACT_PORT_IS_SET again so
 * the implicit choice is not mistaken for an explicit one.
 */
483 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
485 struct rte_flow_action action_item = {0};
486 struct rte_flow_action_port_id port_id = {0};
488 /* Read the action port set bit */
489 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
490 /* Already set, so just exit */
491 return BNXT_TF_RC_SUCCESS;
493 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
494 action_item.conf = &port_id;
496 /* Update the action port based on incoming port */
497 ulp_rte_port_id_act_handler(&action_item, params);
499 /* Reset the action port set bit */
500 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
501 return BNXT_TF_RC_SUCCESS;
504 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * The PF item carries no spec; the match source is the incoming dpdk
 * port itself.  Converts it to a ulp ifindex and installs a full-mask
 * SVIF match.  NOTE(review): the ifindex declaration is missing from
 * this excerpt.
 */
506 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
507 struct ulp_rte_parser_params *params)
509 uint16_t port_id = 0;
510 uint16_t svif_mask = 0xFFFF;
513 /* Get the implicit port id */
514 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
516 /* perform the conversion from dpdk port to bnxt ifindex */
517 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
520 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
521 return BNXT_TF_RC_ERROR;
524 /* Update the SVIF details */
525 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
528 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * Validates the VF item's spec and mask (both required per the visible
 * error paths), converts the VF function id to a ulp ifindex and
 * installs the SVIF match.  NOTE(review): the validation conditions,
 * mask/ifindex declarations and error returns are missing from this
 * excerpt.
 */
530 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
531 struct ulp_rte_parser_params *params)
533 const struct rte_flow_item_vf *vf_spec = item->spec;
534 const struct rte_flow_item_vf *vf_mask = item->mask;
537 int32_t rc = BNXT_TF_RC_PARSE_ERR;
539 /* Get VF rte_flow_item for Port details */
541 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
545 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
550 /* perform the conversion from VF Func id to bnxt ifindex */
551 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
554 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
557 /* Update the SVIF details */
558 return ulp_rte_parser_svif_set(params, ifindex, mask);
561 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * Matches on a dpdk port id: validates spec/mask, converts the dpdk
 * port to a ulp ifindex and installs the SVIF match with the caller's
 * id mask.  NOTE(review): the validation conditions and mask/ifindex
 * declarations are missing from this excerpt; the "Phy Port mask"
 * error text looks copy-pasted from the phy-port handler — confirm
 * against upstream before changing.
 */
563 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
564 struct ulp_rte_parser_params *params)
566 const struct rte_flow_item_port_id *port_spec = item->spec;
567 const struct rte_flow_item_port_id *port_mask = item->mask;
569 int32_t rc = BNXT_TF_RC_PARSE_ERR;
573 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
577 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
580 mask = port_mask->id;
582 /* perform the conversion from dpdk port to bnxt ifindex */
583 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
586 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
589 /* Update the SVIF details */
590 return ulp_rte_parser_svif_set(params, ifindex, mask);
593 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * Matches on a physical port index: validates spec/mask, forces the
 * match port type to PHY_PORT, computes the direction and rejects
 * egress (phy-port match is ingress-only per the visible error path),
 * looks up the port's SVIF and writes it big-endian into the SVIF
 * hdr_field slot.  NOTE(review): "¶ms" is mojibake of "&params";
 * validation conditions, svif/mask declarations and some braces are
 * missing from this excerpt.
 */
595 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
596 struct ulp_rte_parser_params *params)
598 const struct rte_flow_item_phy_port *port_spec = item->spec;
599 const struct rte_flow_item_phy_port *port_mask = item->mask;
601 int32_t rc = BNXT_TF_RC_ERROR;
603 enum bnxt_ulp_direction_type dir;
604 struct ulp_rte_hdr_field *hdr_field;
606 /* Copy the rte_flow_item for phy port into hdr_field */
608 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
612 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
615 mask = port_mask->index;
617 /* Update the match port type */
618 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
619 BNXT_ULP_INTF_TYPE_PHY_PORT);
621 /* Compute the Hw direction */
622 bnxt_ulp_rte_parser_direction_compute(params);
624 /* Direction validation */
625 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
626 if (dir == BNXT_ULP_DIR_EGRESS) {
628 "Parse Err:Phy ports are valid only for ingress\n");
629 return BNXT_TF_RC_PARSE_ERR;
632 /* Get the physical port details from port db */
633 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
636 BNXT_TF_DBG(ERR, "Failed to get port details\n");
637 return BNXT_TF_RC_PARSE_ERR;
640 /* Update the SVIF details */
641 svif = rte_cpu_to_be_16(svif);
642 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
643 memcpy(hdr_field->spec, &svif, sizeof(svif));
644 memcpy(hdr_field->mask, &mask, sizeof(mask));
645 hdr_field->size = sizeof(svif);
646 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
647 rte_be_to_cpu_16(svif));
648 return BNXT_TF_RC_SUCCESS;
651 /* Function to handle the update of proto header based on field values */
/*
 * Maps a big-endian ethertype to the fast-path header bitmap: IPv4 or
 * IPv6 inner/outer bits plus the corresponding I_L3/O_L3 computed
 * flags, selected by in_flag.  NOTE(review): the if (in_flag)/else
 * lines and closing braces are missing from this excerpt.
 */
653 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
654 uint16_t type, uint32_t in_flag)
656 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
658 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
659 BNXT_ULP_HDR_BIT_I_IPV4);
660 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
662 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
663 BNXT_ULP_HDR_BIT_O_IPV4);
664 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
666 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
668 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
669 BNXT_ULP_HDR_BIT_I_IPV6);
670 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
672 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
673 BNXT_ULP_HDR_BIT_O_IPV6);
674 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
679 /* Internal Function to identify broadcast or multicast packets */
/*
 * Returns whether the MAC is multicast or broadcast; offload of such
 * addresses is not supported per the visible log text.
 * NOTE(review): the return statements, the BNXT_TF_DBG call line and
 * braces are missing from this excerpt.
 */
681 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
683 if (rte_is_multicast_ether_addr(eth_addr) ||
684 rte_is_broadcast_ether_addr(eth_addr)) {
686 "No support for bcast or mcast addr offload\n");
692 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Parses an ETH item: rejects bcast/mcast dst or src addresses
 * (workaround per the Todo note), reserves ETH_NUM field slots, copies
 * dst/src/type spec+mask into hdr_field (ethertype with MATCH_IGNORE),
 * then classifies the header as inner ETH if an outer L2/L3/L4 header
 * is already present, else outer ETH, and forwards the ethertype to
 * ulp_rte_l2_proto_type_update.
 * NOTE(review): "ð_spec" is mojibake of "&eth_spec" (HTML entity
 * damage) — kept byte-identical; several interior lines (eth_spec NULL
 * checks, size/dmac_idx assignments, inner_flag set, braces) are
 * missing from this excerpt.
 */
694 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
695 struct ulp_rte_parser_params *params)
697 const struct rte_flow_item_eth *eth_spec = item->spec;
698 const struct rte_flow_item_eth *eth_mask = item->mask;
699 uint32_t idx = 0, dmac_idx = 0;
701 uint16_t eth_type = 0;
702 uint32_t inner_flag = 0;
704 /* Perform validations */
706 /* Todo: work around to avoid multicast and broadcast addr */
707 if (ulp_rte_parser_is_bcmc_addr(ð_spec->dst))
708 return BNXT_TF_RC_PARSE_ERR;
710 if (ulp_rte_parser_is_bcmc_addr(ð_spec->src))
711 return BNXT_TF_RC_PARSE_ERR;
713 eth_type = eth_spec->type;
716 if (ulp_rte_prsr_fld_size_validate(params, &idx,
717 BNXT_ULP_PROTO_HDR_ETH_NUM)) {
718 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
719 return BNXT_TF_RC_ERROR;
722 * Copy the rte_flow_item for eth into hdr_field using ethernet
726 size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
727 ulp_rte_prsr_fld_mask(params, &idx, size,
728 ulp_deference_struct(eth_spec, dst.addr_bytes),
729 ulp_deference_struct(eth_mask, dst.addr_bytes),
730 ULP_PRSR_ACT_DEFAULT);
732 size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
733 ulp_rte_prsr_fld_mask(params, &idx, size,
734 ulp_deference_struct(eth_spec, src.addr_bytes),
735 ulp_deference_struct(eth_mask, src.addr_bytes),
736 ULP_PRSR_ACT_DEFAULT);
738 size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
739 ulp_rte_prsr_fld_mask(params, &idx, size,
740 ulp_deference_struct(eth_spec, type),
741 ulp_deference_struct(eth_mask, type),
742 ULP_PRSR_ACT_MATCH_IGNORE);
744 /* Update the protocol hdr bitmap */
745 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
746 BNXT_ULP_HDR_BIT_O_ETH) ||
747 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
748 BNXT_ULP_HDR_BIT_O_IPV4) ||
749 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
750 BNXT_ULP_HDR_BIT_O_IPV6) ||
751 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
752 BNXT_ULP_HDR_BIT_O_UDP) ||
753 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
754 BNXT_ULP_HDR_BIT_O_TCP)) {
755 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
758 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
759 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
762 /* Update the field protocol hdr bitmap */
763 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
765 return BNXT_TF_RC_SUCCESS;
768 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Parses a VLAN item: splits the TCI into priority (PCP, top 3 bits)
 * and VID (low 12 bits) with matching masks, widening an exact 3-bit
 * or 12-bit mask to full-field so the match is exact; copies
 * priority/tag/inner_type into hdr_field (priority MASK_IGNORE,
 * inner ethertype MATCH_IGNORE); then classifies the tag as outer #1,
 * outer #2, inner #1 or inner #2 from the O_ETH/I_ETH bits and the
 * running vtag counts, setting the corresponding OO/OI/IO/II VLAN
 * header bit, vtag computed fields, and the FB_VID flag when the VID
 * mask is non-zero.  A VLAN without a preceding ETH is an error.
 * NOTE(review): several interior lines (vlan_spec/vlan_mask NULL
 * checks, idx/size declarations, inner_flag update, else arms and
 * braces) are missing from this excerpt.
 */
770 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
771 struct ulp_rte_parser_params *params)
773 const struct rte_flow_item_vlan *vlan_spec = item->spec;
774 const struct rte_flow_item_vlan *vlan_mask = item->mask;
775 struct ulp_rte_hdr_bitmap *hdr_bit;
777 uint16_t vlan_tag = 0, priority = 0;
778 uint16_t vlan_tag_mask = 0, priority_mask = 0;
779 uint32_t outer_vtag_num;
780 uint32_t inner_vtag_num;
781 uint16_t eth_type = 0;
782 uint32_t inner_flag = 0;
786 vlan_tag = ntohs(vlan_spec->tci);
787 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
788 vlan_tag &= ULP_VLAN_TAG_MASK;
789 vlan_tag = htons(vlan_tag);
790 eth_type = vlan_spec->inner_type;
794 vlan_tag_mask = ntohs(vlan_mask->tci);
795 priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
796 vlan_tag_mask &= 0xfff;
799 * the storage for priority and vlan tag is 2 bytes
800 * The mask of priority which is 3 bits if it is all 1's
801 * then make the rest bits 13 bits as 1's
802 * so that it is matched as exact match.
804 if (priority_mask == ULP_VLAN_PRIORITY_MASK)
805 priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
806 if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
807 vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
808 vlan_tag_mask = htons(vlan_tag_mask);
811 if (ulp_rte_prsr_fld_size_validate(params, &idx,
812 BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
813 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
814 return BNXT_TF_RC_ERROR;
818 * Copy the rte_flow_item for vlan into hdr_field using Vlan
821 size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
823 * The priority field is ignored since OVS is setting it as
824 * wild card match and it is not supported. This is a work
825 * around and shall be addressed in the future.
827 ulp_rte_prsr_fld_mask(params, &idx, size,
829 (vlan_mask) ? &priority_mask : NULL,
830 ULP_PRSR_ACT_MASK_IGNORE);
832 ulp_rte_prsr_fld_mask(params, &idx, size,
834 (vlan_mask) ? &vlan_tag_mask : NULL,
835 ULP_PRSR_ACT_DEFAULT);
837 size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
838 ulp_rte_prsr_fld_mask(params, &idx, size,
839 ulp_deference_struct(vlan_spec, inner_type),
840 ulp_deference_struct(vlan_mask, inner_type),
841 ULP_PRSR_ACT_MATCH_IGNORE);
843 /* Get the outer tag and inner tag counts */
844 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
845 BNXT_ULP_CF_IDX_O_VTAG_NUM);
846 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
847 BNXT_ULP_CF_IDX_I_VTAG_NUM);
849 /* Update the hdr_bitmap of the vlans */
850 hdr_bit = ¶ms->hdr_bitmap;
/* First tag after the outer ETH: outer-outer VLAN */
851 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
852 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
854 /* Update the vlan tag num */
856 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
858 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
859 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
860 ULP_BITMAP_SET(params->hdr_bitmap.bits,
861 BNXT_ULP_HDR_BIT_OO_VLAN);
862 if (vlan_mask && vlan_tag_mask)
863 ULP_COMP_FLD_IDX_WR(params,
864 BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);
/* Second tag on the outer ETH: outer-inner VLAN */
866 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
867 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
868 outer_vtag_num == 1) {
869 /* update the vlan tag num */
871 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
873 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
874 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
875 ULP_BITMAP_SET(params->hdr_bitmap.bits,
876 BNXT_ULP_HDR_BIT_OI_VLAN);
877 if (vlan_mask && vlan_tag_mask)
878 ULP_COMP_FLD_IDX_WR(params,
879 BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);
/* First tag after the inner ETH: inner-outer VLAN */
881 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
882 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
884 /* update the vlan tag num */
886 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
888 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
889 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
890 ULP_BITMAP_SET(params->hdr_bitmap.bits,
891 BNXT_ULP_HDR_BIT_IO_VLAN);
892 if (vlan_mask && vlan_tag_mask)
893 ULP_COMP_FLD_IDX_WR(params,
894 BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
/* Second tag on the inner ETH: inner-inner VLAN */
896 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
897 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
898 inner_vtag_num == 1) {
899 /* update the vlan tag num */
901 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
903 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
904 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
905 ULP_BITMAP_SET(params->hdr_bitmap.bits,
906 BNXT_ULP_HDR_BIT_II_VLAN);
907 if (vlan_mask && vlan_tag_mask)
908 ULP_COMP_FLD_IDX_WR(params,
909 BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
912 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
913 return BNXT_TF_RC_ERROR;
915 /* Update the field protocol hdr bitmap */
916 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
917 return BNXT_TF_RC_SUCCESS;
920 /* Function to handle the update of proto header based on field values */
/*
 * Maps an IP protocol number to the header bitmaps: UDP/TCP set the
 * inner or outer fast-path bit and the L4 computed flag per in_flag;
 * GRE sets the tunnel GRE bit; ICMP selects inner vs outer by the
 * L3_TUN computed field.  The tail writes the L3 protocol-id computed
 * fields (inner or outer, FB and full).  NOTE(review): the if
 * (in_flag)/else lines, value arguments of the trailing writes and
 * closing braces are missing from this excerpt.
 */
922 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
923 uint8_t proto, uint32_t in_flag)
925 if (proto == IPPROTO_UDP) {
927 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
928 BNXT_ULP_HDR_BIT_I_UDP);
929 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
931 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
932 BNXT_ULP_HDR_BIT_O_UDP);
933 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
935 } else if (proto == IPPROTO_TCP) {
937 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
938 BNXT_ULP_HDR_BIT_I_TCP);
939 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
941 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
942 BNXT_ULP_HDR_BIT_O_TCP);
943 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
945 } else if (proto == IPPROTO_GRE) {
946 ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
947 } else if (proto == IPPROTO_ICMP) {
948 if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
949 ULP_BITMAP_SET(param->hdr_bitmap.bits,
950 BNXT_ULP_HDR_BIT_I_ICMP);
952 ULP_BITMAP_SET(param->hdr_bitmap.bits,
953 BNXT_ULP_HDR_BIT_O_ICMP);
957 ULP_COMP_FLD_IDX_WR(param,
958 BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
960 ULP_COMP_FLD_IDX_WR(param,
961 BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
964 ULP_COMP_FLD_IDX_WR(param,
965 BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
967 ULP_COMP_FLD_IDX_WR(param,
968 BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
974 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * Parses an IPV4 item: rejects a third L3 header, reserves IPV4_NUM
 * field slots, copies each rte_ipv4_hdr field's spec+mask into
 * hdr_field in wire order (tos with MASK_IGNORE, next_proto_id with
 * MATCH_IGNORE for template matching), classifies the header as inner
 * IPv4 when an outer L3 header already exists else outer IPv4 (also
 * recording the tunnel-offload DIP field id), masks the protocol with
 * the item mask per the comment, forwards it to
 * ulp_rte_l3_proto_type_update and bumps the L3 header count.
 * NOTE(review): "¶ms" is mojibake of "&params"; interior lines
 * (size/proto/cnt declarations, NULL-spec guards, inner_flag set,
 * braces) are missing from this excerpt.
 */
976 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
977 struct ulp_rte_parser_params *params)
979 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
980 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
981 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
982 uint32_t idx = 0, dip_idx = 0;
985 uint32_t inner_flag = 0;
988 /* validate there are no 3rd L3 header */
989 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
991 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
992 return BNXT_TF_RC_ERROR;
995 if (ulp_rte_prsr_fld_size_validate(params, &idx,
996 BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
997 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
998 return BNXT_TF_RC_ERROR;
1002 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1005 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
1006 ulp_rte_prsr_fld_mask(params, &idx, size,
1007 ulp_deference_struct(ipv4_spec, hdr.version_ihl),
1008 ulp_deference_struct(ipv4_mask, hdr.version_ihl),
1009 ULP_PRSR_ACT_DEFAULT);
1012 * The tos field is ignored since OVS is setting it as wild card
1013 * match and it is not supported. This is a work around and
1014 * shall be addressed in the future.
1016 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
1017 ulp_rte_prsr_fld_mask(params, &idx, size,
1018 ulp_deference_struct(ipv4_spec,
1019 hdr.type_of_service),
1020 ulp_deference_struct(ipv4_mask,
1021 hdr.type_of_service),
1022 ULP_PRSR_ACT_MASK_IGNORE);
1024 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
1025 ulp_rte_prsr_fld_mask(params, &idx, size,
1026 ulp_deference_struct(ipv4_spec, hdr.total_length),
1027 ulp_deference_struct(ipv4_mask, hdr.total_length),
1028 ULP_PRSR_ACT_DEFAULT);
1030 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
1031 ulp_rte_prsr_fld_mask(params, &idx, size,
1032 ulp_deference_struct(ipv4_spec, hdr.packet_id),
1033 ulp_deference_struct(ipv4_mask, hdr.packet_id),
1034 ULP_PRSR_ACT_DEFAULT);
1036 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
1037 ulp_rte_prsr_fld_mask(params, &idx, size,
1038 ulp_deference_struct(ipv4_spec,
1039 hdr.fragment_offset),
1040 ulp_deference_struct(ipv4_mask,
1041 hdr.fragment_offset),
1042 ULP_PRSR_ACT_DEFAULT);
1044 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
1045 ulp_rte_prsr_fld_mask(params, &idx, size,
1046 ulp_deference_struct(ipv4_spec, hdr.time_to_live),
1047 ulp_deference_struct(ipv4_mask, hdr.time_to_live),
1048 ULP_PRSR_ACT_DEFAULT);
1050 /* Ignore proto for matching templates */
1051 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
1052 ulp_rte_prsr_fld_mask(params, &idx, size,
1053 ulp_deference_struct(ipv4_spec,
1055 ulp_deference_struct(ipv4_mask,
1057 ULP_PRSR_ACT_MATCH_IGNORE);
1059 proto = ipv4_spec->hdr.next_proto_id;
1061 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
1062 ulp_rte_prsr_fld_mask(params, &idx, size,
1063 ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
1064 ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
1065 ULP_PRSR_ACT_DEFAULT);
1067 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
1068 ulp_rte_prsr_fld_mask(params, &idx, size,
1069 ulp_deference_struct(ipv4_spec, hdr.src_addr),
1070 ulp_deference_struct(ipv4_mask, hdr.src_addr),
1071 ULP_PRSR_ACT_DEFAULT);
1074 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
1075 ulp_rte_prsr_fld_mask(params, &idx, size,
1076 ulp_deference_struct(ipv4_spec, hdr.dst_addr),
1077 ulp_deference_struct(ipv4_mask, hdr.dst_addr),
1078 ULP_PRSR_ACT_DEFAULT);
1080 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1081 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1082 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1083 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1084 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1087 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1088 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1089 /* Update the tunnel offload dest ip offset */
1090 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1094 /* Some of the PMD applications may set the protocol field
1095 * in the IPv4 spec but don't set the mask. So, consider
1096 * the mask in the proto value calculation.
1099 proto &= ipv4_mask->hdr.next_proto_id;
1101 /* Update the field protocol hdr bitmap */
1102 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1103 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1104 return BNXT_TF_RC_SUCCESS;
1107 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1109 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1110 struct ulp_rte_parser_params *params)
1112 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1113 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1114 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1115 uint32_t idx = 0, dip_idx = 0;
1117 uint32_t ver_spec = 0, ver_mask = 0;
1118 uint32_t tc_spec = 0, tc_mask = 0;
1119 uint32_t lab_spec = 0, lab_mask = 0;
1121 uint32_t inner_flag = 0;
1124 /* validate there are no 3rd L3 header */
1125 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1127 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1128 return BNXT_TF_RC_ERROR;
1131 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1132 BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
1133 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1134 return BNXT_TF_RC_ERROR;
1138 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1142 ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1143 tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1144 lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1145 proto = ipv6_spec->hdr.proto;
1149 ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1150 tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1151 lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1153 /* Some of the PMD applications may set the protocol field
1154 * in the IPv6 spec but don't set the mask. So, consider
1155 * the mask in proto value calculation.
1157 proto &= ipv6_mask->hdr.proto;
1160 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
1161 ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
1162 ULP_PRSR_ACT_DEFAULT);
1164 * The TC and flow label field are ignored since OVS is
1165 * setting it for match and it is not supported.
1166 * This is a work around and
1167 * shall be addressed in the future.
1169 ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
1170 ULP_PRSR_ACT_MASK_IGNORE);
1171 ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
1172 ULP_PRSR_ACT_MASK_IGNORE);
1174 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
1175 ulp_rte_prsr_fld_mask(params, &idx, size,
1176 ulp_deference_struct(ipv6_spec, hdr.payload_len),
1177 ulp_deference_struct(ipv6_mask, hdr.payload_len),
1178 ULP_PRSR_ACT_DEFAULT);
1180 /* Ignore proto for template matching */
1181 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
1182 ulp_rte_prsr_fld_mask(params, &idx, size,
1183 ulp_deference_struct(ipv6_spec, hdr.proto),
1184 ulp_deference_struct(ipv6_mask, hdr.proto),
1185 ULP_PRSR_ACT_MATCH_IGNORE);
1187 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
1188 ulp_rte_prsr_fld_mask(params, &idx, size,
1189 ulp_deference_struct(ipv6_spec, hdr.hop_limits),
1190 ulp_deference_struct(ipv6_mask, hdr.hop_limits),
1191 ULP_PRSR_ACT_DEFAULT);
1193 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
1194 ulp_rte_prsr_fld_mask(params, &idx, size,
1195 ulp_deference_struct(ipv6_spec, hdr.src_addr),
1196 ulp_deference_struct(ipv6_mask, hdr.src_addr),
1197 ULP_PRSR_ACT_DEFAULT);
1200 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
1201 ulp_rte_prsr_fld_mask(params, &idx, size,
1202 ulp_deference_struct(ipv6_spec, hdr.dst_addr),
1203 ulp_deference_struct(ipv6_mask, hdr.dst_addr),
1204 ULP_PRSR_ACT_DEFAULT);
1206 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1207 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1208 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1209 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1210 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1213 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1214 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1215 /* Update the tunnel offload dest ip offset */
1216 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1220 /* Update the field protocol hdr bitmap */
1221 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1222 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1224 return BNXT_TF_RC_SUCCESS;
1227 /* Function to handle the update of proto header based on field values */
1229 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
1230 uint16_t src_port, uint16_t src_mask,
1231 uint16_t dst_port, uint16_t dst_mask,
1232 enum bnxt_ulp_hdr_bit hdr_bit)
1235 case BNXT_ULP_HDR_BIT_I_UDP:
1236 case BNXT_ULP_HDR_BIT_I_TCP:
1237 ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
1238 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1239 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1240 (uint64_t)rte_be_to_cpu_16(src_port));
1241 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1242 (uint64_t)rte_be_to_cpu_16(dst_port));
1243 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
1244 (uint64_t)rte_be_to_cpu_16(src_mask));
1245 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
1246 (uint64_t)rte_be_to_cpu_16(dst_mask));
1247 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1249 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1250 !!(src_port & src_mask));
1251 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1252 !!(dst_port & dst_mask));
1253 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1254 (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
1255 IPPROTO_UDP : IPPROTO_TCP);
1257 case BNXT_ULP_HDR_BIT_O_UDP:
1258 case BNXT_ULP_HDR_BIT_O_TCP:
1259 ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
1260 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1261 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1262 (uint64_t)rte_be_to_cpu_16(src_port));
1263 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1264 (uint64_t)rte_be_to_cpu_16(dst_port));
1265 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
1266 (uint64_t)rte_be_to_cpu_16(src_mask));
1267 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1268 (uint64_t)rte_be_to_cpu_16(dst_mask));
1269 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1271 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1272 !!(src_port & src_mask));
1273 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1274 !!(dst_port & dst_mask));
1275 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1276 (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
1277 IPPROTO_UDP : IPPROTO_TCP);
1283 if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
1284 tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1285 ULP_BITMAP_SET(params->hdr_fp_bit.bits,
1286 BNXT_ULP_HDR_BIT_T_VXLAN);
1287 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1291 /* Function to handle the parsing of RTE Flow item UDP Header. */
1293 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1294 struct ulp_rte_parser_params *params)
1296 const struct rte_flow_item_udp *udp_spec = item->spec;
1297 const struct rte_flow_item_udp *udp_mask = item->mask;
1298 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1301 uint16_t dport = 0, sport = 0;
1302 uint16_t dport_mask = 0, sport_mask = 0;
1304 enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;
1306 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1308 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1309 return BNXT_TF_RC_ERROR;
1313 sport = udp_spec->hdr.src_port;
1314 dport = udp_spec->hdr.dst_port;
1317 sport_mask = udp_mask->hdr.src_port;
1318 dport_mask = udp_mask->hdr.dst_port;
1321 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1322 BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1323 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1324 return BNXT_TF_RC_ERROR;
1328 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1331 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1332 ulp_rte_prsr_fld_mask(params, &idx, size,
1333 ulp_deference_struct(udp_spec, hdr.src_port),
1334 ulp_deference_struct(udp_mask, hdr.src_port),
1335 ULP_PRSR_ACT_DEFAULT);
1337 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1338 ulp_rte_prsr_fld_mask(params, &idx, size,
1339 ulp_deference_struct(udp_spec, hdr.dst_port),
1340 ulp_deference_struct(udp_mask, hdr.dst_port),
1341 ULP_PRSR_ACT_DEFAULT);
1343 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1344 ulp_rte_prsr_fld_mask(params, &idx, size,
1345 ulp_deference_struct(udp_spec, hdr.dgram_len),
1346 ulp_deference_struct(udp_mask, hdr.dgram_len),
1347 ULP_PRSR_ACT_DEFAULT);
1349 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1350 ulp_rte_prsr_fld_mask(params, &idx, size,
1351 ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1352 ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1353 ULP_PRSR_ACT_DEFAULT);
1355 /* Set the udp header bitmap and computed l4 header bitmaps */
1356 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1357 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
1358 out_l4 = BNXT_ULP_HDR_BIT_I_UDP;
1360 ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1361 dport_mask, out_l4);
1362 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1363 return BNXT_TF_RC_SUCCESS;
1366 /* Function to handle the parsing of RTE Flow item TCP Header. */
1368 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1369 struct ulp_rte_parser_params *params)
1371 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1372 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1373 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1375 uint16_t dport = 0, sport = 0;
1376 uint16_t dport_mask = 0, sport_mask = 0;
1379 enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;
1381 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1383 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1384 return BNXT_TF_RC_ERROR;
1388 sport = tcp_spec->hdr.src_port;
1389 dport = tcp_spec->hdr.dst_port;
1392 sport_mask = tcp_mask->hdr.src_port;
1393 dport_mask = tcp_mask->hdr.dst_port;
1396 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1397 BNXT_ULP_PROTO_HDR_TCP_NUM)) {
1398 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1399 return BNXT_TF_RC_ERROR;
1403 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1406 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1407 ulp_rte_prsr_fld_mask(params, &idx, size,
1408 ulp_deference_struct(tcp_spec, hdr.src_port),
1409 ulp_deference_struct(tcp_mask, hdr.src_port),
1410 ULP_PRSR_ACT_DEFAULT);
1412 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1413 ulp_rte_prsr_fld_mask(params, &idx, size,
1414 ulp_deference_struct(tcp_spec, hdr.dst_port),
1415 ulp_deference_struct(tcp_mask, hdr.dst_port),
1416 ULP_PRSR_ACT_DEFAULT);
1418 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1419 ulp_rte_prsr_fld_mask(params, &idx, size,
1420 ulp_deference_struct(tcp_spec, hdr.sent_seq),
1421 ulp_deference_struct(tcp_mask, hdr.sent_seq),
1422 ULP_PRSR_ACT_DEFAULT);
1424 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1425 ulp_rte_prsr_fld_mask(params, &idx, size,
1426 ulp_deference_struct(tcp_spec, hdr.recv_ack),
1427 ulp_deference_struct(tcp_mask, hdr.recv_ack),
1428 ULP_PRSR_ACT_DEFAULT);
1430 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1431 ulp_rte_prsr_fld_mask(params, &idx, size,
1432 ulp_deference_struct(tcp_spec, hdr.data_off),
1433 ulp_deference_struct(tcp_mask, hdr.data_off),
1434 ULP_PRSR_ACT_DEFAULT);
1436 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1437 ulp_rte_prsr_fld_mask(params, &idx, size,
1438 ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1439 ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1440 ULP_PRSR_ACT_DEFAULT);
1442 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1443 ulp_rte_prsr_fld_mask(params, &idx, size,
1444 ulp_deference_struct(tcp_spec, hdr.rx_win),
1445 ulp_deference_struct(tcp_mask, hdr.rx_win),
1446 ULP_PRSR_ACT_DEFAULT);
1448 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1449 ulp_rte_prsr_fld_mask(params, &idx, size,
1450 ulp_deference_struct(tcp_spec, hdr.cksum),
1451 ulp_deference_struct(tcp_mask, hdr.cksum),
1452 ULP_PRSR_ACT_DEFAULT);
1454 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1455 ulp_rte_prsr_fld_mask(params, &idx, size,
1456 ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1457 ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1458 ULP_PRSR_ACT_DEFAULT);
1460 /* Set the udp header bitmap and computed l4 header bitmaps */
1461 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1462 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
1463 out_l4 = BNXT_ULP_HDR_BIT_I_TCP;
1465 ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1466 dport_mask, out_l4);
1467 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1468 return BNXT_TF_RC_SUCCESS;
1471 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1473 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1474 struct ulp_rte_parser_params *params)
1476 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1477 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1478 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1482 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1483 BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1484 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1485 return BNXT_TF_RC_ERROR;
1489 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1492 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1493 ulp_rte_prsr_fld_mask(params, &idx, size,
1494 ulp_deference_struct(vxlan_spec, flags),
1495 ulp_deference_struct(vxlan_mask, flags),
1496 ULP_PRSR_ACT_DEFAULT);
1498 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1499 ulp_rte_prsr_fld_mask(params, &idx, size,
1500 ulp_deference_struct(vxlan_spec, rsvd0),
1501 ulp_deference_struct(vxlan_mask, rsvd0),
1502 ULP_PRSR_ACT_DEFAULT);
1504 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1505 ulp_rte_prsr_fld_mask(params, &idx, size,
1506 ulp_deference_struct(vxlan_spec, vni),
1507 ulp_deference_struct(vxlan_mask, vni),
1508 ULP_PRSR_ACT_DEFAULT);
1510 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1511 ulp_rte_prsr_fld_mask(params, &idx, size,
1512 ulp_deference_struct(vxlan_spec, rsvd1),
1513 ulp_deference_struct(vxlan_mask, rsvd1),
1514 ULP_PRSR_ACT_DEFAULT);
1516 /* Update the hdr_bitmap with vxlan */
1517 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1518 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1519 return BNXT_TF_RC_SUCCESS;
1522 /* Function to handle the parsing of RTE Flow item GRE Header. */
1524 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1525 struct ulp_rte_parser_params *params)
1527 const struct rte_flow_item_gre *gre_spec = item->spec;
1528 const struct rte_flow_item_gre *gre_mask = item->mask;
1529 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1533 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1534 BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1535 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1536 return BNXT_TF_RC_ERROR;
1539 size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1540 ulp_rte_prsr_fld_mask(params, &idx, size,
1541 ulp_deference_struct(gre_spec, c_rsvd0_ver),
1542 ulp_deference_struct(gre_mask, c_rsvd0_ver),
1543 ULP_PRSR_ACT_DEFAULT);
1545 size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1546 ulp_rte_prsr_fld_mask(params, &idx, size,
1547 ulp_deference_struct(gre_spec, protocol),
1548 ulp_deference_struct(gre_mask, protocol),
1549 ULP_PRSR_ACT_DEFAULT);
1551 /* Update the hdr_bitmap with GRE */
1552 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1553 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1554 return BNXT_TF_RC_SUCCESS;
1557 /* Function to handle the parsing of RTE Flow item ANY. */
1559 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1560 struct ulp_rte_parser_params *params __rte_unused)
1562 return BNXT_TF_RC_SUCCESS;
1565 /* Function to handle the parsing of RTE Flow item ICMP Header. */
1567 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1568 struct ulp_rte_parser_params *params)
1570 const struct rte_flow_item_icmp *icmp_spec = item->spec;
1571 const struct rte_flow_item_icmp *icmp_mask = item->mask;
1572 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1576 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1577 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1578 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1579 return BNXT_TF_RC_ERROR;
1582 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1583 ulp_rte_prsr_fld_mask(params, &idx, size,
1584 ulp_deference_struct(icmp_spec, hdr.icmp_type),
1585 ulp_deference_struct(icmp_mask, hdr.icmp_type),
1586 ULP_PRSR_ACT_DEFAULT);
1588 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1589 ulp_rte_prsr_fld_mask(params, &idx, size,
1590 ulp_deference_struct(icmp_spec, hdr.icmp_code),
1591 ulp_deference_struct(icmp_mask, hdr.icmp_code),
1592 ULP_PRSR_ACT_DEFAULT);
1594 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1595 ulp_rte_prsr_fld_mask(params, &idx, size,
1596 ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1597 ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1598 ULP_PRSR_ACT_DEFAULT);
1600 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1601 ulp_rte_prsr_fld_mask(params, &idx, size,
1602 ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1603 ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1604 ULP_PRSR_ACT_DEFAULT);
1606 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1607 ulp_rte_prsr_fld_mask(params, &idx, size,
1608 ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1609 ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1610 ULP_PRSR_ACT_DEFAULT);
1612 /* Update the hdr_bitmap with ICMP */
1613 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1614 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1616 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1617 return BNXT_TF_RC_SUCCESS;
1620 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
1622 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1623 struct ulp_rte_parser_params *params)
1625 const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1626 const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1627 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1631 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1632 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1633 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1634 return BNXT_TF_RC_ERROR;
1637 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1638 ulp_rte_prsr_fld_mask(params, &idx, size,
1639 ulp_deference_struct(icmp_spec, type),
1640 ulp_deference_struct(icmp_mask, type),
1641 ULP_PRSR_ACT_DEFAULT);
1643 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1644 ulp_rte_prsr_fld_mask(params, &idx, size,
1645 ulp_deference_struct(icmp_spec, code),
1646 ulp_deference_struct(icmp_mask, code),
1647 ULP_PRSR_ACT_DEFAULT);
1649 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
1650 ulp_rte_prsr_fld_mask(params, &idx, size,
1651 ulp_deference_struct(icmp_spec, checksum),
1652 ulp_deference_struct(icmp_mask, checksum),
1653 ULP_PRSR_ACT_DEFAULT);
1655 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
1656 BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
1657 return BNXT_TF_RC_ERROR;
1660 /* Update the hdr_bitmap with ICMP */
1661 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1662 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1664 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1665 return BNXT_TF_RC_SUCCESS;
1668 /* Function to handle the parsing of RTE Flow item void Header */
1670 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1671 struct ulp_rte_parser_params *params __rte_unused)
1673 return BNXT_TF_RC_SUCCESS;
1676 /* Function to handle the parsing of RTE Flow action void Header. */
1678 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1679 struct ulp_rte_parser_params *params __rte_unused)
1681 return BNXT_TF_RC_SUCCESS;
1684 /* Function to handle the parsing of RTE Flow action Mark Header. */
1686 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1687 struct ulp_rte_parser_params *param)
1689 const struct rte_flow_action_mark *mark;
1690 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1693 mark = action_item->conf;
1695 mark_id = tfp_cpu_to_be_32(mark->id);
1696 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1697 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1699 /* Update the hdr_bitmap with vxlan */
1700 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1701 return BNXT_TF_RC_SUCCESS;
1703 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1704 return BNXT_TF_RC_ERROR;
1707 /* Function to handle the parsing of RTE Flow action RSS Header. */
1709 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1710 struct ulp_rte_parser_params *param)
1712 const struct rte_flow_action_rss *rss;
1713 struct ulp_rte_act_prop *ap = ¶m->act_prop;
1715 if (action_item == NULL || action_item->conf == NULL) {
1716 BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1717 return BNXT_TF_RC_ERROR;
1720 rss = action_item->conf;
1721 /* Copy the rss into the specific action properties */
1722 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1723 BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1724 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1725 BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1726 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1727 &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
1729 if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1730 BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1731 return BNXT_TF_RC_ERROR;
1733 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1736 /* set the RSS action header bit */
1737 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1739 return BNXT_TF_RC_SUCCESS;
1742 /* Function to handle the parsing of RTE Flow item eth Header. */
1744 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
1745 const struct rte_flow_item_eth *eth_spec)
1747 struct ulp_rte_hdr_field *field;
1750 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
1751 size = sizeof(eth_spec->dst.addr_bytes);
1752 field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);
1754 size = sizeof(eth_spec->src.addr_bytes);
1755 field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);
1757 size = sizeof(eth_spec->type);
1758 field = ulp_rte_parser_fld_copy(field, ð_spec->type, size);
1760 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
1763 /* Function to handle the parsing of RTE Flow item vlan Header. */
1765 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
1766 const struct rte_flow_item_vlan *vlan_spec,
1769 struct ulp_rte_hdr_field *field;
1773 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
1774 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1775 BNXT_ULP_HDR_BIT_OO_VLAN);
1777 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
1778 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1779 BNXT_ULP_HDR_BIT_OI_VLAN);
1782 size = sizeof(vlan_spec->tci);
1783 field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);
1785 size = sizeof(vlan_spec->inner_type);
1786 field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
1789 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
1791 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
1792 const struct rte_flow_item_ipv4 *ip)
1794 struct ulp_rte_hdr_field *field;
1798 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
1799 size = sizeof(ip->hdr.version_ihl);
1800 if (!ip->hdr.version_ihl)
1801 val8 = RTE_IPV4_VHL_DEF;
1803 val8 = ip->hdr.version_ihl;
1804 field = ulp_rte_parser_fld_copy(field, &val8, size);
1806 size = sizeof(ip->hdr.type_of_service);
1807 field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
1809 size = sizeof(ip->hdr.packet_id);
1810 field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
1812 size = sizeof(ip->hdr.fragment_offset);
1813 field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
1815 size = sizeof(ip->hdr.time_to_live);
1816 if (!ip->hdr.time_to_live)
1817 val8 = BNXT_ULP_DEFAULT_TTL;
1819 val8 = ip->hdr.time_to_live;
1820 field = ulp_rte_parser_fld_copy(field, &val8, size);
1822 size = sizeof(ip->hdr.next_proto_id);
1823 field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
1825 size = sizeof(ip->hdr.src_addr);
1826 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1828 size = sizeof(ip->hdr.dst_addr);
1829 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1831 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
1834 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
1836 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
1837 const struct rte_flow_item_ipv6 *ip)
1839 struct ulp_rte_hdr_field *field;
1844 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
1845 size = sizeof(ip->hdr.vtc_flow);
1846 if (!ip->hdr.vtc_flow)
1847 val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
1849 val32 = ip->hdr.vtc_flow;
1850 field = ulp_rte_parser_fld_copy(field, &val32, size);
1852 size = sizeof(ip->hdr.proto);
1853 field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
1855 size = sizeof(ip->hdr.hop_limits);
1856 if (!ip->hdr.hop_limits)
1857 val8 = BNXT_ULP_DEFAULT_TTL;
1859 val8 = ip->hdr.hop_limits;
1860 field = ulp_rte_parser_fld_copy(field, &val8, size);
1862 size = sizeof(ip->hdr.src_addr);
1863 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1865 size = sizeof(ip->hdr.dst_addr);
1866 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1868 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
1871 /* Function to handle the parsing of RTE Flow item UDP Header. */
1873 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
1874 const struct rte_flow_item_udp *udp_spec)
1876 struct ulp_rte_hdr_field *field;
1878 uint8_t type = IPPROTO_UDP;
1880 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
1881 size = sizeof(udp_spec->hdr.src_port);
1882 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
1884 size = sizeof(udp_spec->hdr.dst_port);
1885 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
1887 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
1889 /* Update thhe ip header protocol */
1890 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
1891 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1892 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
1893 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1896 /* Function to handle the parsing of RTE Flow item vxlan Header. */
1898 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
1899 struct rte_flow_item_vxlan *vxlan_spec)
1901 struct ulp_rte_hdr_field *field;
1904 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
1905 size = sizeof(vxlan_spec->flags);
1906 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);
1908 size = sizeof(vxlan_spec->rsvd0);
1909 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);
1911 size = sizeof(vxlan_spec->vni);
1912 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);
1914 size = sizeof(vxlan_spec->rsvd1);
1915 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);
1917 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1920 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1922 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1923 struct ulp_rte_parser_params *params)
1925 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1926 const struct rte_flow_item *item;
1927 const struct rte_flow_item_ipv4 *ipv4_spec;
1928 const struct rte_flow_item_ipv6 *ipv6_spec;
1929 struct rte_flow_item_vxlan vxlan_spec;
1930 uint32_t vlan_num = 0, vlan_size = 0;
1931 uint32_t ip_size = 0, ip_type = 0;
1932 uint32_t vxlan_size = 0;
1933 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1934 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1936 vxlan_encap = action_item->conf;
1938 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1939 return BNXT_TF_RC_ERROR;
1942 item = vxlan_encap->definition;
1944 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1945 return BNXT_TF_RC_ERROR;
1948 if (!ulp_rte_item_skip_void(&item, 0))
1949 return BNXT_TF_RC_ERROR;
1951 /* must have ethernet header */
1952 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1953 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1954 return BNXT_TF_RC_ERROR;
1957 /* Parse the ethernet header */
1959 ulp_rte_enc_eth_hdr_handler(params, item->spec);
1961 /* Goto the next item */
1962 if (!ulp_rte_item_skip_void(&item, 1))
1963 return BNXT_TF_RC_ERROR;
1965 /* May have vlan header */
1966 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1969 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
1971 if (!ulp_rte_item_skip_void(&item, 1))
1972 return BNXT_TF_RC_ERROR;
1975 /* may have two vlan headers */
1976 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1979 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
1981 if (!ulp_rte_item_skip_void(&item, 1))
1982 return BNXT_TF_RC_ERROR;
1985 /* Update the vlan count and size of more than one */
1987 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1988 vlan_num = tfp_cpu_to_be_32(vlan_num);
1989 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1992 vlan_size = tfp_cpu_to_be_32(vlan_size);
1993 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1998 /* L3 must be IPv4, IPv6 */
1999 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
2000 ipv4_spec = item->spec;
2001 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2003 /* Update the ip size details */
2004 ip_size = tfp_cpu_to_be_32(ip_size);
2005 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2006 &ip_size, sizeof(uint32_t));
2008 /* update the ip type */
2009 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2010 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2011 &ip_type, sizeof(uint32_t));
2013 /* update the computed field to notify it is ipv4 header */
2014 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2017 ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2019 if (!ulp_rte_item_skip_void(&item, 1))
2020 return BNXT_TF_RC_ERROR;
2021 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2022 ipv6_spec = item->spec;
2023 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2025 /* Update the ip size details */
2026 ip_size = tfp_cpu_to_be_32(ip_size);
2027 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2028 &ip_size, sizeof(uint32_t));
2030 /* update the ip type */
2031 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2032 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2033 &ip_type, sizeof(uint32_t));
2035 /* update the computed field to notify it is ipv6 header */
2036 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2039 ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2041 if (!ulp_rte_item_skip_void(&item, 1))
2042 return BNXT_TF_RC_ERROR;
2044 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2045 return BNXT_TF_RC_ERROR;
2049 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2050 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
2051 return BNXT_TF_RC_ERROR;
2054 ulp_rte_enc_udp_hdr_handler(params, item->spec);
2056 if (!ulp_rte_item_skip_void(&item, 1))
2057 return BNXT_TF_RC_ERROR;
2060 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2061 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
2062 return BNXT_TF_RC_ERROR;
2064 vxlan_size = sizeof(struct rte_flow_item_vxlan);
2065 /* copy the vxlan details */
2066 memcpy(&vxlan_spec, item->spec, vxlan_size);
2067 vxlan_spec.flags = 0x08;
2068 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2069 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2070 &vxlan_size, sizeof(uint32_t));
2072 ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2074 /* update the hdr_bitmap with vxlan */
2075 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2076 return BNXT_TF_RC_SUCCESS;
2079 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
2081 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
2083 struct ulp_rte_parser_params *params)
2085 /* update the hdr_bitmap with vxlan */
2086 ULP_BITMAP_SET(params->act_bitmap.bits,
2087 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2088 /* Update computational field with tunnel decap info */
2089 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2090 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
2091 return BNXT_TF_RC_SUCCESS;
2094 /* Function to handle the parsing of RTE Flow action drop Header. */
2096 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2097 struct ulp_rte_parser_params *params)
2099 /* Update the hdr_bitmap with drop */
2100 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2101 return BNXT_TF_RC_SUCCESS;
2104 /* Function to handle the parsing of RTE Flow action count. */
2106 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2107 struct ulp_rte_parser_params *params)
2109 const struct rte_flow_action_count *act_count;
2110 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
2112 act_count = action_item->conf;
2114 if (act_count->shared) {
2116 "Parse Error:Shared count not supported\n");
2117 return BNXT_TF_RC_PARSE_ERR;
2119 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2121 BNXT_ULP_ACT_PROP_SZ_COUNT);
2124 /* Update the hdr_bitmap with count */
2125 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2126 return BNXT_TF_RC_SUCCESS;
2129 /* Function to handle the parsing of action ports. */
2131 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2134 enum bnxt_ulp_direction_type dir;
2137 struct ulp_rte_act_prop *act = ¶m->act_prop;
2138 enum bnxt_ulp_intf_type port_type;
2141 /* Get the direction */
2142 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
2143 if (dir == BNXT_ULP_DIR_EGRESS) {
2144 /* For egress direction, fill vport */
2145 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2146 return BNXT_TF_RC_ERROR;
2149 pid = rte_cpu_to_be_32(pid);
2150 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2151 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2153 /* For ingress direction, fill vnic */
2154 port_type = ULP_COMP_FLD_IDX_RD(param,
2155 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2156 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2157 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2159 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2161 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2163 return BNXT_TF_RC_ERROR;
2166 pid = rte_cpu_to_be_32(pid);
2167 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2168 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2171 /* Update the action port set bit */
2172 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2173 return BNXT_TF_RC_SUCCESS;
2176 /* Function to handle the parsing of RTE Flow action PF. */
2178 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2179 struct ulp_rte_parser_params *params)
2183 enum bnxt_ulp_intf_type intf_type;
2185 /* Get the port id of the current device */
2186 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2188 /* Get the port db ifindex */
2189 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2191 BNXT_TF_DBG(ERR, "Invalid port id\n");
2192 return BNXT_TF_RC_ERROR;
2195 /* Check the port is PF port */
2196 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2197 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2198 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2199 return BNXT_TF_RC_ERROR;
2201 /* Update the action properties */
2202 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2203 return ulp_rte_parser_act_port_set(params, ifindex);
2206 /* Function to handle the parsing of RTE Flow action VF. */
2208 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2209 struct ulp_rte_parser_params *params)
2211 const struct rte_flow_action_vf *vf_action;
2212 enum bnxt_ulp_intf_type intf_type;
2216 vf_action = action_item->conf;
2218 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2219 return BNXT_TF_RC_PARSE_ERR;
2222 if (vf_action->original) {
2223 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2224 return BNXT_TF_RC_PARSE_ERR;
2227 bp = bnxt_pmd_get_bp(params->port_id);
2229 BNXT_TF_DBG(ERR, "Invalid bp\n");
2230 return BNXT_TF_RC_ERROR;
2233 /* vf_action->id is a logical number which in this case is an
2234 * offset from the first VF. So, to get the absolute VF id, the
2235 * offset must be added to the absolute first vf id of that port.
2237 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2241 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2242 return BNXT_TF_RC_ERROR;
2244 /* Check the port is VF port */
2245 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2246 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2247 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2248 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2249 return BNXT_TF_RC_ERROR;
2252 /* Update the action properties */
2253 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2254 return ulp_rte_parser_act_port_set(params, ifindex);
2257 /* Function to handle the parsing of RTE Flow action port_id. */
2259 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
2260 struct ulp_rte_parser_params *param)
2262 const struct rte_flow_action_port_id *port_id = act_item->conf;
2264 enum bnxt_ulp_intf_type intf_type;
2268 "ParseErr: Invalid Argument\n");
2269 return BNXT_TF_RC_PARSE_ERR;
2271 if (port_id->original) {
2273 "ParseErr:Portid Original not supported\n");
2274 return BNXT_TF_RC_PARSE_ERR;
2277 /* Get the port db ifindex */
2278 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2280 BNXT_TF_DBG(ERR, "Invalid port id\n");
2281 return BNXT_TF_RC_ERROR;
2284 /* Get the intf type */
2285 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2287 BNXT_TF_DBG(ERR, "Invalid port type\n");
2288 return BNXT_TF_RC_ERROR;
2291 /* Set the action port */
2292 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2293 return ulp_rte_parser_act_port_set(param, ifindex);
2296 /* Function to handle the parsing of RTE Flow action phy_port. */
2298 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2299 struct ulp_rte_parser_params *prm)
2301 const struct rte_flow_action_phy_port *phy_port;
2305 enum bnxt_ulp_direction_type dir;
2307 phy_port = action_item->conf;
2310 "ParseErr: Invalid Argument\n");
2311 return BNXT_TF_RC_PARSE_ERR;
2314 if (phy_port->original) {
2316 "Parse Err:Port Original not supported\n");
2317 return BNXT_TF_RC_PARSE_ERR;
2319 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2320 if (dir != BNXT_ULP_DIR_EGRESS) {
2322 "Parse Err:Phy ports are valid only for egress\n");
2323 return BNXT_TF_RC_PARSE_ERR;
2325 /* Get the physical port details from port db */
2326 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2329 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2334 pid = rte_cpu_to_be_32(pid);
2335 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2336 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2338 /* Update the action port set bit */
2339 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2340 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2341 BNXT_ULP_INTF_TYPE_PHY_PORT);
2342 return BNXT_TF_RC_SUCCESS;
2345 /* Function to handle the parsing of RTE Flow action pop vlan. */
2347 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2348 struct ulp_rte_parser_params *params)
2350 /* Update the act_bitmap with pop */
2351 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2352 return BNXT_TF_RC_SUCCESS;
2355 /* Function to handle the parsing of RTE Flow action push vlan. */
2357 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2358 struct ulp_rte_parser_params *params)
2360 const struct rte_flow_action_of_push_vlan *push_vlan;
2362 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2364 push_vlan = action_item->conf;
2366 ethertype = push_vlan->ethertype;
2367 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2369 "Parse Err: Ethertype not supported\n");
2370 return BNXT_TF_RC_PARSE_ERR;
2372 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2373 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2374 /* Update the hdr_bitmap with push vlan */
2375 ULP_BITMAP_SET(params->act_bitmap.bits,
2376 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2377 return BNXT_TF_RC_SUCCESS;
2379 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2380 return BNXT_TF_RC_ERROR;
2383 /* Function to handle the parsing of RTE Flow action set vlan id. */
2385 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2386 struct ulp_rte_parser_params *params)
2388 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2390 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2392 vlan_vid = action_item->conf;
2393 if (vlan_vid && vlan_vid->vlan_vid) {
2394 vid = vlan_vid->vlan_vid;
2395 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2396 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2397 /* Update the hdr_bitmap with vlan vid */
2398 ULP_BITMAP_SET(params->act_bitmap.bits,
2399 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2400 return BNXT_TF_RC_SUCCESS;
2402 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2403 return BNXT_TF_RC_ERROR;
2406 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2408 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2409 struct ulp_rte_parser_params *params)
2411 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2413 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2415 vlan_pcp = action_item->conf;
2417 pcp = vlan_pcp->vlan_pcp;
2418 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2419 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2420 /* Update the hdr_bitmap with vlan vid */
2421 ULP_BITMAP_SET(params->act_bitmap.bits,
2422 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2423 return BNXT_TF_RC_SUCCESS;
2425 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2426 return BNXT_TF_RC_ERROR;
2429 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2431 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2432 struct ulp_rte_parser_params *params)
2434 const struct rte_flow_action_set_ipv4 *set_ipv4;
2435 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2437 set_ipv4 = action_item->conf;
2439 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2440 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2441 /* Update the hdr_bitmap with set ipv4 src */
2442 ULP_BITMAP_SET(params->act_bitmap.bits,
2443 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2444 return BNXT_TF_RC_SUCCESS;
2446 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2447 return BNXT_TF_RC_ERROR;
2450 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2452 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2453 struct ulp_rte_parser_params *params)
2455 const struct rte_flow_action_set_ipv4 *set_ipv4;
2456 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2458 set_ipv4 = action_item->conf;
2460 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2461 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2462 /* Update the hdr_bitmap with set ipv4 dst */
2463 ULP_BITMAP_SET(params->act_bitmap.bits,
2464 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2465 return BNXT_TF_RC_SUCCESS;
2467 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2468 return BNXT_TF_RC_ERROR;
2471 /* Function to handle the parsing of RTE Flow action set tp src.*/
2473 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2474 struct ulp_rte_parser_params *params)
2476 const struct rte_flow_action_set_tp *set_tp;
2477 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2479 set_tp = action_item->conf;
2481 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2482 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2483 /* Update the hdr_bitmap with set tp src */
2484 ULP_BITMAP_SET(params->act_bitmap.bits,
2485 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2486 return BNXT_TF_RC_SUCCESS;
2489 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2490 return BNXT_TF_RC_ERROR;
2493 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2495 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2496 struct ulp_rte_parser_params *params)
2498 const struct rte_flow_action_set_tp *set_tp;
2499 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2501 set_tp = action_item->conf;
2503 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2504 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2505 /* Update the hdr_bitmap with set tp dst */
2506 ULP_BITMAP_SET(params->act_bitmap.bits,
2507 BNXT_ULP_ACT_BIT_SET_TP_DST);
2508 return BNXT_TF_RC_SUCCESS;
2511 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2512 return BNXT_TF_RC_ERROR;
2515 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2517 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2518 struct ulp_rte_parser_params *params)
2520 /* Update the act_bitmap with dec ttl */
2521 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2522 return BNXT_TF_RC_SUCCESS;
2525 /* Function to handle the parsing of RTE Flow action JUMP */
2527 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2528 struct ulp_rte_parser_params *params)
2530 /* Update the act_bitmap with dec ttl */
2531 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2532 return BNXT_TF_RC_SUCCESS;
2536 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2537 struct ulp_rte_parser_params *params)
2539 const struct rte_flow_action_sample *sample;
2542 sample = action_item->conf;
2544 /* if SAMPLE bit is set it means this sample action is nested within the
2545 * actions of another sample action; this is not allowed
2547 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2548 BNXT_ULP_ACT_BIT_SAMPLE))
2549 return BNXT_TF_RC_ERROR;
2551 /* a sample action is only allowed as a shared action */
2552 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2553 BNXT_ULP_ACT_BIT_SHARED))
2554 return BNXT_TF_RC_ERROR;
2556 /* only a ratio of 1 i.e. 100% is supported */
2557 if (sample->ratio != 1)
2558 return BNXT_TF_RC_ERROR;
2560 if (!sample->actions)
2561 return BNXT_TF_RC_ERROR;
2563 /* parse the nested actions for a sample action */
2564 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2565 if (ret == BNXT_TF_RC_SUCCESS)
2566 /* Update the act_bitmap with sample */
2567 ULP_BITMAP_SET(params->act_bitmap.bits,
2568 BNXT_ULP_ACT_BIT_SAMPLE);
2573 /* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
2575 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
2576 struct ulp_rte_parser_params *params)
2578 /* Set the F1 flow header bit */
2579 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
2580 return ulp_rte_vxlan_decap_act_handler(action_item, params);
2583 /* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
2585 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
2586 struct ulp_rte_parser_params *params)
2589 /* Set the F2 flow header bit */
2590 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
2591 return ulp_rte_vxlan_decap_act_handler(NULL, params);