1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
20 #include "ulp_template_db_tbl.h"
22 /* Local defines for the parsing functions */
23 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
24 #define ULP_VLAN_PRIORITY_MASK 0x700
25 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
26 #define ULP_UDP_PORT_VXLAN 4789
28 /* Utility function to skip the void items. */
/*
 * Advances *item past any leading RTE_FLOW_ITEM_TYPE_VOID entries in a
 * pattern array. NOTE(review): the loop body and return are elided in this
 * listing; presumably *item is advanced by 'increment' each iteration --
 * confirm against the full source.
 */
30 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
36 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
43 /* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec (field->size bytes).
 * NOTE(review): the remaining parameters, the size assignment, and the
 * return value (next field pointer, per the return type) are elided here.
 */
44 static struct ulp_rte_hdr_field *
45 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
50 memcpy(field->spec, buffer, field->size);
55 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] in the parser field bitmaps based on its mask:
 * - non-zero mask: set fld_bitmap; also set fld_s_bitmap unless the
 *   parser action asks to ignore the match (ULP_PRSR_ACT_MATCH_IGNORE);
 * - partial (not-all-ones) mask: flag the flow as a wildcard match;
 * - zero mask (else branch, header elided): clear the fld_bitmap bit.
 * NOTE(review): '¶ms' below is mojibake for '&params' -- fix encoding
 * when restoring the full file.
 */
57 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
59 enum bnxt_ulp_prsr_action prsr_act)
61 struct ulp_rte_hdr_field *field;
63 field = ¶ms->hdr_field[idx];
64 if (ulp_bitmap_notzero(field->mask, field->size)) {
65 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
66 if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
67 ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
/* A partial mask means this cannot be an exact match: record it as a
 * wildcard-match flow in the computed fields.
 */
69 if (!ulp_bitmap_is_ones(field->mask, field->size))
70 ULP_COMP_FLD_IDX_WR(params,
71 BNXT_ULP_CF_IDX_WC_MATCH, 1);
73 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
/* Safe member-address macro: yields &x->y, or NULL when x is NULL.
 * (Name is a long-standing typo for "dereference"; kept for ABI/grep
 * compatibility with the rest of the driver.)
 */
77 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
78 /* Utility function to copy field spec and masks items */
/*
 * Copies one protocol field's spec/mask pair into params->hdr_field[*idx]
 * and updates the field bitmaps; spec is copied only when the mask is
 * present and non-zero (a zero mask means "don't care", so the spec is
 * irrelevant). NOTE(review): '¶ms' is mojibake for '&params'; the size
 * parameter declaration, field->size assignment, and the *idx increment
 * at the end are elided in this listing.
 */
80 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
83 const void *spec_buff,
84 const void *mask_buff,
85 enum bnxt_ulp_prsr_action prsr_act)
87 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
89 /* update the field size */
92 /* copy the mask specifications only if mask is not null */
93 if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
94 memcpy(field->mask, mask_buff, size);
95 ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
98 /* copy the protocol specifications only if mask is not null*/
99 if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
100 memcpy(field->spec, spec_buff, size);
102 /* Increment the index */
106 /* Utility function to copy field spec and masks items */
/*
 * Reserves 'size' consecutive hdr_field slots: fails (logs OOB) when the
 * reservation would reach BNXT_ULP_PROTO_HDR_MAX, otherwise returns the
 * starting slot in *idx and advances params->field_idx past the block.
 * NOTE(review): the error/success return statements are elided in this
 * listing. The leading comment appears copy-pasted from the function
 * above -- this routine validates/reserves field slots, it does not copy.
 */
108 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
112 if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
113 BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
116 *idx = params->field_idx;
117 params->field_idx += size;
122 * Function to handle the parsing of RTE Flows and placing
123 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow item pattern, dispatching each item to the handler
 * registered in ulp_hdr_info[] (standard items) or ulp_vendor_hdr_info[]
 * (Broadcom vendor items in the BNXT_RTE_FLOW_ITEM_TYPE_END..LAST range).
 * On success falls through to the implicit SVIF match; unsupported types
 * jump to hdr_parser_error and return BNXT_TF_RC_PARSE_ERR.
 * NOTE(review): the item-advance statement, several closing braces, and
 * the hdr_parser_error label line are elided in this listing.
 */
126 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
127 struct ulp_rte_parser_params *params)
129 const struct rte_flow_item *item = pattern;
130 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Field slots 0..SVIF_NUM-1 are reserved for the implicit SVIF match. */
132 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
134 /* Set the computed flags for no vlan tags before parsing */
135 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
136 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
138 /* Parse all the items in the pattern */
139 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
140 if (item->type >= (uint32_t)
141 BNXT_RTE_FLOW_ITEM_TYPE_END) {
143 (uint32_t)BNXT_RTE_FLOW_ITEM_TYPE_LAST)
144 goto hdr_parser_error;
145 /* get the header information */
146 hdr_info = &ulp_vendor_hdr_info[item->type -
147 BNXT_RTE_FLOW_ITEM_TYPE_END];
149 if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
150 goto hdr_parser_error;
151 hdr_info = &ulp_hdr_info[item->type];
153 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
154 goto hdr_parser_error;
155 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
156 /* call the registered callback handler */
157 if (hdr_info->proto_hdr_func) {
158 if (hdr_info->proto_hdr_func(item, params) !=
159 BNXT_TF_RC_SUCCESS) {
160 return BNXT_TF_RC_ERROR;
166 /* update the implied SVIF */
167 return ulp_rte_parser_implicit_match_port_process(params);
/* hdr_parser_error path (label line elided in this listing). */
170 BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
172 return BNXT_TF_RC_PARSE_ERR;
176 * Function to handle the parsing of RTE Flows and placing
177 * the RTE flow actions into the ulp structures.
/*
 * Mirror of the header parser for the action list: dispatches each action
 * to ulp_act_info[] (standard) or ulp_vendor_act_info[] (Broadcom vendor
 * range), then applies the implicit action port. Unsupported actions jump
 * to act_parser_error and return BNXT_TF_RC_ERROR.
 * NOTE(review): the action_item advance, closing braces, and the
 * act_parser_error label line are elided in this listing.
 */
180 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
181 struct ulp_rte_parser_params *params)
183 const struct rte_flow_action *action_item = actions;
184 struct bnxt_ulp_rte_act_info *hdr_info;
186 /* Parse all the items in the pattern */
187 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
188 if (action_item->type >=
189 (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_END) {
190 if (action_item->type >=
191 (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_LAST)
192 goto act_parser_error;
193 /* get the header information from bnxt actinfo table */
194 hdr_info = &ulp_vendor_act_info[action_item->type -
195 BNXT_RTE_FLOW_ACTION_TYPE_END];
197 if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
198 goto act_parser_error;
199 /* get the header information from the act info table */
200 hdr_info = &ulp_act_info[action_item->type];
202 if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
203 goto act_parser_error;
204 } else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
205 /* call the registered callback handler */
206 if (hdr_info->proto_act_func) {
207 if (hdr_info->proto_act_func(action_item,
209 BNXT_TF_RC_SUCCESS) {
210 return BNXT_TF_RC_ERROR;
216 /* update the implied port details */
217 ulp_rte_parser_implicit_act_port_process(params);
218 return BNXT_TF_RC_SUCCESS;
/* act_parser_error path (label line elided in this listing). */
221 BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
223 return BNXT_TF_RC_ERROR;
227 * Function to handle the post processing of the computed
228 * fields for the interface.
/*
 * Resolves the incoming dpdk port to an ifindex and fills the computed
 * PARIF fields: for ingress flows the physical-port PARIF; otherwise the
 * VF-func PARIF (when matching a VF representor, also setting the
 * MATCH_PORT_IS_VFREP flag) or the driver-func PARIF, plus the
 * MATCH_PORT_IS_PF flag for PF match ports.
 * NOTE(review): ifindex declaration, several return/brace lines, and the
 * values written by some ULP_COMP_FLD_IDX_WR calls are elided in this
 * listing -- confirm against the full source.
 */
231 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
234 uint16_t port_id, parif;
236 enum bnxt_ulp_direction_type dir;
238 /* get the direction details */
239 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
241 /* read the port id details */
242 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
243 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
246 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
250 if (dir == BNXT_ULP_DIR_INGRESS) {
/* Ingress: record the physical port PARIF. */
252 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
253 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
254 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
257 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
260 /* Get the match port type */
261 mtype = ULP_COMP_FLD_IDX_RD(params,
262 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
263 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
264 ULP_COMP_FLD_IDX_WR(params,
265 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
267 /* Set VF func PARIF */
268 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
269 BNXT_ULP_VF_FUNC_PARIF,
272 "ParseErr:ifindex is not valid\n");
275 ULP_COMP_FLD_IDX_WR(params,
276 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
280 /* Set DRV func PARIF */
281 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
282 BNXT_ULP_DRV_FUNC_PARIF,
285 "ParseErr:ifindex is not valid\n");
288 ULP_COMP_FLD_IDX_WR(params,
289 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
292 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
293 ULP_COMP_FLD_IDX_WR(params,
294 BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
/*
 * Post-processes a parsed (non-tunnel) flow: stamps the direction bit in
 * both header and action bitmaps, derives VF-to-VF and decrement-TTL
 * computed fields, merges the fast-path header bits, records the flow id,
 * and updates the interface computed fields.
 * NOTE(review): the return statement and some closing braces are elided
 * in this listing.
 */
301 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
303 enum bnxt_ulp_intf_type match_port_type, act_port_type;
304 enum bnxt_ulp_direction_type dir;
305 uint32_t act_port_set;
307 /* Get the computed details */
308 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
309 match_port_type = ULP_COMP_FLD_IDX_RD(params,
310 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
311 act_port_type = ULP_COMP_FLD_IDX_RD(params,
312 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
313 act_port_set = ULP_COMP_FLD_IDX_RD(params,
314 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
316 /* set the flow direction in the proto and action header */
317 if (dir == BNXT_ULP_DIR_EGRESS) {
318 ULP_BITMAP_SET(params->hdr_bitmap.bits,
319 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
320 ULP_BITMAP_SET(params->act_bitmap.bits,
321 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
324 /* calculate the VF to VF flag */
325 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
326 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
327 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
329 /* Update the decrement ttl computational fields */
330 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
331 BNXT_ULP_ACT_BIT_DEC_TTL)) {
333 * Check that vxlan proto is included and vxlan decap
334 * action is not set then decrement tunnel ttl.
335 * Similarly add GRE and NVGRE in future.
337 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
338 BNXT_ULP_HDR_BIT_T_VXLAN) &&
339 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
340 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
341 ULP_COMP_FLD_IDX_WR(params,
342 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
/* else branch (elided): non-tunnel dec-ttl. */
344 ULP_COMP_FLD_IDX_WR(params,
345 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
349 /* Merge the hdr_fp_bit into the proto header bit */
350 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
352 /* Update the comp fld fid */
353 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
355 /* Update the computed interface parameters */
356 bnxt_ulp_comp_fld_intf_update(params);
358 /* TBD: Handle the flow rejection scenarios */
363 * Function to handle the post processing of the parsing details
/* Thin wrapper: currently delegates to the normal-flow post-processor.
 * NOTE(review): the return statement is elided in this listing.
 */
366 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
368 ulp_post_process_normal_flow(params);
372 * Function to compute the flow direction based on the match port details
/*
 * Derives BNXT_ULP_CF_IDX_DIRECTION: an "ingress" flow that matches a VF
 * representor is treated as egress (traffic leaves toward the VF);
 * otherwise the direction follows the flow attribute bits.
 * NOTE(review): the else keyword(s) and closing braces between the three
 * branches are elided in this listing.
 */
375 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
377 enum bnxt_ulp_intf_type match_port_type;
379 /* Get the match port type */
380 match_port_type = ULP_COMP_FLD_IDX_RD(params,
381 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
383 /* If ingress flow and matchport is vf rep then dir is egress*/
384 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
385 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
386 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
387 BNXT_ULP_DIR_EGRESS);
389 /* Assign the input direction */
390 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
391 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
392 BNXT_ULP_DIR_INGRESS);
394 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
395 BNXT_ULP_DIR_EGRESS);
399 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Records the source VIF (SVIF) for the flow into the reserved hdr_field
 * slot: rejects a second SVIF source, sets the match port type, computes
 * direction, chooses the SVIF kind (phy port for ingress, VF-func for VF
 * reps, else drv-func), and stores the big-endian SVIF spec plus mask.
 * NOTE(review): the svif/mask parameter declarations and some braces are
 * elided; '¶ms' below is mojibake for '&params'.
 */
401 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
406 enum bnxt_ulp_direction_type dir;
407 struct ulp_rte_hdr_field *hdr_field;
408 enum bnxt_ulp_svif_type svif_type;
409 enum bnxt_ulp_intf_type port_type;
/* Only one source of SVIF (PF/VF/port_id/phy_port item) is allowed. */
411 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
412 BNXT_ULP_INVALID_SVIF_VAL) {
414 "SVIF already set,multiple source not support'd\n");
415 return BNXT_TF_RC_ERROR;
418 /* Get port type details */
419 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
420 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
421 BNXT_TF_DBG(ERR, "Invalid port type\n");
422 return BNXT_TF_RC_ERROR;
425 /* Update the match port type */
426 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
428 /* compute the direction */
429 bnxt_ulp_rte_parser_direction_compute(params);
431 /* Get the computed direction */
432 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
433 if (dir == BNXT_ULP_DIR_INGRESS) {
434 svif_type = BNXT_ULP_PHY_PORT_SVIF;
436 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
437 svif_type = BNXT_ULP_VF_FUNC_SVIF;
439 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
441 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
443 svif = rte_cpu_to_be_16(svif);
444 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
445 memcpy(hdr_field->spec, &svif, sizeof(svif));
446 memcpy(hdr_field->mask, &mask, sizeof(mask));
447 hdr_field->size = sizeof(svif);
/* SVIF_FLAG stores the CPU-order value so later items can detect it. */
448 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
449 rte_be_to_cpu_16(svif));
450 return BNXT_TF_RC_SUCCESS;
453 /* Function to handle the parsing of the RTE port id */
/*
 * If no explicit SVIF item was seen, derives it from the incoming dpdk
 * port: converts the port id to an ifindex and installs a full-mask SVIF.
 * NOTE(review): the ifindex declaration and the final return of rc are
 * elided in this listing.
 */
455 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
457 uint16_t port_id = 0;
458 uint16_t svif_mask = 0xFFFF;
460 int32_t rc = BNXT_TF_RC_ERROR;
/* An explicit SVIF already set by a pattern item wins; nothing to do. */
462 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
463 BNXT_ULP_INVALID_SVIF_VAL)
464 return BNXT_TF_RC_SUCCESS;
466 /* SVIF not set. So get the port id */
467 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF)
469 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
472 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
476 /* Update the SVIF details */
477 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
481 /* Function to handle the implicit action port id */
/*
 * If no port-id action was specified, synthesizes one from the incoming
 * interface and feeds it through the regular port-id action handler, then
 * clears ACT_PORT_IS_SET so the port is treated as implicit.
 */
483 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
485 struct rte_flow_action action_item = {0};
486 struct rte_flow_action_port_id port_id = {0};
488 /* Read the action port set bit */
489 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
490 /* Already set, so just exit */
491 return BNXT_TF_RC_SUCCESS;
493 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
494 action_item.conf = &port_id;
496 /* Update the action port based on incoming port */
497 ulp_rte_port_id_act_handler(&action_item, params);
499 /* Reset the action port set bit */
500 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
501 return BNXT_TF_RC_SUCCESS;
504 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item handler: the item itself carries no data, so the match port is
 * the incoming dpdk port; resolves it to an ifindex and sets a full-mask
 * SVIF. NOTE(review): the ifindex and svif_mask-use lines between the
 * declarations and the conversion are partially elided in this listing.
 */
506 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
507 struct ulp_rte_parser_params *params)
509 uint16_t port_id = 0;
510 uint16_t svif_mask = 0xFFFF;
513 /* Get the implicit port id */
514 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
516 /* perform the conversion from dpdk port to bnxt ifindex */
517 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
520 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
521 return BNXT_TF_RC_ERROR;
524 /* Update the SVIF details */
525 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
528 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF item handler: validates the VF spec/mask (error paths visible
 * below), maps the VF function id to an ifindex, and sets the SVIF.
 * NOTE(review): the spec/mask null checks, mask extraction, and ifindex
 * declaration are elided in this listing -- the DBG lines below are the
 * bodies of those elided checks.
 */
530 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
531 struct ulp_rte_parser_params *params)
533 const struct rte_flow_item_vf *vf_spec = item->spec;
534 const struct rte_flow_item_vf *vf_mask = item->mask;
537 int32_t rc = BNXT_TF_RC_PARSE_ERR;
539 /* Get VF rte_flow_item for Port details */
541 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
545 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
550 /* perform the conversion from VF Func id to bnxt ifindex */
551 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
554 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
557 /* Update the SVIF details */
558 return ulp_rte_parser_svif_set(params, ifindex, mask);
561 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * port_id item handler: validates spec/mask, uses port_mask->id as the
 * SVIF mask, maps the dpdk port to an ifindex, and sets the SVIF.
 * NOTE(review): the spec/mask null checks and ifindex declaration are
 * elided; the "Phy Port mask" wording in the second message looks
 * copy-pasted from the phy-port handler -- confirm in the full source.
 */
563 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
564 struct ulp_rte_parser_params *params)
566 const struct rte_flow_item_port_id *port_spec = item->spec;
567 const struct rte_flow_item_port_id *port_mask = item->mask;
569 int32_t rc = BNXT_TF_RC_PARSE_ERR;
573 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
577 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
580 mask = port_mask->id;
582 /* perform the conversion from dpdk port to bnxt ifindex */
583 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
586 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
589 /* Update the SVIF details */
590 return ulp_rte_parser_svif_set(params, ifindex, mask);
593 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * phy_port item handler: only valid for ingress flows. Validates the
 * spec/mask, looks up the physical port's SVIF from the port DB, and
 * writes it directly into the reserved SVIF hdr_field slot (this path
 * bypasses ulp_rte_parser_svif_set).
 * NOTE(review): spec/mask null checks, the svif declaration, and some
 * braces are elided; '¶ms' below is mojibake for '&params'.
 */
595 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
596 struct ulp_rte_parser_params *params)
598 const struct rte_flow_item_phy_port *port_spec = item->spec;
599 const struct rte_flow_item_phy_port *port_mask = item->mask;
601 int32_t rc = BNXT_TF_RC_ERROR;
603 enum bnxt_ulp_direction_type dir;
604 struct ulp_rte_hdr_field *hdr_field;
606 /* Copy the rte_flow_item for phy port into hdr_field */
608 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
612 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
615 mask = port_mask->index;
617 /* Update the match port type */
618 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
619 BNXT_ULP_INTF_TYPE_PHY_PORT);
621 /* Compute the Hw direction */
622 bnxt_ulp_rte_parser_direction_compute(params);
624 /* Direction validation */
625 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
626 if (dir == BNXT_ULP_DIR_EGRESS) {
628 "Parse Err:Phy ports are valid only for ingress\n");
629 return BNXT_TF_RC_PARSE_ERR;
632 /* Get the physical port details from port db */
633 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
636 BNXT_TF_DBG(ERR, "Failed to get port details\n");
637 return BNXT_TF_RC_PARSE_ERR;
640 /* Update the SVIF details */
641 svif = rte_cpu_to_be_16(svif);
642 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
643 memcpy(hdr_field->spec, &svif, sizeof(svif));
644 memcpy(hdr_field->mask, &mask, sizeof(mask));
645 hdr_field->size = sizeof(svif);
646 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
647 rte_be_to_cpu_16(svif));
648 return BNXT_TF_RC_SUCCESS;
651 /* Function to handle the update of proto header based on field values */
/*
 * From an L2 ethertype (big-endian), sets the inner/outer IPv4 or IPv6
 * fast-path header bit and the corresponding I_L3/O_L3 computed flag.
 * 'in_flag' selects inner (non-zero) vs outer headers.
 * NOTE(review): the if (in_flag) / else lines and closing braces are
 * elided in this listing.
 */
653 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
654 uint16_t type, uint32_t in_flag)
656 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
658 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
659 BNXT_ULP_HDR_BIT_I_IPV4);
660 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
662 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
663 BNXT_ULP_HDR_BIT_O_IPV4);
664 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
666 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
668 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
669 BNXT_ULP_HDR_BIT_I_IPV6);
670 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
672 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
673 BNXT_ULP_HDR_BIT_O_IPV6);
674 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
679 /* Internal Function to identify broadcast or multicast packets */
/*
 * Returns whether the MAC address is multicast or broadcast (offload of
 * such addresses is not supported, see caller work-around).
 * NOTE(review): the return statements are elided in this listing;
 * presumably true on bcast/mcast, false otherwise -- confirm.
 */
681 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
683 if (rte_is_multicast_ether_addr(eth_addr) ||
684 rte_is_broadcast_ether_addr(eth_addr)) {
686 "No support for bcast or mcast addr offload\n");
692 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * ETH item handler: rejects bcast/mcast addresses, reserves the ETH field
 * slots, copies dst MAC / src MAC / ethertype spec+mask pairs, decides
 * inner vs outer ETH from the already-seen header bits, and propagates
 * the ethertype into the L3 proto bits.
 * NOTE(review): 'ð_spec' is mojibake for '&eth_spec'; the eth_spec
 * null guard, dmac_idx capture, inner_flag assignment, and some braces
 * are elided in this listing.
 */
694 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
695 struct ulp_rte_parser_params *params)
697 const struct rte_flow_item_eth *eth_spec = item->spec;
698 const struct rte_flow_item_eth *eth_mask = item->mask;
699 uint32_t idx = 0, dmac_idx = 0;
701 uint16_t eth_type = 0;
702 uint32_t inner_flag = 0;
704 /* Perform validations */
706 /* Todo: work around to avoid multicast and broadcast addr */
707 if (ulp_rte_parser_is_bcmc_addr(ð_spec->dst))
708 return BNXT_TF_RC_PARSE_ERR;
710 if (ulp_rte_parser_is_bcmc_addr(ð_spec->src))
711 return BNXT_TF_RC_PARSE_ERR;
713 eth_type = eth_spec->type;
716 if (ulp_rte_prsr_fld_size_validate(params, &idx,
717 BNXT_ULP_PROTO_HDR_ETH_NUM)) {
718 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
719 return BNXT_TF_RC_ERROR;
722 * Copy the rte_flow_item for eth into hdr_field using ethernet
726 size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
727 ulp_rte_prsr_fld_mask(params, &idx, size,
728 ulp_deference_struct(eth_spec, dst.addr_bytes),
729 ulp_deference_struct(eth_mask, dst.addr_bytes),
730 ULP_PRSR_ACT_DEFAULT);
732 size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
733 ulp_rte_prsr_fld_mask(params, &idx, size,
734 ulp_deference_struct(eth_spec, src.addr_bytes),
735 ulp_deference_struct(eth_mask, src.addr_bytes),
736 ULP_PRSR_ACT_DEFAULT);
/* Ethertype is parsed for classification but excluded from the exact
 * match (MATCH_IGNORE); ulp_rte_l2_proto_type_update consumes it below.
 */
738 size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
739 ulp_rte_prsr_fld_mask(params, &idx, size,
740 ulp_deference_struct(eth_spec, type),
741 ulp_deference_struct(eth_mask, type),
742 ULP_PRSR_ACT_MATCH_IGNORE);
744 /* Update the protocol hdr bitmap */
745 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
746 BNXT_ULP_HDR_BIT_O_ETH) ||
747 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
748 BNXT_ULP_HDR_BIT_O_IPV4) ||
749 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
750 BNXT_ULP_HDR_BIT_O_IPV6) ||
751 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
752 BNXT_ULP_HDR_BIT_O_UDP) ||
753 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
754 BNXT_ULP_HDR_BIT_O_TCP)) {
755 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
/* else branch (elided): first ETH header seen -> outer ETH. */
758 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
759 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
762 /* Update the field protocol hdr bitmap */
763 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
765 return BNXT_TF_RC_SUCCESS;
768 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * VLAN item handler: splits TCI into priority (top 3 bits) and VLAN id
 * (low 12 bits), widens all-ones sub-masks to full 16-bit exact-match
 * masks, copies priority/tag/inner_type into hdr_field slots, then
 * classifies the tag as outer-outer / outer-inner / inner-outer /
 * inner-inner based on which ETH headers were seen and the current tag
 * counts, updating the vtag computed fields and FB_VID flags.
 * NOTE(review): the vlan_spec/vlan_mask null guards, idx/size
 * declarations, vtag-count increments, inner_flag update, and several
 * braces are elided in this listing.
 */
770 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
771 struct ulp_rte_parser_params *params)
773 const struct rte_flow_item_vlan *vlan_spec = item->spec;
774 const struct rte_flow_item_vlan *vlan_mask = item->mask;
775 struct ulp_rte_hdr_bitmap *hdr_bit;
777 uint16_t vlan_tag = 0, priority = 0;
778 uint16_t vlan_tag_mask = 0, priority_mask = 0;
779 uint32_t outer_vtag_num;
780 uint32_t inner_vtag_num;
781 uint16_t eth_type = 0;
782 uint32_t inner_flag = 0;
/* Spec: split host-order TCI into priority and 12-bit tag, then convert
 * both back to network order for the hdr_field copy.
 */
786 vlan_tag = ntohs(vlan_spec->tci);
787 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
788 vlan_tag &= ULP_VLAN_TAG_MASK;
789 vlan_tag = htons(vlan_tag);
790 eth_type = vlan_spec->inner_type;
/* Mask: same split for the TCI mask. */
794 vlan_tag_mask = ntohs(vlan_mask->tci);
795 priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
796 vlan_tag_mask &= 0xfff;
799 * the storage for priority and vlan tag is 2 bytes
800 * The mask of priority which is 3 bits if it is all 1's
801 * then make the rest bits 13 bits as 1's
802 * so that it is matched as exact match.
804 if (priority_mask == ULP_VLAN_PRIORITY_MASK)
805 priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
806 if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
807 vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
808 vlan_tag_mask = htons(vlan_tag_mask);
811 if (ulp_rte_prsr_fld_size_validate(params, &idx,
812 BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
813 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
814 return BNXT_TF_RC_ERROR;
818 * Copy the rte_flow_item for vlan into hdr_field using Vlan
821 size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
823 * The priority field is ignored since OVS is setting it as
824 * wild card match and it is not supported. This is a work
825 * around and shall be addressed in the future.
827 ulp_rte_prsr_fld_mask(params, &idx, size,
829 (vlan_mask) ? &priority_mask : NULL,
830 ULP_PRSR_ACT_MASK_IGNORE);
832 ulp_rte_prsr_fld_mask(params, &idx, size,
834 (vlan_mask) ? &vlan_tag_mask : NULL,
835 ULP_PRSR_ACT_DEFAULT);
837 size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
838 ulp_rte_prsr_fld_mask(params, &idx, size,
839 ulp_deference_struct(vlan_spec, inner_type),
840 ulp_deference_struct(vlan_mask, inner_type),
841 ULP_PRSR_ACT_MATCH_IGNORE);
843 /* Get the outer tag and inner tag counts */
844 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
845 BNXT_ULP_CF_IDX_O_VTAG_NUM);
846 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
847 BNXT_ULP_CF_IDX_I_VTAG_NUM);
849 /* Update the hdr_bitmap of the vlans */
850 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first tag after outer ETH -> outer-outer (OO) VLAN. */
851 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
852 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
854 /* Update the vlan tag num */
856 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
858 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
859 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
860 ULP_BITMAP_SET(params->hdr_bitmap.bits,
861 BNXT_ULP_HDR_BIT_OO_VLAN);
862 if (vlan_mask && vlan_tag_mask)
863 ULP_COMP_FLD_IDX_WR(params,
864 BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);
/* Case 2: second tag after outer ETH -> outer-inner (OI) VLAN. */
866 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
867 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
868 outer_vtag_num == 1) {
869 /* update the vlan tag num */
871 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
873 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
874 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
875 ULP_BITMAP_SET(params->hdr_bitmap.bits,
876 BNXT_ULP_HDR_BIT_OI_VLAN);
877 if (vlan_mask && vlan_tag_mask)
878 ULP_COMP_FLD_IDX_WR(params,
879 BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);
/* Case 3: first tag after inner ETH -> inner-outer (IO) VLAN. */
881 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
882 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
884 /* update the vlan tag num */
886 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
888 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
889 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
890 ULP_BITMAP_SET(params->hdr_bitmap.bits,
891 BNXT_ULP_HDR_BIT_IO_VLAN);
892 if (vlan_mask && vlan_tag_mask)
893 ULP_COMP_FLD_IDX_WR(params,
894 BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
/* Case 4: second tag after inner ETH -> inner-inner (II) VLAN. */
896 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
897 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
898 inner_vtag_num == 1) {
899 /* update the vlan tag num */
901 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
903 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
904 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
905 ULP_BITMAP_SET(params->hdr_bitmap.bits,
906 BNXT_ULP_HDR_BIT_II_VLAN);
907 if (vlan_mask && vlan_tag_mask)
908 ULP_COMP_FLD_IDX_WR(params,
909 BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
912 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
913 return BNXT_TF_RC_ERROR;
915 /* Update the field protocol hdr bitmap */
916 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
917 return BNXT_TF_RC_SUCCESS;
920 /* Function to handle the update of proto header based on field values */
/*
 * From an L3 next-protocol value, sets the inner/outer UDP or TCP
 * fast-path bits (plus I_L4/O_L4 flags), the tunnel GRE bit, or the
 * inner/outer ICMP bit (inner when an L3 tunnel was seen). The trailing
 * ULP_COMP_FLD_IDX_WR calls record the proto id and its "field present"
 * flag for inner vs outer L3 headers.
 * NOTE(review): the if (in_flag)/else scaffolding, the guard around the
 * trailing writes, and the written values are partly elided here.
 */
922 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
923 uint8_t proto, uint32_t in_flag)
925 if (proto == IPPROTO_UDP) {
927 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
928 BNXT_ULP_HDR_BIT_I_UDP);
929 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
931 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
932 BNXT_ULP_HDR_BIT_O_UDP);
933 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
935 } else if (proto == IPPROTO_TCP) {
937 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
938 BNXT_ULP_HDR_BIT_I_TCP);
939 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
941 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
942 BNXT_ULP_HDR_BIT_O_TCP);
943 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
945 } else if (proto == IPPROTO_GRE) {
946 ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
947 } else if (proto == IPPROTO_ICMP) {
948 if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
949 ULP_BITMAP_SET(param->hdr_bitmap.bits,
950 BNXT_ULP_HDR_BIT_I_ICMP);
952 ULP_BITMAP_SET(param->hdr_bitmap.bits,
953 BNXT_ULP_HDR_BIT_O_ICMP);
957 ULP_COMP_FLD_IDX_WR(param,
958 BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
960 ULP_COMP_FLD_IDX_WR(param,
961 BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
964 ULP_COMP_FLD_IDX_WR(param,
965 BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
967 ULP_COMP_FLD_IDX_WR(param,
968 BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
974 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * IPv4 item handler: rejects a third L3 header, reserves the IPv4 field
 * slots, copies every IPv4 header field's spec/mask pair in wire order
 * (tos and next_proto are parsed but excluded from the exact match),
 * classifies the header as inner or outer, and feeds the (mask-qualified)
 * protocol into the L4 bit update.
 * NOTE(review): '¶ms' is mojibake for '&params'; the cnt/size/proto
 * declarations, the cnt >= 2 comparison, dip_idx capture, inner_flag
 * assignment, the ipv4_spec/ipv4_mask null guards around the proto
 * reads, and several braces are elided in this listing.
 */
976 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
977 struct ulp_rte_parser_params *params)
979 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
980 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
981 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
982 uint32_t idx = 0, dip_idx = 0;
985 uint32_t inner_flag = 0;
988 /* validate there are no 3rd L3 header */
989 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
991 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
992 return BNXT_TF_RC_ERROR;
995 if (ulp_rte_prsr_fld_size_validate(params, &idx,
996 BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
997 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
998 return BNXT_TF_RC_ERROR;
1002 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1005 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
1006 ulp_rte_prsr_fld_mask(params, &idx, size,
1007 ulp_deference_struct(ipv4_spec, hdr.version_ihl),
1008 ulp_deference_struct(ipv4_mask, hdr.version_ihl),
1009 ULP_PRSR_ACT_DEFAULT);
1012 * The tos field is ignored since OVS is setting it as wild card
1013 * match and it is not supported. This is a work around and
1014 * shall be addressed in the future.
1016 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
1017 ulp_rte_prsr_fld_mask(params, &idx, size,
1018 ulp_deference_struct(ipv4_spec,
1019 hdr.type_of_service),
1020 ulp_deference_struct(ipv4_mask,
1021 hdr.type_of_service),
1022 ULP_PRSR_ACT_MASK_IGNORE);
1024 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
1025 ulp_rte_prsr_fld_mask(params, &idx, size,
1026 ulp_deference_struct(ipv4_spec, hdr.total_length),
1027 ulp_deference_struct(ipv4_mask, hdr.total_length),
1028 ULP_PRSR_ACT_DEFAULT);
1030 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
1031 ulp_rte_prsr_fld_mask(params, &idx, size,
1032 ulp_deference_struct(ipv4_spec, hdr.packet_id),
1033 ulp_deference_struct(ipv4_mask, hdr.packet_id),
1034 ULP_PRSR_ACT_DEFAULT);
1036 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
1037 ulp_rte_prsr_fld_mask(params, &idx, size,
1038 ulp_deference_struct(ipv4_spec,
1039 hdr.fragment_offset),
1040 ulp_deference_struct(ipv4_mask,
1041 hdr.fragment_offset),
1042 ULP_PRSR_ACT_DEFAULT);
1044 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
1045 ulp_rte_prsr_fld_mask(params, &idx, size,
1046 ulp_deference_struct(ipv4_spec, hdr.time_to_live),
1047 ulp_deference_struct(ipv4_mask, hdr.time_to_live),
1048 ULP_PRSR_ACT_DEFAULT);
1050 /* Ignore proto for matching templates */
1051 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
1052 ulp_rte_prsr_fld_mask(params, &idx, size,
1053 ulp_deference_struct(ipv4_spec,
1055 ulp_deference_struct(ipv4_mask,
1057 ULP_PRSR_ACT_MATCH_IGNORE);
1059 proto = ipv4_spec->hdr.next_proto_id;
1061 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
1062 ulp_rte_prsr_fld_mask(params, &idx, size,
1063 ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
1064 ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
1065 ULP_PRSR_ACT_DEFAULT);
1067 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
1068 ulp_rte_prsr_fld_mask(params, &idx, size,
1069 ulp_deference_struct(ipv4_spec, hdr.src_addr),
1070 ulp_deference_struct(ipv4_mask, hdr.src_addr),
1071 ULP_PRSR_ACT_DEFAULT);
1074 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
1075 ulp_rte_prsr_fld_mask(params, &idx, size,
1076 ulp_deference_struct(ipv4_spec, hdr.dst_addr),
1077 ulp_deference_struct(ipv4_mask, hdr.dst_addr),
1078 ULP_PRSR_ACT_DEFAULT);
1080 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1081 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1082 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1083 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1084 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
/* else branch (elided): first L3 header seen -> outer IPv4. */
1087 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1088 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1089 /* Update the tunnel offload dest ip offset */
1090 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1094 /* Some of the PMD applications may set the protocol field
1095 * in the IPv4 spec but don't set the mask. So, consider
1096 * the mask in the proto value calculation.
1099 proto &= ipv4_mask->hdr.next_proto_id;
1101 /* Update the field protocol hdr bitmap */
1102 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1103 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1104 return BNXT_TF_RC_SUCCESS;
1107 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * Translate an RTE_FLOW_ITEM_TYPE_IPV6 item into the ULP hdr_field array
 * and update the header/field bitmaps and computed fields of @params.
 * Returns BNXT_TF_RC_SUCCESS on success, BNXT_TF_RC_ERROR on failure.
 * NOTE(review): this extract elides some source lines (return type, braces,
 * NULL guards such as "if (ipv6_spec)"); "¶ms" appears to be mojibake
 * for "&params" — verify against the full file.
 */
1109 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1110 struct ulp_rte_parser_params *params)
1112 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1113 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1114 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1115 uint32_t idx = 0, dip_idx = 0;
1117 uint32_t ver_spec = 0, ver_mask = 0;
1118 uint32_t tc_spec = 0, tc_mask = 0;
1119 uint32_t lab_spec = 0, lab_mask = 0;
1121 uint32_t inner_flag = 0;
/* Only two L3 headers (outer + inner) are supported per flow. */
1124 /* validate there are no 3rd L3 header */
1125 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1127 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1128 return BNXT_TF_RC_ERROR;
/* Ensure enough hdr_field slots remain for all IPv6 sub-fields. */
1131 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1132 BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
1133 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1134 return BNXT_TF_RC_ERROR;
1138 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* Split vtc_flow into version, traffic class and flow label pieces. */
1142 ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1143 tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1144 lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1145 proto = ipv6_spec->hdr.proto;
1149 ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1150 tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1151 lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1153 /* Some of the PMD applications may set the protocol field
1154 * in the IPv6 spec but don't set the mask. So, consider
1155 * the mask in proto value calculation.
1157 proto &= ipv6_mask->hdr.proto;
1160 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
1161 ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
1162 ULP_PRSR_ACT_DEFAULT);
1164 * The TC and flow label field are ignored since OVS is
1165 * setting it for match and it is not supported.
1166 * This is a work around and
1167 * shall be addressed in the future.
1169 ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
1170 ULP_PRSR_ACT_MASK_IGNORE);
1171 ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
1172 ULP_PRSR_ACT_MASK_IGNORE);
1174 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
1175 ulp_rte_prsr_fld_mask(params, &idx, size,
1176 ulp_deference_struct(ipv6_spec, hdr.payload_len),
1177 ulp_deference_struct(ipv6_mask, hdr.payload_len),
1178 ULP_PRSR_ACT_DEFAULT);
1180 /* Ignore proto for template matching */
1181 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
1182 ulp_rte_prsr_fld_mask(params, &idx, size,
1183 ulp_deference_struct(ipv6_spec, hdr.proto),
1184 ulp_deference_struct(ipv6_mask, hdr.proto),
1185 ULP_PRSR_ACT_MATCH_IGNORE);
1187 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
1188 ulp_rte_prsr_fld_mask(params, &idx, size,
1189 ulp_deference_struct(ipv6_spec, hdr.hop_limits),
1190 ulp_deference_struct(ipv6_mask, hdr.hop_limits),
1191 ULP_PRSR_ACT_DEFAULT);
1193 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
1194 ulp_rte_prsr_fld_mask(params, &idx, size,
1195 ulp_deference_struct(ipv6_spec, hdr.src_addr),
1196 ulp_deference_struct(ipv6_mask, hdr.src_addr),
1197 ULP_PRSR_ACT_DEFAULT);
1200 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
1201 ulp_rte_prsr_fld_mask(params, &idx, size,
1202 ulp_deference_struct(ipv6_spec, hdr.dst_addr),
1203 ulp_deference_struct(ipv6_mask, hdr.dst_addr),
1204 ULP_PRSR_ACT_DEFAULT);
1206 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
/* An outer L3 header was already seen: classify this one as inner. */
1207 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1208 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1209 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1210 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
/* Otherwise this is the outer IPv6 header. */
1213 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1214 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1215 /* Update the tunnel offload dest ip offset */
1216 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1220 /* Update the field protocol hdr bitmap */
1221 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1222 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1224 return BNXT_TF_RC_SUCCESS;
1227 /* Function to handle the update of proto header based on field values */
/*
 * Classify tunnel encapsulation from the L4 destination port: a dst port of
 * 4789 (ULP_UDP_PORT_VXLAN, big-endian compare) marks VXLAN in hdr_fp_bit
 * and sets the L3_TUN computed field. Independently, if VXLAN or GRE is
 * already present in hdr_bitmap, L3_TUN is (re)asserted.
 * NOTE(review): the second parameter (dst_port) line is elided in this
 * extract — confirm the full signature against the complete file.
 */
1229 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1232 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1233 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1234 BNXT_ULP_HDR_BIT_T_VXLAN);
1235 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
/* A tunnel header already parsed also implies an L3 tunnel flow. */
1238 if (ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1239 BNXT_ULP_HDR_BIT_T_VXLAN) ||
1240 ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1241 BNXT_ULP_HDR_BIT_T_GRE))
1242 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1245 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Translate an RTE_FLOW_ITEM_TYPE_UDP item into hdr_field entries and update
 * the header bitmaps / computed L4 fields (src/dst port both as match fields
 * and as computed-field values). The dst port also feeds tunnel detection
 * via ulp_rte_l4_proto_type_update() (VXLAN well-known port).
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 * NOTE(review): NULL guards (e.g. "if (udp_spec)") and some declarations
 * are elided in this extract — verify against the full file.
 */
1247 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1248 struct ulp_rte_parser_params *params)
1250 const struct rte_flow_item_udp *udp_spec = item->spec;
1251 const struct rte_flow_item_udp *udp_mask = item->mask;
1252 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1255 uint16_t dport = 0, sport = 0;
/* Only two L4 headers (outer + inner) are supported per flow. */
1258 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1260 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1261 return BNXT_TF_RC_ERROR;
1265 sport = udp_spec->hdr.src_port;
1266 dport = udp_spec->hdr.dst_port;
/* Ensure enough hdr_field slots remain for all UDP sub-fields. */
1269 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1270 BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1271 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1272 return BNXT_TF_RC_ERROR;
1276 * Copy the rte_flow_item for udp into hdr_field using udp
1279 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1280 ulp_rte_prsr_fld_mask(params, &idx, size,
1281 ulp_deference_struct(udp_spec, hdr.src_port),
1282 ulp_deference_struct(udp_mask, hdr.src_port),
1283 ULP_PRSR_ACT_DEFAULT);
1285 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1286 ulp_rte_prsr_fld_mask(params, &idx, size,
1287 ulp_deference_struct(udp_spec, hdr.dst_port),
1288 ulp_deference_struct(udp_mask, hdr.dst_port),
1289 ULP_PRSR_ACT_DEFAULT);
1291 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1292 ulp_rte_prsr_fld_mask(params, &idx, size,
1293 ulp_deference_struct(udp_spec, hdr.dgram_len),
1294 ulp_deference_struct(udp_mask, hdr.dgram_len),
1295 ULP_PRSR_ACT_DEFAULT);
1297 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1298 ulp_rte_prsr_fld_mask(params, &idx, size,
1299 ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1300 ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1301 ULP_PRSR_ACT_DEFAULT);
1303 /* Set the udp header bitmap and computed l4 header bitmaps */
/* An outer L4 header already parsed: classify this UDP as inner. */
1304 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1305 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1306 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1307 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1308 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1309 (uint32_t)rte_be_to_cpu_16(sport));
1310 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1311 (uint32_t)rte_be_to_cpu_16(dport));
1312 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1314 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
/* Record "fully-bound" port fields only when the mask matches on them. */
1316 if (udp_mask && udp_mask->hdr.src_port)
1317 ULP_COMP_FLD_IDX_WR(params,
1318 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1320 if (udp_mask && udp_mask->hdr.dst_port)
1321 ULP_COMP_FLD_IDX_WR(params,
1322 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
/* Otherwise this is the outer UDP header. */
1325 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1326 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1327 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1328 (uint32_t)rte_be_to_cpu_16(sport));
1329 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1330 (uint32_t)rte_be_to_cpu_16(dport));
1331 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1333 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1335 if (udp_mask && udp_mask->hdr.src_port)
1336 ULP_COMP_FLD_IDX_WR(params,
1337 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1339 if (udp_mask && udp_mask->hdr.dst_port)
1340 ULP_COMP_FLD_IDX_WR(params,
1341 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1344 /* Update the field protocol hdr bitmap */
1345 ulp_rte_l4_proto_type_update(params, dport);
1347 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1348 return BNXT_TF_RC_SUCCESS;
1351 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Translate an RTE_FLOW_ITEM_TYPE_TCP item into hdr_field entries and update
 * the header bitmaps / computed L4 fields. Mirrors the UDP handler, but
 * covers the full TCP header (ports, seq/ack, data offset, flags, window,
 * checksum, urgent pointer) and does not feed tunnel detection.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 * NOTE(review): NULL guards (e.g. "if (tcp_spec)") and some declarations
 * are elided in this extract — verify against the full file.
 */
1353 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1354 struct ulp_rte_parser_params *params)
1356 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1357 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1358 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1360 uint16_t dport = 0, sport = 0;
/* Only two L4 headers (outer + inner) are supported per flow. */
1364 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1366 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1367 return BNXT_TF_RC_ERROR;
1371 sport = tcp_spec->hdr.src_port;
1372 dport = tcp_spec->hdr.dst_port;
/* Ensure enough hdr_field slots remain for all TCP sub-fields. */
1375 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1376 BNXT_ULP_PROTO_HDR_TCP_NUM)) {
1377 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1378 return BNXT_TF_RC_ERROR;
1382 * Copy the rte_flow_item for tcp into hdr_field using tcp
1385 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1386 ulp_rte_prsr_fld_mask(params, &idx, size,
1387 ulp_deference_struct(tcp_spec, hdr.src_port),
1388 ulp_deference_struct(tcp_mask, hdr.src_port),
1389 ULP_PRSR_ACT_DEFAULT);
1391 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1392 ulp_rte_prsr_fld_mask(params, &idx, size,
1393 ulp_deference_struct(tcp_spec, hdr.dst_port),
1394 ulp_deference_struct(tcp_mask, hdr.dst_port),
1395 ULP_PRSR_ACT_DEFAULT);
1397 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1398 ulp_rte_prsr_fld_mask(params, &idx, size,
1399 ulp_deference_struct(tcp_spec, hdr.sent_seq),
1400 ulp_deference_struct(tcp_mask, hdr.sent_seq),
1401 ULP_PRSR_ACT_DEFAULT);
1403 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1404 ulp_rte_prsr_fld_mask(params, &idx, size,
1405 ulp_deference_struct(tcp_spec, hdr.recv_ack),
1406 ulp_deference_struct(tcp_mask, hdr.recv_ack),
1407 ULP_PRSR_ACT_DEFAULT);
1409 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1410 ulp_rte_prsr_fld_mask(params, &idx, size,
1411 ulp_deference_struct(tcp_spec, hdr.data_off),
1412 ulp_deference_struct(tcp_mask, hdr.data_off),
1413 ULP_PRSR_ACT_DEFAULT);
1415 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1416 ulp_rte_prsr_fld_mask(params, &idx, size,
1417 ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1418 ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1419 ULP_PRSR_ACT_DEFAULT);
1421 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1422 ulp_rte_prsr_fld_mask(params, &idx, size,
1423 ulp_deference_struct(tcp_spec, hdr.rx_win),
1424 ulp_deference_struct(tcp_mask, hdr.rx_win),
1425 ULP_PRSR_ACT_DEFAULT);
1427 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1428 ulp_rte_prsr_fld_mask(params, &idx, size,
1429 ulp_deference_struct(tcp_spec, hdr.cksum),
1430 ulp_deference_struct(tcp_mask, hdr.cksum),
1431 ULP_PRSR_ACT_DEFAULT);
1433 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1434 ulp_rte_prsr_fld_mask(params, &idx, size,
1435 ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1436 ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1437 ULP_PRSR_ACT_DEFAULT);
1439 /* Set the tcp header bitmap and computed l4 header bitmaps */
/* An outer L4 header already parsed: classify this TCP as inner. */
1440 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1441 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1442 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1443 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1444 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1445 (uint32_t)rte_be_to_cpu_16(sport));
1446 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1447 (uint32_t)rte_be_to_cpu_16(dport));
1448 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1450 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
/* Record "fully-bound" port fields only when the mask matches on them. */
1452 if (tcp_mask && tcp_mask->hdr.src_port)
1453 ULP_COMP_FLD_IDX_WR(params,
1454 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1456 if (tcp_mask && tcp_mask->hdr.dst_port)
1457 ULP_COMP_FLD_IDX_WR(params,
1458 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
/* Otherwise this is the outer TCP header. */
1461 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1462 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1463 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1464 (uint32_t)rte_be_to_cpu_16(sport));
1465 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1466 (uint32_t)rte_be_to_cpu_16(dport));
1467 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1469 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1471 if (tcp_mask && tcp_mask->hdr.src_port)
1472 ULP_COMP_FLD_IDX_WR(params,
1473 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1475 if (tcp_mask && tcp_mask->hdr.dst_port)
1476 ULP_COMP_FLD_IDX_WR(params,
1477 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1480 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1481 return BNXT_TF_RC_SUCCESS;
1484 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Translate an RTE_FLOW_ITEM_TYPE_VXLAN item (flags, rsvd0, vni, rsvd1)
 * into hdr_field entries, set the T_VXLAN header bit, and re-run
 * ulp_rte_l4_proto_type_update(params, 0) so the L3_TUN computed field is
 * asserted now that the VXLAN bit is set (the 0 dst port is a don't-care).
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1486 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1487 struct ulp_rte_parser_params *params)
1489 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1490 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1491 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
/* Ensure enough hdr_field slots remain for all VXLAN sub-fields. */
1495 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1496 BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1497 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1498 return BNXT_TF_RC_ERROR;
1502 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1505 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1506 ulp_rte_prsr_fld_mask(params, &idx, size,
1507 ulp_deference_struct(vxlan_spec, flags),
1508 ulp_deference_struct(vxlan_mask, flags),
1509 ULP_PRSR_ACT_DEFAULT);
1511 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1512 ulp_rte_prsr_fld_mask(params, &idx, size,
1513 ulp_deference_struct(vxlan_spec, rsvd0),
1514 ulp_deference_struct(vxlan_mask, rsvd0),
1515 ULP_PRSR_ACT_DEFAULT);
1517 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1518 ulp_rte_prsr_fld_mask(params, &idx, size,
1519 ulp_deference_struct(vxlan_spec, vni),
1520 ulp_deference_struct(vxlan_mask, vni),
1521 ULP_PRSR_ACT_DEFAULT);
1523 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1524 ulp_rte_prsr_fld_mask(params, &idx, size,
1525 ulp_deference_struct(vxlan_spec, rsvd1),
1526 ulp_deference_struct(vxlan_mask, rsvd1),
1527 ULP_PRSR_ACT_DEFAULT);
1529 /* Update the hdr_bitmap with vxlan */
1530 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1531 ulp_rte_l4_proto_type_update(params, 0);
1532 return BNXT_TF_RC_SUCCESS;
1535 /* Function to handle the parsing of RTE Flow item GRE Header. */
/*
 * Translate an RTE_FLOW_ITEM_TYPE_GRE item (c_rsvd0_ver, protocol) into
 * hdr_field entries, set the T_GRE header bit, and re-run
 * ulp_rte_l4_proto_type_update(params, 0) so the L3_TUN computed field is
 * asserted now that the GRE bit is set.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1537 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1538 struct ulp_rte_parser_params *params)
1540 const struct rte_flow_item_gre *gre_spec = item->spec;
1541 const struct rte_flow_item_gre *gre_mask = item->mask;
1542 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
/* Ensure enough hdr_field slots remain for all GRE sub-fields. */
1546 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1547 BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1548 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1549 return BNXT_TF_RC_ERROR;
1552 size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1553 ulp_rte_prsr_fld_mask(params, &idx, size,
1554 ulp_deference_struct(gre_spec, c_rsvd0_ver),
1555 ulp_deference_struct(gre_mask, c_rsvd0_ver),
1556 ULP_PRSR_ACT_DEFAULT);
1558 size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1559 ulp_rte_prsr_fld_mask(params, &idx, size,
1560 ulp_deference_struct(gre_spec, protocol),
1561 ulp_deference_struct(gre_mask, protocol),
1562 ULP_PRSR_ACT_DEFAULT);
1564 /* Update the hdr_bitmap with GRE */
1565 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1566 ulp_rte_l4_proto_type_update(params, 0);
1567 return BNXT_TF_RC_SUCCESS;
1570 /* Function to handle the parsing of RTE Flow item ANY. */
/* No-op: the ANY item imposes no match constraints, so just accept it. */
1572 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1573 struct ulp_rte_parser_params *params __rte_unused)
1575 return BNXT_TF_RC_SUCCESS;
1578 /* Function to handle the parsing of RTE Flow item ICMP Header. */
/*
 * Translate an RTE_FLOW_ITEM_TYPE_ICMP item (type, code, checksum, ident,
 * sequence number) into hdr_field entries and set the inner or outer ICMP
 * header bit depending on whether the flow is already an L3 tunnel.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1580 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1581 struct ulp_rte_parser_params *params)
1583 const struct rte_flow_item_icmp *icmp_spec = item->spec;
1584 const struct rte_flow_item_icmp *icmp_mask = item->mask;
1585 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
/* Ensure enough hdr_field slots remain for all ICMP sub-fields. */
1589 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1590 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1591 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1592 return BNXT_TF_RC_ERROR;
1595 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1596 ulp_rte_prsr_fld_mask(params, &idx, size,
1597 ulp_deference_struct(icmp_spec, hdr.icmp_type),
1598 ulp_deference_struct(icmp_mask, hdr.icmp_type),
1599 ULP_PRSR_ACT_DEFAULT);
1601 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1602 ulp_rte_prsr_fld_mask(params, &idx, size,
1603 ulp_deference_struct(icmp_spec, hdr.icmp_code),
1604 ulp_deference_struct(icmp_mask, hdr.icmp_code),
1605 ULP_PRSR_ACT_DEFAULT);
1607 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1608 ulp_rte_prsr_fld_mask(params, &idx, size,
1609 ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1610 ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1611 ULP_PRSR_ACT_DEFAULT);
1613 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1614 ulp_rte_prsr_fld_mask(params, &idx, size,
1615 ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1616 ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1617 ULP_PRSR_ACT_DEFAULT);
1619 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1620 ulp_rte_prsr_fld_mask(params, &idx, size,
1621 ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1622 ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1623 ULP_PRSR_ACT_DEFAULT);
1625 /* Update the hdr_bitmap with ICMP */
/* Tunnel flows carry ICMP on the inner headers; otherwise it is outer. */
1626 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1627 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1629 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1630 return BNXT_TF_RC_SUCCESS;
1633 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
/*
 * Translate an RTE_FLOW_ITEM_TYPE_ICMP6 item (type, code, checksum) into
 * hdr_field entries. Rejects the combination of ICMPv6 over an outer IPv4
 * header, then sets the inner or outer ICMP header bit depending on whether
 * the flow is already an L3 tunnel.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1635 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1636 struct ulp_rte_parser_params *params)
1638 const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1639 const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1640 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
/* Ensure enough hdr_field slots remain for all ICMP6 sub-fields. */
1644 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1645 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1646 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1647 return BNXT_TF_RC_ERROR;
1650 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1651 ulp_rte_prsr_fld_mask(params, &idx, size,
1652 ulp_deference_struct(icmp_spec, type),
1653 ulp_deference_struct(icmp_mask, type),
1654 ULP_PRSR_ACT_DEFAULT);
1656 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1657 ulp_rte_prsr_fld_mask(params, &idx, size,
1658 ulp_deference_struct(icmp_spec, code),
1659 ulp_deference_struct(icmp_mask, code),
1660 ULP_PRSR_ACT_DEFAULT);
1662 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
1663 ulp_rte_prsr_fld_mask(params, &idx, size,
1664 ulp_deference_struct(icmp_spec, checksum),
1665 ulp_deference_struct(icmp_mask, checksum),
1666 ULP_PRSR_ACT_DEFAULT);
/* ICMPv6 cannot follow an outer IPv4 header. */
1668 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
1669 BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
1670 return BNXT_TF_RC_ERROR;
1673 /* Update the hdr_bitmap with ICMP */
1674 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1675 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP)
1677 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1678 return BNXT_TF_RC_SUCCESS;
1681 /* Function to handle the parsing of RTE Flow item void Header */
/* No-op: VOID items are placeholders and contribute nothing to the match. */
1683 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1684 struct ulp_rte_parser_params *params __rte_unused)
1686 return BNXT_TF_RC_SUCCESS;
1689 /* Function to handle the parsing of RTE Flow action void Header. */
/* No-op: VOID actions are placeholders and contribute nothing to the flow. */
1691 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1692 struct ulp_rte_parser_params *params __rte_unused)
1694 return BNXT_TF_RC_SUCCESS;
1697 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Store the MARK action id (converted to big-endian) into the action
 * properties and set the MARK action bit; fails when the action conf is
 * missing. NOTE(review): the guard line (likely "if (mark) {") that selects
 * between the success and error paths is elided in this extract.
 */
1699 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1700 struct ulp_rte_parser_params *param)
1702 const struct rte_flow_action_mark *mark;
1703 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1706 mark = action_item->conf;
1708 mark_id = tfp_cpu_to_be_32(mark->id);
1709 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1710 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1712 /* Update the act_bitmap with the mark action */
1713 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1714 return BNXT_TF_RC_SUCCESS;
1716 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1717 return BNXT_TF_RC_ERROR;
1720 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Copy the RSS action configuration (types, level, key_len and key) into the
 * action properties and set the RSS action bit. The key length is bounded by
 * BNXT_ULP_ACT_PROP_SZ_RSS_KEY. Returns BNXT_TF_RC_SUCCESS or
 * BNXT_TF_RC_ERROR. NOTE(review): the copy length for rss->key is on an
 * elided line; also confirm a NULL rss->key with non-zero key_len is
 * rejected upstream — not visible here.
 */
1722 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1723 struct ulp_rte_parser_params *param)
1725 const struct rte_flow_action_rss *rss;
1726 struct ulp_rte_act_prop *ap = ¶m->act_prop;
1728 if (action_item == NULL || action_item->conf == NULL) {
1729 BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1730 return BNXT_TF_RC_ERROR;
1733 rss = action_item->conf;
1734 /* Copy the rss into the specific action properties */
1735 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1736 BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1737 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1738 BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1739 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1740 &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
/* Reject keys larger than the property buffer before copying. */
1742 if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1743 BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1744 return BNXT_TF_RC_ERROR;
1746 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1749 /* set the RSS action header bit */
1750 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1752 return BNXT_TF_RC_SUCCESS;
1755 /* Function to handle the parsing of RTE Flow item eth Header. */
/*
 * Copy the encap ethernet spec (dmac, smac, ether type) into consecutive
 * enc_field slots starting at BNXT_ULP_ENC_FIELD_ETH_DMAC and set the O_ETH
 * encap header bit. NOTE(review): "ð_spec" below appears to be mojibake
 * for "&eth_spec" — verify against the full file.
 */
1757 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
1758 const struct rte_flow_item_eth *eth_spec)
1760 struct ulp_rte_hdr_field *field;
1763 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
1764 size = sizeof(eth_spec->dst.addr_bytes);
1765 field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);
1767 size = sizeof(eth_spec->src.addr_bytes);
1768 field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);
1770 size = sizeof(eth_spec->type);
1771 field = ulp_rte_parser_fld_copy(field, ð_spec->type, size);
1773 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
1776 /* Function to handle the parsing of RTE Flow item vlan Header. */
/*
 * Copy the encap VLAN spec (tci, inner ether type) into either the
 * outer-outer or outer-inner VLAN enc_field slot and set the matching
 * OO/OI VLAN encap bit. NOTE(review): the third parameter and the
 * conditional that selects outer vs inner are on elided lines —
 * presumably an "inner" flag selects the branch; verify in the full file.
 */
1778 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
1779 const struct rte_flow_item_vlan *vlan_spec,
1782 struct ulp_rte_hdr_field *field;
1786 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
1787 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1788 BNXT_ULP_HDR_BIT_OO_VLAN);
1790 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
1791 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1792 BNXT_ULP_HDR_BIT_OI_VLAN);
1795 size = sizeof(vlan_spec->tci);
1796 field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);
1798 size = sizeof(vlan_spec->inner_type);
1799 field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
1802 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
/*
 * Copy the encap IPv4 spec into consecutive enc_field slots starting at
 * BNXT_ULP_ENC_FIELD_IPV4_IHL and set the O_IPV4 encap bit. Zero-valued
 * version_ihl and time_to_live are replaced with sane defaults
 * (RTE_IPV4_VHL_DEF, BNXT_ULP_DEFAULT_TTL) so the built header is valid.
 */
1804 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
1805 const struct rte_flow_item_ipv4 *ip)
1807 struct ulp_rte_hdr_field *field;
1811 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
1812 size = sizeof(ip->hdr.version_ihl);
/* Default the version/IHL byte when the caller left it zero. */
1813 if (!ip->hdr.version_ihl)
1814 val8 = RTE_IPV4_VHL_DEF;
1816 val8 = ip->hdr.version_ihl;
1817 field = ulp_rte_parser_fld_copy(field, &val8, size);
1819 size = sizeof(ip->hdr.type_of_service);
1820 field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
1822 size = sizeof(ip->hdr.packet_id);
1823 field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
1825 size = sizeof(ip->hdr.fragment_offset);
1826 field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
1828 size = sizeof(ip->hdr.time_to_live);
/* Default the TTL when the caller left it zero. */
1829 if (!ip->hdr.time_to_live)
1830 val8 = BNXT_ULP_DEFAULT_TTL;
1832 val8 = ip->hdr.time_to_live;
1833 field = ulp_rte_parser_fld_copy(field, &val8, size);
1835 size = sizeof(ip->hdr.next_proto_id);
1836 field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
1838 size = sizeof(ip->hdr.src_addr);
1839 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1841 size = sizeof(ip->hdr.dst_addr);
1842 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1844 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
1847 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
/*
 * Copy the encap IPv6 spec into consecutive enc_field slots starting at
 * BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW and set the O_IPV6 encap bit.
 * Zero-valued vtc_flow and hop_limits are replaced with defaults
 * (BNXT_ULP_IPV6_DFLT_VER in big-endian, BNXT_ULP_DEFAULT_TTL).
 */
1849 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
1850 const struct rte_flow_item_ipv6 *ip)
1852 struct ulp_rte_hdr_field *field;
1857 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
1858 size = sizeof(ip->hdr.vtc_flow);
/* Default the version/TC/flow-label word when the caller left it zero. */
1859 if (!ip->hdr.vtc_flow)
1860 val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
1862 val32 = ip->hdr.vtc_flow;
1863 field = ulp_rte_parser_fld_copy(field, &val32, size);
1865 size = sizeof(ip->hdr.proto);
1866 field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
1868 size = sizeof(ip->hdr.hop_limits);
/* Default the hop limit when the caller left it zero. */
1869 if (!ip->hdr.hop_limits)
1870 val8 = BNXT_ULP_DEFAULT_TTL;
1872 val8 = ip->hdr.hop_limits;
1873 field = ulp_rte_parser_fld_copy(field, &val8, size);
1875 size = sizeof(ip->hdr.src_addr);
1876 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1878 size = sizeof(ip->hdr.dst_addr);
1879 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1881 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
1884 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copy the encap UDP spec (src/dst port) into enc_field slots starting at
 * BNXT_ULP_ENC_FIELD_UDP_SPORT, set the O_UDP encap bit, and force the
 * encap IPv4/IPv6 protocol fields to IPPROTO_UDP so the surrounding L3
 * header names UDP as its payload.
 */
1886 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
1887 const struct rte_flow_item_udp *udp_spec)
1889 struct ulp_rte_hdr_field *field;
1891 uint8_t type = IPPROTO_UDP;
1893 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
1894 size = sizeof(udp_spec->hdr.src_port);
1895 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
1897 size = sizeof(udp_spec->hdr.dst_port);
1898 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
1900 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
1902 /* Update the ip header protocol */
1903 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
1904 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1905 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
1906 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1909 /* Function to handle the parsing of RTE Flow item vxlan Header. */
/*
 * Copy the encap VXLAN spec (flags, rsvd0, vni, rsvd1) into enc_field slots
 * starting at BNXT_ULP_ENC_FIELD_VXLAN_FLAGS and set the T_VXLAN encap bit.
 */
1911 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
1912 struct rte_flow_item_vxlan *vxlan_spec)
1914 struct ulp_rte_hdr_field *field;
1917 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
1918 size = sizeof(vxlan_spec->flags);
1919 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);
1921 size = sizeof(vxlan_spec->rsvd0);
1922 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);
1924 size = sizeof(vxlan_spec->vni);
1925 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);
1927 size = sizeof(vxlan_spec->rsvd1);
1928 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);
1930 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1933 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walk the vxlan_encap action's item chain — ETH [VLAN [VLAN]] (IPV4|IPV6)
 * UDP VXLAN, with VOID items skipped — and dispatch each item to its
 * ulp_rte_enc_*_hdr_handler. Also records encap sizes/types (VTAG num/size,
 * IP size/type, tunnel size) big-endian into the action properties, forces
 * the VXLAN flags byte to 0x08 (valid-VNI flag per RFC 7348), and sets the
 * VXLAN_ENCAP action bit. Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 * NOTE(review): several guard/else lines are elided in this extract —
 * verify ordering against the full file.
 */
1935 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1936 struct ulp_rte_parser_params *params)
1938 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1939 const struct rte_flow_item *item;
1940 const struct rte_flow_item_ipv4 *ipv4_spec;
1941 const struct rte_flow_item_ipv6 *ipv6_spec;
1942 struct rte_flow_item_vxlan vxlan_spec;
1943 uint32_t vlan_num = 0, vlan_size = 0;
1944 uint32_t ip_size = 0, ip_type = 0;
1945 uint32_t vxlan_size = 0;
1946 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1947 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1949 vxlan_encap = action_item->conf;
1951 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1952 return BNXT_TF_RC_ERROR;
1955 item = vxlan_encap->definition;
1957 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1958 return BNXT_TF_RC_ERROR;
1961 if (!ulp_rte_item_skip_void(&item, 0))
1962 return BNXT_TF_RC_ERROR;
1964 /* must have ethernet header */
1965 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1966 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1967 return BNXT_TF_RC_ERROR;
1970 /* Parse the ethernet header */
1972 ulp_rte_enc_eth_hdr_handler(params, item->spec);
1974 /* Goto the next item */
1975 if (!ulp_rte_item_skip_void(&item, 1))
1976 return BNXT_TF_RC_ERROR;
1978 /* May have vlan header */
1979 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1982 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
1984 if (!ulp_rte_item_skip_void(&item, 1))
1985 return BNXT_TF_RC_ERROR;
1988 /* may have two vlan headers */
1989 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1992 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1)
1994 if (!ulp_rte_item_skip_void(&item, 1))
1995 return BNXT_TF_RC_ERROR;
1998 /* Update the vlan count and size of more than one */
2000 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
2001 vlan_num = tfp_cpu_to_be_32(vlan_num);
2002 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
2005 vlan_size = tfp_cpu_to_be_32(vlan_size);
2006 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
2011 /* L3 must be IPv4, IPv6 */
2012 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
2013 ipv4_spec = item->spec;
2014 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2016 /* Update the ip size details */
2017 ip_size = tfp_cpu_to_be_32(ip_size);
2018 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2019 &ip_size, sizeof(uint32_t));
2021 /* update the ip type */
2022 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2023 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2024 &ip_type, sizeof(uint32_t));
2026 /* update the computed field to notify it is ipv4 header */
2027 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2030 ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2032 if (!ulp_rte_item_skip_void(&item, 1))
2033 return BNXT_TF_RC_ERROR;
2034 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2035 ipv6_spec = item->spec;
2036 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2038 /* Update the ip size details */
2039 ip_size = tfp_cpu_to_be_32(ip_size);
2040 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2041 &ip_size, sizeof(uint32_t));
2043 /* update the ip type */
2044 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2045 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2046 &ip_type, sizeof(uint32_t));
2048 /* update the computed field to notify it is ipv6 header */
2049 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2052 ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2054 if (!ulp_rte_item_skip_void(&item, 1))
2055 return BNXT_TF_RC_ERROR;
2057 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2058 return BNXT_TF_RC_ERROR;
/* L4 must be UDP (VXLAN transport). */
2062 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2063 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
2064 return BNXT_TF_RC_ERROR;
2067 ulp_rte_enc_udp_hdr_handler(params, item->spec);
2069 if (!ulp_rte_item_skip_void(&item, 1))
2070 return BNXT_TF_RC_ERROR;
/* Finally the VXLAN item itself must be present. */
2073 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2074 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
2075 return BNXT_TF_RC_ERROR;
2077 vxlan_size = sizeof(struct rte_flow_item_vxlan);
2078 /* copy the vxlan details */
2079 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* Force the valid-VNI flag (I bit, 0x08) regardless of caller input. */
2080 vxlan_spec.flags = 0x08;
2081 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2082 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2083 &vxlan_size, sizeof(uint32_t));
2085 ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2087 /* update the hdr_bitmap with vxlan */
2088 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2089 return BNXT_TF_RC_SUCCESS;
2092 /* Function to handle the parsing of RTE Flow action vxlan_decap Header.
 * Sets the VXLAN_DECAP action bit and marks the flow as an L3 tunnel
 * decap via the computed fields. action_item carries no payload here and
 * may be NULL (the vendor decap hdr handler calls this with NULL).
 * Always returns BNXT_TF_RC_SUCCESS.
 */
2094 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
2096 struct ulp_rte_parser_params *params)
2098 /* update the act_bitmap with vxlan decap */
2099 ULP_BITMAP_SET(params->act_bitmap.bits,
2100 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2101 /* Update computational field with tunnel decap info */
2102 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2103 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
2104 return BNXT_TF_RC_SUCCESS;
2107 /* Function to handle the parsing of RTE Flow action drop Header.
 * The drop action carries no configuration, so only the DROP action bit
 * is recorded; action_item is deliberately unused.
 * Always returns BNXT_TF_RC_SUCCESS.
 */
2109 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2110 struct ulp_rte_parser_params *params)
2112 /* Update the act_bitmap with drop */
2113 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2114 return BNXT_TF_RC_SUCCESS;
2117 /* Function to handle the parsing of RTE Flow action count.
 * Rejects shared counters (not supported), copies the count
 * configuration into the action properties at IDX_COUNT and sets the
 * COUNT action bit.
 * Returns BNXT_TF_RC_PARSE_ERR for shared counters, else SUCCESS.
 */
2119 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2120 struct ulp_rte_parser_params *params)
2122 const struct rte_flow_action_count *act_count;
2123 struct ulp_rte_act_prop *act_prop = &paramsms->act_prop;
2125 act_count = action_item->conf;
2127 if (act_count->shared) {
2129 "Parse Error:Shared count not supported\n");
2130 return BNXT_TF_RC_PARSE_ERR;
2132 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2134 BNXT_ULP_ACT_PROP_SZ_COUNT);
2137 /* Update the act_bitmap with count */
2138 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2139 return BNXT_TF_RC_SUCCESS;
2142 /* Function to handle the parsing of action ports.
 * Common helper for the PF/VF/port_id action handlers: reads the flow
 * direction from the computed fields and, for egress, writes the
 * big-endian vport of ifindex into the VPORT action property; for
 * ingress, picks the VF-func or drv-func default vnic (based on the
 * ACT_PORT_TYPE computed field) and writes it into the VNIC property.
 * Marks ACT_PORT_IS_SET on success.
 * Returns BNXT_TF_RC_ERROR if the port database lookup fails.
 */
2144 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2147 enum bnxt_ulp_direction_type dir;
2150 struct ulp_rte_act_prop *act = &paramm->act_prop;
2151 enum bnxt_ulp_intf_type port_type;
2154 /* Get the direction */
2155 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
2156 if (dir == BNXT_ULP_DIR_EGRESS) {
2157 /* For egress direction, fill vport */
2158 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2159 return BNXT_TF_RC_ERROR;
2162 pid = rte_cpu_to_be_32(pid);
2163 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2164 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2166 /* For ingress direction, fill vnic */
2167 port_type = ULP_COMP_FLD_IDX_RD(param,
2168 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2169 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2170 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2172 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2174 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2176 return BNXT_TF_RC_ERROR;
2179 pid = rte_cpu_to_be_32(pid);
2180 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2181 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2184 /* Update the action port set bit */
2185 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2186 return BNXT_TF_RC_SUCCESS;
2189 /* Function to handle the parsing of RTE Flow action PF.
 * Resolves the current device's incoming interface to a port-db
 * ifindex, verifies the interface really is a PF, records the port
 * type in the computed fields and delegates the vport/vnic programming
 * to ulp_rte_parser_act_port_set().
 * Returns BNXT_TF_RC_ERROR on lookup failure or non-PF port.
 */
2191 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2192 struct ulp_rte_parser_params *params)
2196 enum bnxt_ulp_intf_type intf_type;
2198 /* Get the port id of the current device */
2199 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2201 /* Get the port db ifindex */
2202 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2204 BNXT_TF_DBG(ERR, "Invalid port id\n");
2205 return BNXT_TF_RC_ERROR;
2208 /* Check the port is PF port */
2209 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2210 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2211 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2212 return BNXT_TF_RC_ERROR;
2214 /* Update the action properties */
2215 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2216 return ulp_rte_parser_act_port_set(params, ifindex);
2219 /* Function to handle the parsing of RTE Flow action VF.
 * Validates the VF action configuration ('original' is unsupported),
 * fetches the driver private via bnxt_pmd_get_bp(), translates the
 * logical VF id to a port-db ifindex, confirms the target is a VF or
 * trusted VF, then records the port type and delegates to
 * ulp_rte_parser_act_port_set().
 * Returns PARSE_ERR for bad/unsupported config, ERROR for lookup
 * failures or a non-VF target.
 */
2221 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2222 struct ulp_rte_parser_params *params)
2224 const struct rte_flow_action_vf *vf_action;
2225 enum bnxt_ulp_intf_type intf_type;
2229 vf_action = action_item->conf;
2231 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2232 return BNXT_TF_RC_PARSE_ERR;
2235 if (vf_action->original) {
2236 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2237 return BNXT_TF_RC_PARSE_ERR;
2240 bp = bnxt_pmd_get_bp(params->port_id);
2242 BNXT_TF_DBG(ERR, "Invalid bp\n");
2243 return BNXT_TF_RC_ERROR;
2246 /* vf_action->id is a logical number which in this case is an
2247 * offset from the first VF. So, to get the absolute VF id, the
2248 * offset must be added to the absolute first vf id of that port.
2250 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2254 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2255 return BNXT_TF_RC_ERROR;
2257 /* Check the port is VF port */
2258 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2259 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2260 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2261 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2262 return BNXT_TF_RC_ERROR;
2265 /* Update the action properties */
2266 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2267 return ulp_rte_parser_act_port_set(params, ifindex);
2270 /* Function to handle the parsing of RTE Flow action port_id.
 * Validates the port_id action ('original' is unsupported), maps the
 * requested DPDK port id to a port-db ifindex, looks up the interface
 * type, records it in the computed fields and delegates to
 * ulp_rte_parser_act_port_set().
 * Returns PARSE_ERR for bad/unsupported config, ERROR for lookup
 * failures or an invalid port type.
 */
2272 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
2273 struct ulp_rte_parser_params *param)
2275 const struct rte_flow_action_port_id *port_id = act_item->conf;
2277 enum bnxt_ulp_intf_type intf_type;
2281 "ParseErr: Invalid Argument\n");
2282 return BNXT_TF_RC_PARSE_ERR;
2284 if (port_id->original) {
2286 "ParseErr:Portid Original not supported\n");
2287 return BNXT_TF_RC_PARSE_ERR;
2290 /* Get the port db ifindex */
2291 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2293 BNXT_TF_DBG(ERR, "Invalid port id\n");
2294 return BNXT_TF_RC_ERROR;
2297 /* Get the intf type */
2298 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2300 BNXT_TF_DBG(ERR, "Invalid port type\n");
2301 return BNXT_TF_RC_ERROR;
2304 /* Set the action port */
2305 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2306 return ulp_rte_parser_act_port_set(param, ifindex);
2309 /* Function to handle the parsing of RTE Flow action phy_port.
 * Validates the phy_port action ('original' is unsupported), allows it
 * only on egress flows, resolves the physical port index to a vport
 * through the port db and writes the big-endian vport into the VPORT
 * action property. Marks ACT_PORT_IS_SET and sets the action port type
 * to PHY_PORT.
 * Returns PARSE_ERR for bad/unsupported config or ingress direction.
 */
2311 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2312 struct ulp_rte_parser_params *prm)
2314 const struct rte_flow_action_phy_port *phy_port;
2318 enum bnxt_ulp_direction_type dir;
2320 phy_port = action_item->conf;
2323 "ParseErr: Invalid Argument\n");
2324 return BNXT_TF_RC_PARSE_ERR;
2327 if (phy_port->original) {
2329 "Parse Err:Port Original not supported\n");
2330 return BNXT_TF_RC_PARSE_ERR;
2332 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2333 if (dir != BNXT_ULP_DIR_EGRESS) {
2335 "Parse Err:Phy ports are valid only for egress\n");
2336 return BNXT_TF_RC_PARSE_ERR;
2338 /* Get the physical port details from port db */
2339 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2342 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2347 pid = rte_cpu_to_be_32(pid);
2348 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2349 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2351 /* Update the action port set bit */
2352 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2353 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2354 BNXT_ULP_INTF_TYPE_PHY_PORT);
2355 return BNXT_TF_RC_SUCCESS;
2358 /* Function to handle the parsing of RTE Flow action pop vlan.
 * No configuration is needed; just records the POP_VLAN action bit.
 * Always returns BNXT_TF_RC_SUCCESS.
 */
2360 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2361 struct ulp_rte_parser_params *params)
2363 /* Update the act_bitmap with pop */
2364 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2365 return BNXT_TF_RC_SUCCESS;
2368 /* Function to handle the parsing of RTE Flow action push vlan.
 * Accepts only the 802.1Q ethertype (RTE_ETHER_TYPE_VLAN); copies the
 * ethertype into the PUSH_VLAN action property and sets the PUSH_VLAN
 * action bit.
 * Returns PARSE_ERR for unsupported ethertypes, ERROR for missing conf.
 */
2370 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2371 struct ulp_rte_parser_params *params)
2373 const struct rte_flow_action_of_push_vlan *push_vlan;
2375 struct ulp_rte_act_prop *act = &paramsms->act_prop;
2377 push_vlan = action_item->conf;
2379 ethertype = push_vlan->ethertype;
2380 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2382 "Parse Err: Ethertype not supported\n");
2383 return BNXT_TF_RC_PARSE_ERR;
2385 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2386 &ethertypeertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2387 /* Update the act_bitmap with push vlan */
2388 ULP_BITMAP_SET(params->act_bitmap.bits,
2389 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2390 return BNXT_TF_RC_SUCCESS;
2392 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2393 return BNXT_TF_RC_ERROR;
2396 /* Function to handle the parsing of RTE Flow action set vlan id.
 * Copies the VLAN vid into the SET_VLAN_VID action property and sets
 * the SET_VLAN_VID action bit.
 * NOTE(review): the guard requires vlan_vid->vlan_vid to be non-zero,
 * so a set-vid-to-0 request is reported as an invalid argument.
 * Returns ERROR when conf is missing or vid is 0, else SUCCESS.
 */
2398 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2399 struct ulp_rte_parser_params *params)
2401 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2403 struct ulp_rte_act_prop *act = &paramsms->act_prop;
2405 vlan_vid = action_item->conf;
2406 if (vlan_vid && vlan_vid->vlan_vid) {
2407 vid = vlan_vid->vlan_vid;
2408 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2409 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2410 /* Update the act_bitmap with vlan vid */
2411 ULP_BITMAP_SET(params->act_bitmap.bits,
2412 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2413 return BNXT_TF_RC_SUCCESS;
2415 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2416 return BNXT_TF_RC_ERROR;
2419 /* Function to handle the parsing of RTE Flow action set vlan pcp.
 * Copies the VLAN priority into the SET_VLAN_PCP action property and
 * sets the SET_VLAN_PCP action bit.
 * Returns ERROR when conf is missing, else SUCCESS.
 */
2421 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2422 struct ulp_rte_parser_params *params)
2424 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2426 struct ulp_rte_act_prop *act = &paramsms->act_prop;
2428 vlan_pcp = action_item->conf;
2430 pcp = vlan_pcp->vlan_pcp;
2431 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2432 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2433 /* Update the act_bitmap with vlan pcp */
2434 ULP_BITMAP_SET(params->act_bitmap.bits,
2435 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2436 return BNXT_TF_RC_SUCCESS;
2438 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2439 return BNXT_TF_RC_ERROR;
2442 /* Function to handle the parsing of RTE Flow action set ipv4 src.
 * Copies the new source address into the SET_IPV4_SRC action property
 * and sets the SET_IPV4_SRC action bit.
 * Returns ERROR when conf is missing, else SUCCESS.
 */
2444 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2445 struct ulp_rte_parser_params *params)
2447 const struct rte_flow_action_set_ipv4 *set_ipv4;
2448 struct ulp_rte_act_prop *act = &paramsms->act_prop;
2450 set_ipv4 = action_item->conf;
2452 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2453 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2454 /* Update the act_bitmap with set ipv4 src */
2455 ULP_BITMAP_SET(params->act_bitmap.bits,
2456 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2457 return BNXT_TF_RC_SUCCESS;
2459 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2460 return BNXT_TF_RC_ERROR;
2463 /* Function to handle the parsing of RTE Flow action set ipv4 dst.
 * Copies the new destination address into the SET_IPV4_DST action
 * property and sets the SET_IPV4_DST action bit.
 * Returns ERROR when conf is missing, else SUCCESS.
 */
2465 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2466 struct ulp_rte_parser_params *params)
2468 const struct rte_flow_action_set_ipv4 *set_ipv4;
2469 struct ulp_rte_act_prop *act = &paramsms->act_prop;
2471 set_ipv4 = action_item->conf;
2473 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2474 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2475 /* Update the act_bitmap with set ipv4 dst */
2476 ULP_BITMAP_SET(params->act_bitmap.bits,
2477 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2478 return BNXT_TF_RC_SUCCESS;
2480 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2481 return BNXT_TF_RC_ERROR;
2484 /* Function to handle the parsing of RTE Flow action set tp src.
 * Copies the new transport source port into the SET_TP_SRC action
 * property and sets the SET_TP_SRC action bit.
 * Returns ERROR when conf is missing, else SUCCESS.
 */
2486 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2487 struct ulp_rte_parser_params *params)
2489 const struct rte_flow_action_set_tp *set_tp;
2490 struct ulp_rte_act_prop *act = &paramsms->act_prop;
2492 set_tp = action_item->conf;
2494 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2495 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2496 /* Update the act_bitmap with set tp src */
2497 ULP_BITMAP_SET(params->act_bitmap.bits,
2498 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2499 return BNXT_TF_RC_SUCCESS;
2502 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2503 return BNXT_TF_RC_ERROR;
2506 /* Function to handle the parsing of RTE Flow action set tp dst.
 * Copies the new transport destination port into the SET_TP_DST action
 * property and sets the SET_TP_DST action bit.
 * Returns ERROR when conf is missing, else SUCCESS.
 */
2508 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2509 struct ulp_rte_parser_params *params)
2511 const struct rte_flow_action_set_tp *set_tp;
2512 struct ulp_rte_act_prop *act = &paramsms->act_prop;
2514 set_tp = action_item->conf;
2516 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2517 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2518 /* Update the act_bitmap with set tp dst */
2519 ULP_BITMAP_SET(params->act_bitmap.bits,
2520 BNXT_ULP_ACT_BIT_SET_TP_DST);
2521 return BNXT_TF_RC_SUCCESS;
/* NOTE(review): copy-paste — this error message says "set tp src" but
 * this is the tp *dst* handler; the log text should read "set tp dst".
 */
2524 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2525 return BNXT_TF_RC_ERROR;
2528 /* Function to handle the parsing of RTE Flow action dec ttl.
 * No configuration is needed; just records the DEC_TTL action bit.
 * Always returns BNXT_TF_RC_SUCCESS.
 */
2530 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2531 struct ulp_rte_parser_params *params)
2533 /* Update the act_bitmap with dec ttl */
2534 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2535 return BNXT_TF_RC_SUCCESS;
2538 /* Function to handle the parsing of RTE Flow action JUMP.
 * No configuration is consumed here; just records the JUMP action bit.
 * Always returns BNXT_TF_RC_SUCCESS.
 */
2540 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2541 struct ulp_rte_parser_params *params)
2543 /* Update the act_bitmap with jump */
2544 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2545 return BNXT_TF_RC_SUCCESS;
/* Function to handle the parsing of RTE Flow action sample.
 * Restrictions enforced here: sample actions must not be nested inside
 * another sample, are only allowed as shared actions, must use a ratio
 * of 1 (100%), and must carry a nested action list. The nested actions
 * are parsed recursively; on success the SAMPLE action bit is set.
 * Returns BNXT_TF_RC_ERROR when any restriction is violated.
 */
2549 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2550 struct ulp_rte_parser_params *params)
2552 const struct rte_flow_action_sample *sample;
2555 sample = action_item->conf;
2557 /* if SAMPLE bit is set it means this sample action is nested within the
2558 * actions of another sample action; this is not allowed
2560 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2561 BNXT_ULP_ACT_BIT_SAMPLE))
2562 return BNXT_TF_RC_ERROR;
2564 /* a sample action is only allowed as a shared action */
2565 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2566 BNXT_ULP_ACT_BIT_SHARED))
2567 return BNXT_TF_RC_ERROR;
2569 /* only a ratio of 1 i.e. 100% is supported */
2570 if (sample->ratio != 1)
2571 return BNXT_TF_RC_ERROR;
2573 if (!sample->actions)
2574 return BNXT_TF_RC_ERROR;
2576 /* parse the nested actions for a sample action */
2577 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2578 if (ret == BNXT_TF_RC_SUCCESS)
2579 /* Update the act_bitmap with sample */
2580 ULP_BITMAP_SET(params->act_bitmap.bits,
2581 BNXT_ULP_ACT_BIT_SAMPLE);
2586 /* Function to handle the parsing of bnxt vendor Flow action vxlan Header.
 * Vendor variant of vxlan decap: sets the F1 flow header bit first,
 * then reuses the standard vxlan decap action handler.
 */
2588 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
2589 struct ulp_rte_parser_params *params)
2591 /* Set the F1 flow header bit */
2592 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
2593 return ulp_rte_vxlan_decap_act_handler(action_item, params);
2596 /* Function to handle the parsing of bnxt vendor Flow item vxlan Header.
 * Vendor variant on the item (match) side: sets the F2 flow header bit,
 * then reuses the vxlan decap action handler; the action argument is
 * NULL because that handler does not read it.
 */
2598 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
2599 struct ulp_rte_parser_params *params)
2602 /* Set the F2 flow header bit */
2603 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
2604 return ulp_rte_vxlan_decap_act_handler(NULL, params);