1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
20 #include "ulp_template_db_tbl.h"
22 /* Local defines for the parsing functions */
/* VLAN TCI layout: PCP occupies the top 3 bits, VID the low 12 bits. */
23 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
/* NOTE(review): 0x700 is the 3-bit priority compared after the htons()
 * byte swap in the VLAN handler — presumably intentional; confirm on
 * big-endian targets.
 */
24 #define ULP_VLAN_PRIORITY_MASK 0x700
25 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
/* IANA-assigned UDP destination port for VXLAN encapsulation. */
26 #define ULP_UDP_PORT_VXLAN 4789
28 /* Utility function to skip the void items. */
/* Advances *item past RTE_FLOW_ITEM_TYPE_VOID entries in a pattern
 * array; 'increment' is presumably the stride applied per step — body
 * not fully visible in this view, confirm against the full source.
 */
30 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
36 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
43 /* Utility function to copy field spec items */
/* Copies field->size bytes from 'buffer' into field->spec; per the
 * return type it presumably returns the next hdr_field slot so callers
 * can chain copies — confirm against the full source.
 */
44 static struct ulp_rte_hdr_field *
45 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
50 memcpy(field->spec, buffer, field->size);
55 /* Utility function to update the field_bitmap */
/* Marks hdr_field[idx] in fld_bitmap when its mask has any bit set.
 * The exact-match bitmap (fld_s_bitmap) is also set unless the caller
 * passed ULP_PRSR_ACT_MATCH_IGNORE. A partial (not all-ones) mask
 * additionally flags the flow as a wildcard match via
 * BNXT_ULP_CF_IDX_WC_MATCH. A zero mask clears the fld_bitmap entry.
 * Fix: repaired mojibake '&params' (was rendered as a pilcrow glyph).
 */
57 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
59 enum bnxt_ulp_prsr_action prsr_act)
61 struct ulp_rte_hdr_field *field;
63 field = &params->hdr_field[idx];
64 if (ulp_bitmap_notzero(field->mask, field->size)) {
65 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
66 if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
67 ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
/* A partially-masked field forces the wildcard-match template path. */
69 if (!ulp_bitmap_is_ones(field->mask, field->size))
70 ULP_COMP_FLD_IDX_WR(params,
71 BNXT_ULP_CF_IDX_WC_MATCH, 1);
73 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
/* Returns &x->y, or NULL when x itself is NULL — used to pass optional
 * spec/mask fields to the copy helpers. NOTE(review): the macro name
 * misspells "dereference"; kept as-is since it is used file-wide.
 */
77 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
78 /* Utility function to copy field spec and masks items */
/* Copies an rte_flow field's mask (unless MASK_IGNORE) and then its
 * spec (only when both spec and a non-zero mask are present) into
 * params->hdr_field[*idx], updating the field bitmaps, then advances
 * *idx. Fix: repaired mojibake '&params' (was a pilcrow glyph).
 */
80 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
83 const void *spec_buff,
84 const void *mask_buff,
85 enum bnxt_ulp_prsr_action prsr_act)
87 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
89 /* update the field size */
92 /* copy the mask specifications only if mask is not null */
93 if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
94 memcpy(field->mask, mask_buff, size);
95 ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
98 /* copy the spec only when both spec and a non-zero mask are given */
99 if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
100 memcpy(field->spec, spec_buff, size);
102 /* Increment the index */
106 /* Utility function to validate and reserve hdr_field slots */
/* Reserves 'size' hdr_field entries: fails when the running
 * params->field_idx would exceed BNXT_ULP_PROTO_HDR_MAX, otherwise
 * hands the caller the current index via *idx and advances field_idx.
 * NOTE(review): the error log reads *idx before it is assigned here —
 * callers appear to initialize idx to 0 first; confirm.
 */
108 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
112 if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
113 BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
116 *idx = params->field_idx;
117 params->field_idx += size;
122 * Function to handle the parsing of RTE Flows and placing
123 * the RTE flow items into the ulp structures.
/* Walks the rte_flow pattern until RTE_FLOW_ITEM_TYPE_END, dispatching
 * each item to its per-protocol handler from ulp_hdr_info[] (standard
 * items) or ulp_vendor_hdr_info[] (bnxt vendor items numbered from
 * BNXT_RTE_FLOW_ITEM_TYPE_END). Rejects unsupported item types.
 * On success, falls through to set the implied SVIF match.
 */
126 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
127 struct ulp_rte_parser_params *params)
129 const struct rte_flow_item *item = pattern;
130 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* First BNXT_ULP_PROTO_HDR_SVIF_NUM hdr_field slots are reserved for SVIF. */
132 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
134 /* Set the computed flags for no vlan tags before parsing */
135 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
136 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
138 /* Parse all the items in the pattern */
139 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
140 if (item->type >= (uint32_t)
141 BNXT_RTE_FLOW_ITEM_TYPE_END) {
143 (uint32_t)BNXT_RTE_FLOW_ITEM_TYPE_LAST)
144 goto hdr_parser_error;
145 /* get the header information */
146 hdr_info = &ulp_vendor_hdr_info[item->type -
147 BNXT_RTE_FLOW_ITEM_TYPE_END];
149 if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
150 goto hdr_parser_error;
151 hdr_info = &ulp_hdr_info[item->type];
153 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
154 goto hdr_parser_error;
155 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
156 /* call the registered callback handler */
157 if (hdr_info->proto_hdr_func) {
158 if (hdr_info->proto_hdr_func(item, params) !=
159 BNXT_TF_RC_SUCCESS) {
160 return BNXT_TF_RC_ERROR;
166 /* update the implied SVIF */
167 return ulp_rte_parser_implicit_match_port_process(params);
170 BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
172 return BNXT_TF_RC_PARSE_ERR;
176 * Function to handle the parsing of RTE Flows and placing
177 * the RTE flow actions into the ulp structures.
/* Walks the rte_flow action list until RTE_FLOW_ACTION_TYPE_END,
 * dispatching each action to its handler from ulp_act_info[] (standard
 * actions) or ulp_vendor_act_info[] (bnxt vendor actions numbered from
 * BNXT_RTE_FLOW_ACTION_TYPE_END). Rejects unsupported action types,
 * then applies the implicit action-port fixup.
 */
180 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
181 struct ulp_rte_parser_params *params)
183 const struct rte_flow_action *action_item = actions;
184 struct bnxt_ulp_rte_act_info *hdr_info;
186 /* Parse all the items in the pattern */
187 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
188 if (action_item->type >=
189 (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_END) {
190 if (action_item->type >=
191 (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_LAST)
192 goto act_parser_error;
193 /* get the header information from bnxt actinfo table */
194 hdr_info = &ulp_vendor_act_info[action_item->type -
195 BNXT_RTE_FLOW_ACTION_TYPE_END];
197 if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
198 goto act_parser_error;
199 /* get the header information from the act info table */
200 hdr_info = &ulp_act_info[action_item->type];
202 if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
203 goto act_parser_error;
204 } else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
205 /* call the registered callback handler */
206 if (hdr_info->proto_act_func) {
207 if (hdr_info->proto_act_func(action_item,
209 BNXT_TF_RC_SUCCESS) {
210 return BNXT_TF_RC_ERROR;
216 /* update the implied port details */
217 ulp_rte_parser_implicit_act_port_process(params);
218 return BNXT_TF_RC_SUCCESS;
221 BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
223 return BNXT_TF_RC_ERROR;
227 * Function to handle the post processing of the computed
228 * fields for the interface.
/* Resolves the incoming dpdk port to a ulp ifindex, then fills the
 * PARIF computed fields: for ingress flows the physical-port PARIF;
 * for the egress/other path the VF-func PARIF (when the match port is
 * a VF rep, also setting MATCH_PORT_IS_VFREP) or the driver-func
 * PARIF, plus MATCH_PORT_IS_PF when the match port is the PF.
 */
231 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
234 uint16_t port_id, parif;
236 enum bnxt_ulp_direction_type dir;
238 /* get the direction details */
239 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
241 /* read the port id details */
242 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
243 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
246 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
250 if (dir == BNXT_ULP_DIR_INGRESS) {
252 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
253 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
254 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
257 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
260 /* Get the match port type */
261 mtype = ULP_COMP_FLD_IDX_RD(params,
262 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
263 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
264 ULP_COMP_FLD_IDX_WR(params,
265 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
267 /* Set VF func PARIF */
268 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
269 BNXT_ULP_VF_FUNC_PARIF,
272 "ParseErr:ifindex is not valid\n");
275 ULP_COMP_FLD_IDX_WR(params,
276 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
280 /* Set DRV func PARIF */
281 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
282 BNXT_ULP_DRV_FUNC_PARIF,
285 "ParseErr:ifindex is not valid\n");
288 ULP_COMP_FLD_IDX_WR(params,
289 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
292 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
293 ULP_COMP_FLD_IDX_WR(params,
294 BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
/* Post-processes a parsed flow: stamps direction bits into the header
 * and action bitmaps, derives the VF-to-VF flag, translates DEC_TTL
 * into the tunnel/plain TTL-decrement computed fields, merges the
 * field-derived protocol bits into hdr_bitmap, records the flow id,
 * and refreshes the interface computed fields.
 */
301 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
303 enum bnxt_ulp_intf_type match_port_type, act_port_type;
304 enum bnxt_ulp_direction_type dir;
305 uint32_t act_port_set;
307 /* Get the computed details */
308 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
309 match_port_type = ULP_COMP_FLD_IDX_RD(params,
310 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
311 act_port_type = ULP_COMP_FLD_IDX_RD(params,
312 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
313 act_port_set = ULP_COMP_FLD_IDX_RD(params,
314 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
316 /* set the flow direction in the proto and action header */
317 if (dir == BNXT_ULP_DIR_EGRESS) {
318 ULP_BITMAP_SET(params->hdr_bitmap.bits,
319 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
320 ULP_BITMAP_SET(params->act_bitmap.bits,
321 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
324 /* calculate the VF to VF flag */
325 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
326 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
327 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
329 /* Update the decrement ttl computational fields */
330 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
331 BNXT_ULP_ACT_BIT_DEC_TTL)) {
333 * Check that vxlan proto is included and vxlan decap
334 * action is not set then decrement tunnel ttl.
335 * Similarly add GRE and NVGRE in future.
337 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
338 BNXT_ULP_HDR_BIT_T_VXLAN) &&
339 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
340 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
341 ULP_COMP_FLD_IDX_WR(params,
342 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
344 ULP_COMP_FLD_IDX_WR(params,
345 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
349 /* Merge the hdr_fp_bit into the proto header bit */
350 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
352 /* Update the comp fld fid */
353 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
355 /* Update the computed interface parameters */
356 bnxt_ulp_comp_fld_intf_update(params);
358 /* TBD: Handle the flow rejection scenarios */
363 * Function to handle the post processing of the parsing details
/* Thin public entry point: delegates to ulp_post_process_normal_flow(). */
366 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
368 ulp_post_process_normal_flow(params);
372 * Function to compute the flow direction based on the match port details
/* Writes BNXT_ULP_CF_IDX_DIRECTION: an ingress-attr flow matching a VF
 * representor is treated as egress (traffic from the VF); otherwise the
 * direction simply follows the flow's ingress/egress attribute.
 */
375 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
377 enum bnxt_ulp_intf_type match_port_type;
379 /* Get the match port type */
380 match_port_type = ULP_COMP_FLD_IDX_RD(params,
381 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
383 /* If ingress flow and matchport is vf rep then dir is egress*/
384 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
385 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
386 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
387 BNXT_ULP_DIR_EGRESS);
389 /* Assign the input direction */
390 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
391 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
392 BNXT_ULP_DIR_INGRESS);
394 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
395 BNXT_ULP_DIR_EGRESS);
399 /* Function to handle the parsing of RTE Flow item PF Header. */
/* Records the SVIF match for the given ifindex: refuses a second
 * source match, resolves the port type, computes the flow direction,
 * picks the SVIF flavor (phy-port for ingress; VF-func for VF reps,
 * else drv-func), and stores the big-endian SVIF spec/mask into the
 * reserved hdr_field slot plus the SVIF_FLAG computed field.
 * Fix: repaired mojibake '&params' (was rendered as a pilcrow glyph).
 */
401 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
406 enum bnxt_ulp_direction_type dir;
407 struct ulp_rte_hdr_field *hdr_field;
408 enum bnxt_ulp_svif_type svif_type;
409 enum bnxt_ulp_intf_type port_type;
411 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
412 BNXT_ULP_INVALID_SVIF_VAL) {
414 "SVIF already set,multiple source not support'd\n");
415 return BNXT_TF_RC_ERROR;
418 /* Get port type details */
419 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
420 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
421 BNXT_TF_DBG(ERR, "Invalid port type\n");
422 return BNXT_TF_RC_ERROR;
425 /* Update the match port type */
426 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
428 /* compute the direction */
429 bnxt_ulp_rte_parser_direction_compute(params);
431 /* Get the computed direction */
432 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
433 if (dir == BNXT_ULP_DIR_INGRESS) {
434 svif_type = BNXT_ULP_PHY_PORT_SVIF;
436 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
437 svif_type = BNXT_ULP_VF_FUNC_SVIF;
439 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
441 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
443 svif = rte_cpu_to_be_16(svif);
444 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
445 memcpy(hdr_field->spec, &svif, sizeof(svif));
446 memcpy(hdr_field->mask, &mask, sizeof(mask));
447 hdr_field->size = sizeof(svif);
448 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
449 rte_be_to_cpu_16(svif));
450 return BNXT_TF_RC_SUCCESS;
453 /* Function to handle the parsing of the RTE port id */
/* Applies the implied source match: if no pattern item set the SVIF,
 * derives it from the incoming dpdk port with an all-ones mask.
 */
455 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
457 uint16_t port_id = 0;
458 uint16_t svif_mask = 0xFFFF;
460 int32_t rc = BNXT_TF_RC_ERROR;
462 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
463 BNXT_ULP_INVALID_SVIF_VAL)
464 return BNXT_TF_RC_SUCCESS;
466 /* SVIF not set. So get the port id */
467 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
469 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
472 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
476 /* Update the SVIF details */
477 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
481 /* Function to handle the implicit action port id */
/* If no explicit port action was parsed, synthesizes a port_id action
 * targeting the incoming interface and runs it through the normal
 * handler, then clears ACT_PORT_IS_SET so the port counts as implicit.
 */
483 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
485 struct rte_flow_action action_item = {0};
486 struct rte_flow_action_port_id port_id = {0};
488 /* Read the action port set bit */
489 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
490 /* Already set, so just exit */
491 return BNXT_TF_RC_SUCCESS;
493 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
494 action_item.conf = &port_id;
496 /* Update the action port based on incoming port */
497 ulp_rte_port_id_act_handler(&action_item, params);
499 /* Reset the action port set bit */
500 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
501 return BNXT_TF_RC_SUCCESS;
504 /* Function to handle the parsing of RTE Flow item PF Header. */
/* The PF item carries no spec; the match is derived from the incoming
 * dpdk port, converted to a ulp ifindex, with an all-ones SVIF mask.
 */
506 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
507 struct ulp_rte_parser_params *params)
509 uint16_t port_id = 0;
510 uint16_t svif_mask = 0xFFFF;
513 /* Get the implicit port id */
514 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
516 /* perform the conversion from dpdk port to bnxt ifindex */
517 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
520 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
521 return BNXT_TF_RC_ERROR;
524 /* Update the SVIF details */
525 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
528 /* Function to handle the parsing of RTE Flow item VF Header. */
/* Matches on a VF: validates the item's spec/mask (validation lines
 * partly elided in this view), maps the VF function id to a ulp
 * ifindex, and records the SVIF match.
 */
530 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
531 struct ulp_rte_parser_params *params)
533 const struct rte_flow_item_vf *vf_spec = item->spec;
534 const struct rte_flow_item_vf *vf_mask = item->mask;
537 int32_t rc = BNXT_TF_RC_PARSE_ERR;
539 /* Get VF rte_flow_item for Port details */
541 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
545 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
550 /* perform the conversion from VF Func id to bnxt ifindex */
551 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
554 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
557 /* Update the SVIF details */
558 return ulp_rte_parser_svif_set(params, ifindex, mask);
561 /* Function to handle the parsing of RTE Flow item port id Header. */
/* Matches on a dpdk port id: validates the item's spec/mask (checks
 * partly elided in this view), converts the port id to a ulp ifindex,
 * and records the SVIF match using the item's mask.
 */
563 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
564 struct ulp_rte_parser_params *params)
566 const struct rte_flow_item_port_id *port_spec = item->spec;
567 const struct rte_flow_item_port_id *port_mask = item->mask;
569 int32_t rc = BNXT_TF_RC_PARSE_ERR;
573 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
577 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
580 mask = port_mask->id;
582 /* perform the conversion from dpdk port to bnxt ifindex */
583 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
586 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
589 /* Update the SVIF details */
590 return ulp_rte_parser_svif_set(params, ifindex, mask);
593 /* Function to handle the parsing of RTE Flow item phy port Header. */
/* Matches on a physical port index. Valid only for ingress flows;
 * looks up the port's SVIF in the port db and stores the big-endian
 * spec plus the item's mask into the reserved SVIF hdr_field slot and
 * the SVIF_FLAG computed field.
 * Fix: repaired mojibake '&params' (was rendered as a pilcrow glyph).
 */
595 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
596 struct ulp_rte_parser_params *params)
598 const struct rte_flow_item_phy_port *port_spec = item->spec;
599 const struct rte_flow_item_phy_port *port_mask = item->mask;
601 int32_t rc = BNXT_TF_RC_ERROR;
603 enum bnxt_ulp_direction_type dir;
604 struct ulp_rte_hdr_field *hdr_field;
606 /* Copy the rte_flow_item for phy port into hdr_field */
608 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
612 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
615 mask = port_mask->index;
617 /* Update the match port type */
618 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
619 BNXT_ULP_INTF_TYPE_PHY_PORT);
621 /* Compute the Hw direction */
622 bnxt_ulp_rte_parser_direction_compute(params);
624 /* Direction validation */
625 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
626 if (dir == BNXT_ULP_DIR_EGRESS) {
628 "Parse Err:Phy ports are valid only for ingress\n");
629 return BNXT_TF_RC_PARSE_ERR;
632 /* Get the physical port details from port db */
633 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
636 BNXT_TF_DBG(ERR, "Failed to get port details\n");
637 return BNXT_TF_RC_PARSE_ERR;
640 /* Update the SVIF details */
641 svif = rte_cpu_to_be_16(svif);
642 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
643 memcpy(hdr_field->spec, &svif, sizeof(svif));
644 memcpy(hdr_field->mask, &mask, sizeof(mask));
645 hdr_field->size = sizeof(svif);
646 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
647 rte_be_to_cpu_16(svif));
648 return BNXT_TF_RC_SUCCESS;
651 /* Function to handle the update of proto header based on field values */
/* From a big-endian ethertype, sets the inner (in_flag non-zero) or
 * outer IPv4/IPv6 fast-path header bit and the matching L3 computed
 * field.
 */
653 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
654 uint16_t type, uint32_t in_flag)
656 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
658 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
659 BNXT_ULP_HDR_BIT_I_IPV4);
660 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
662 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
663 BNXT_ULP_HDR_BIT_O_IPV4);
664 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
666 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
668 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
669 BNXT_ULP_HDR_BIT_I_IPV6);
670 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
672 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
673 BNXT_ULP_HDR_BIT_O_IPV6);
674 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
679 /* Internal Function to identify broadcast or multicast packets */
/* Logs and flags broadcast/multicast MACs, which this parser does not
 * offload; returns are elided in this view (presumably true on match).
 */
681 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
683 if (rte_is_multicast_ether_addr(eth_addr) ||
684 rte_is_broadcast_ether_addr(eth_addr)) {
686 "No support for bcast or mcast addr offload\n");
692 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/* Copies the eth item's dst MAC, src MAC and ethertype into hdr_field
 * slots (ethertype with MATCH_IGNORE), rejects bcast/mcast addresses,
 * classifies the header as inner (when an outer L2/L3/L4 bit is
 * already set) or outer ETH, and feeds the ethertype to the L2 proto
 * updater. Fix: repaired mojibake '&eth_spec' (was rendered as an
 * eth-ligature glyph) in the two bcast/mcast checks.
 */
694 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
695 struct ulp_rte_parser_params *params)
697 const struct rte_flow_item_eth *eth_spec = item->spec;
698 const struct rte_flow_item_eth *eth_mask = item->mask;
699 uint32_t idx = 0, dmac_idx = 0;
701 uint16_t eth_type = 0;
702 uint32_t inner_flag = 0;
704 /* Perform validations */
706 /* Todo: work around to avoid multicast and broadcast addr */
707 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
708 return BNXT_TF_RC_PARSE_ERR;
710 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
711 return BNXT_TF_RC_PARSE_ERR;
713 eth_type = eth_spec->type;
716 if (ulp_rte_prsr_fld_size_validate(params, &idx,
717 BNXT_ULP_PROTO_HDR_ETH_NUM)) {
718 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
719 return BNXT_TF_RC_ERROR;
722 * Copy the rte_flow_item for eth into hdr_field using ethernet
726 size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
727 ulp_rte_prsr_fld_mask(params, &idx, size,
728 ulp_deference_struct(eth_spec, dst.addr_bytes),
729 ulp_deference_struct(eth_mask, dst.addr_bytes),
730 ULP_PRSR_ACT_DEFAULT);
732 size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
733 ulp_rte_prsr_fld_mask(params, &idx, size,
734 ulp_deference_struct(eth_spec, src.addr_bytes),
735 ulp_deference_struct(eth_mask, src.addr_bytes),
736 ULP_PRSR_ACT_DEFAULT);
738 size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
739 ulp_rte_prsr_fld_mask(params, &idx, size,
740 ulp_deference_struct(eth_spec, type),
741 ulp_deference_struct(eth_mask, type),
742 ULP_PRSR_ACT_MATCH_IGNORE);
744 /* Update the protocol hdr bitmap */
745 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
746 BNXT_ULP_HDR_BIT_O_ETH) ||
747 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
748 BNXT_ULP_HDR_BIT_O_IPV4) ||
749 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
750 BNXT_ULP_HDR_BIT_O_IPV6) ||
751 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
752 BNXT_ULP_HDR_BIT_O_UDP) ||
753 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
754 BNXT_ULP_HDR_BIT_O_TCP)) {
755 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
758 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
759 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
762 /* Update the field protocol hdr bitmap */
763 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
765 return BNXT_TF_RC_SUCCESS;
768 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/* Splits the TCI into priority and VID (both kept big-endian), widens
 * an exact 3-bit priority mask / 12-bit tag mask to full-width for
 * exact matching, copies priority (mask ignored), tag and inner type
 * into hdr_field slots, then classifies the tag as OO/OI/IO/II VLAN
 * from the current outer/inner eth bits and tag counts, updating the
 * per-level VTAG computed fields.
 * Fix: repaired mojibake '&params' (was rendered as a pilcrow glyph).
 */
770 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
771 struct ulp_rte_parser_params *params)
773 const struct rte_flow_item_vlan *vlan_spec = item->spec;
774 const struct rte_flow_item_vlan *vlan_mask = item->mask;
775 struct ulp_rte_hdr_bitmap *hdr_bit;
777 uint16_t vlan_tag = 0, priority = 0;
778 uint16_t vlan_tag_mask = 0, priority_mask = 0;
779 uint32_t outer_vtag_num;
780 uint32_t inner_vtag_num;
781 uint16_t eth_type = 0;
782 uint32_t inner_flag = 0;
786 vlan_tag = ntohs(vlan_spec->tci);
787 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
788 vlan_tag &= ULP_VLAN_TAG_MASK;
789 vlan_tag = htons(vlan_tag);
790 eth_type = vlan_spec->inner_type;
794 vlan_tag_mask = ntohs(vlan_mask->tci);
795 priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
796 vlan_tag_mask &= 0xfff;
799 * the storage for priority and vlan tag is 2 bytes
800 * The mask of priority which is 3 bits if it is all 1's
801 * then make the rest bits 13 bits as 1's
802 * so that it is matched as exact match.
804 if (priority_mask == ULP_VLAN_PRIORITY_MASK)
805 priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
806 if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
807 vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
808 vlan_tag_mask = htons(vlan_tag_mask);
811 if (ulp_rte_prsr_fld_size_validate(params, &idx,
812 BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
813 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
814 return BNXT_TF_RC_ERROR;
818 * Copy the rte_flow_item for vlan into hdr_field using Vlan
821 size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
823 * The priority field is ignored since OVS is setting it as
824 * wild card match and it is not supported. This is a work
825 * around and shall be addressed in the future.
827 ulp_rte_prsr_fld_mask(params, &idx, size,
830 ULP_PRSR_ACT_MASK_IGNORE);
832 ulp_rte_prsr_fld_mask(params, &idx, size,
835 ULP_PRSR_ACT_DEFAULT);
837 size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
838 ulp_rte_prsr_fld_mask(params, &idx, size,
839 ulp_deference_struct(vlan_spec, inner_type),
840 ulp_deference_struct(vlan_mask, inner_type),
841 ULP_PRSR_ACT_MATCH_IGNORE);
843 /* Get the outer tag and inner tag counts */
844 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
845 BNXT_ULP_CF_IDX_O_VTAG_NUM);
846 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
847 BNXT_ULP_CF_IDX_I_VTAG_NUM);
849 /* Update the hdr_bitmap of the vlans */
850 hdr_bit = &params->hdr_bitmap;
851 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
852 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
854 /* Update the vlan tag num */
856 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
858 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
859 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
860 ULP_BITMAP_SET(params->hdr_bitmap.bits,
861 BNXT_ULP_HDR_BIT_OO_VLAN);
862 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
863 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
864 outer_vtag_num == 1) {
865 /* update the vlan tag num */
867 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
869 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
870 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
871 ULP_BITMAP_SET(params->hdr_bitmap.bits,
872 BNXT_ULP_HDR_BIT_OI_VLAN);
873 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
874 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
876 /* update the vlan tag num */
878 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
880 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
881 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
882 ULP_BITMAP_SET(params->hdr_bitmap.bits,
883 BNXT_ULP_HDR_BIT_IO_VLAN);
885 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
886 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
887 inner_vtag_num == 1) {
888 /* update the vlan tag num */
890 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
892 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
893 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
894 ULP_BITMAP_SET(params->hdr_bitmap.bits,
895 BNXT_ULP_HDR_BIT_II_VLAN);
898 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
899 return BNXT_TF_RC_ERROR;
901 /* Update the field protocol hdr bitmap */
902 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
903 return BNXT_TF_RC_SUCCESS;
906 /* Function to handle the update of proto header based on field values */
/* From an L3 next-protocol value, sets the inner (in_flag non-zero) or
 * outer UDP/TCP fast-path bits with the matching L4 computed field;
 * GRE sets the tunnel bit, ICMP picks inner vs outer from the L3_TUN
 * flag. Also records the protocol id computed fields (tail lines
 * partly elided in this view).
 */
908 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
909 uint8_t proto, uint32_t in_flag)
911 if (proto == IPPROTO_UDP) {
913 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
914 BNXT_ULP_HDR_BIT_I_UDP);
915 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
917 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
918 BNXT_ULP_HDR_BIT_O_UDP);
919 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
921 } else if (proto == IPPROTO_TCP) {
923 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
924 BNXT_ULP_HDR_BIT_I_TCP);
925 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
927 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
928 BNXT_ULP_HDR_BIT_O_TCP);
929 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
931 } else if (proto == IPPROTO_GRE) {
932 ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
933 } else if (proto == IPPROTO_ICMP) {
934 if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
935 ULP_BITMAP_SET(param->hdr_bitmap.bits,
936 BNXT_ULP_HDR_BIT_I_ICMP);
938 ULP_BITMAP_SET(param->hdr_bitmap.bits,
939 BNXT_ULP_HDR_BIT_O_ICMP);
943 ULP_COMP_FLD_IDX_WR(param,
944 BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
946 ULP_COMP_FLD_IDX_WR(param,
947 BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
950 ULP_COMP_FLD_IDX_WR(param,
951 BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
953 ULP_COMP_FLD_IDX_WR(param,
954 BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
960 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/* Copies every IPv4 header field from the item into hdr_field slots
 * (tos with MASK_IGNORE, next_proto_id with MATCH_IGNORE), rejects a
 * third L3 header, classifies the header as inner (when an outer L3
 * bit already exists) or outer IPv4, AND-combines the proto with its
 * mask for the L4 classification, and bumps the L3 header count.
 * Fix: repaired mojibake '&params' (was rendered as a pilcrow glyph).
 */
962 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
963 struct ulp_rte_parser_params *params)
965 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
966 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
967 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
968 uint32_t idx = 0, dip_idx = 0;
971 uint32_t inner_flag = 0;
974 /* validate there are no 3rd L3 header */
975 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
977 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
978 return BNXT_TF_RC_ERROR;
981 if (ulp_rte_prsr_fld_size_validate(params, &idx,
982 BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
983 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
984 return BNXT_TF_RC_ERROR;
988 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
991 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
992 ulp_rte_prsr_fld_mask(params, &idx, size,
993 ulp_deference_struct(ipv4_spec, hdr.version_ihl),
994 ulp_deference_struct(ipv4_mask, hdr.version_ihl),
995 ULP_PRSR_ACT_DEFAULT);
998 * The tos field is ignored since OVS is setting it as wild card
999 * match and it is not supported. This is a work around and
1000 * shall be addressed in the future.
1002 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
1003 ulp_rte_prsr_fld_mask(params, &idx, size,
1004 ulp_deference_struct(ipv4_spec,
1005 hdr.type_of_service),
1006 ulp_deference_struct(ipv4_mask,
1007 hdr.type_of_service),
1008 ULP_PRSR_ACT_MASK_IGNORE);
1010 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
1011 ulp_rte_prsr_fld_mask(params, &idx, size,
1012 ulp_deference_struct(ipv4_spec, hdr.total_length),
1013 ulp_deference_struct(ipv4_mask, hdr.total_length),
1014 ULP_PRSR_ACT_DEFAULT);
1016 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
1017 ulp_rte_prsr_fld_mask(params, &idx, size,
1018 ulp_deference_struct(ipv4_spec, hdr.packet_id),
1019 ulp_deference_struct(ipv4_mask, hdr.packet_id),
1020 ULP_PRSR_ACT_DEFAULT);
1022 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
1023 ulp_rte_prsr_fld_mask(params, &idx, size,
1024 ulp_deference_struct(ipv4_spec,
1025 hdr.fragment_offset),
1026 ulp_deference_struct(ipv4_mask,
1027 hdr.fragment_offset),
1028 ULP_PRSR_ACT_DEFAULT);
1030 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
1031 ulp_rte_prsr_fld_mask(params, &idx, size,
1032 ulp_deference_struct(ipv4_spec, hdr.time_to_live),
1033 ulp_deference_struct(ipv4_mask, hdr.time_to_live),
1034 ULP_PRSR_ACT_DEFAULT);
1036 /* Ignore proto for matching templates */
1037 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
1038 ulp_rte_prsr_fld_mask(params, &idx, size,
1039 ulp_deference_struct(ipv4_spec,
1041 ulp_deference_struct(ipv4_mask,
1043 ULP_PRSR_ACT_MATCH_IGNORE);
1045 proto = ipv4_spec->hdr.next_proto_id;
1047 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
1048 ulp_rte_prsr_fld_mask(params, &idx, size,
1049 ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
1050 ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
1051 ULP_PRSR_ACT_DEFAULT);
1053 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
1054 ulp_rte_prsr_fld_mask(params, &idx, size,
1055 ulp_deference_struct(ipv4_spec, hdr.src_addr),
1056 ulp_deference_struct(ipv4_mask, hdr.src_addr),
1057 ULP_PRSR_ACT_DEFAULT);
1060 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
1061 ulp_rte_prsr_fld_mask(params, &idx, size,
1062 ulp_deference_struct(ipv4_spec, hdr.dst_addr),
1063 ulp_deference_struct(ipv4_mask, hdr.dst_addr),
1064 ULP_PRSR_ACT_DEFAULT);
1066 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1067 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1068 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1069 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1070 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1073 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1074 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1075 /* Update the tunnel offload dest ip offset */
1076 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1080 /* Some of the PMD applications may set the protocol field
1081 * in the IPv4 spec but don't set the mask. So, consider
1082 * the mask in the proto value calculation.
1085 proto &= ipv4_mask->hdr.next_proto_id;
1087 /* Update the field protocol hdr bitmap */
1088 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1089 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1090 return BNXT_TF_RC_SUCCESS;
1093 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * Copies the IPv6 spec/mask fields (vtc_flow split into version, traffic
 * class and flow label; payload_len, proto, hop_limits, src/dst addr)
 * into the params->hdr_field array and updates the header and computed
 * field bitmaps.  Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a
 * third L3 header is seen or the field count validation fails.
 */
1095 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1096 struct ulp_rte_parser_params *params)
1098 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1099 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1100 struct ulp_rte_hdr_bitmap *hdr_bitmap = &paramsparams->hdr_bitmap;
1101 uint32_t idx = 0, dip_idx = 0;
1103 uint32_t ver_spec = 0, ver_mask = 0;
1104 uint32_t tc_spec = 0, tc_mask = 0;
1105 uint32_t lab_spec = 0, lab_mask = 0;
1107 uint32_t inner_flag = 0;
/* Only two L3 headers (outer + inner) are supported per flow. */
1110 /* validate there are no 3rd L3 header */
1111 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1113 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1114 return BNXT_TF_RC_ERROR;
/* Make sure hdr_field has room for every IPv6 field. */
1117 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1118 BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
1119 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1120 return BNXT_TF_RC_ERROR;
1124 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* Split vtc_flow into version, traffic class and flow label pieces. */
1128 ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1129 tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1130 lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1131 proto = ipv6_spec->hdr.proto;
1135 ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1136 tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1137 lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1139 /* Some of the PMD applications may set the protocol field
1140 * in the IPv6 spec but don't set the mask. So, consider
1141 * the mask in proto value calculation.
1143 proto &= ipv6_mask->hdr.proto;
1146 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
1147 ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
1148 ULP_PRSR_ACT_DEFAULT);
1150 * The TC and flow label field are ignored since OVS is
1151 * setting it for match and it is not supported.
1152 * This is a work around and
1153 * shall be addressed in the future.
1155 ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
1156 ULP_PRSR_ACT_MASK_IGNORE);
1157 ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
1158 ULP_PRSR_ACT_MASK_IGNORE);
1160 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
1161 ulp_rte_prsr_fld_mask(params, &idx, size,
1162 ulp_deference_struct(ipv6_spec, hdr.payload_len),
1163 ulp_deference_struct(ipv6_mask, hdr.payload_len),
1164 ULP_PRSR_ACT_DEFAULT);
/* proto is copied but excluded from template matching; it is folded
 * into the proto-type bitmap via ulp_rte_l3_proto_type_update() below.
 */
1166 /* Ignore proto for template matching */
1167 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
1168 ulp_rte_prsr_fld_mask(params, &idx, size,
1169 ulp_deference_struct(ipv6_spec, hdr.proto),
1170 ulp_deference_struct(ipv6_mask, hdr.proto),
1171 ULP_PRSR_ACT_MATCH_IGNORE);
1173 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
1174 ulp_rte_prsr_fld_mask(params, &idx, size,
1175 ulp_deference_struct(ipv6_spec, hdr.hop_limits),
1176 ulp_deference_struct(ipv6_mask, hdr.hop_limits),
1177 ULP_PRSR_ACT_DEFAULT);
1179 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
1180 ulp_rte_prsr_fld_mask(params, &idx, size,
1181 ulp_deference_struct(ipv6_spec, hdr.src_addr),
1182 ulp_deference_struct(ipv6_mask, hdr.src_addr),
1183 ULP_PRSR_ACT_DEFAULT);
1186 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
1187 ulp_rte_prsr_fld_mask(params, &idx, size,
1188 ulp_deference_struct(ipv6_spec, hdr.dst_addr),
1189 ulp_deference_struct(ipv6_mask, hdr.dst_addr),
1190 ULP_PRSR_ACT_DEFAULT);
/* If an outer L3 header was already recorded, this one is inner IPv6. */
1192 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1193 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1194 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1195 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1196 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1199 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1200 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1201 /* Update the tunnel offload dest ip offset */
1202 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1206 /* Update the field protocol hdr bitmap */
1207 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1208 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1210 return BNXT_TF_RC_SUCCESS;
1213 /* Function to handle the update of proto header based on field values */
/*
 * Derives tunnel state from the L4 destination port: a dst_port equal to
 * the VXLAN well-known port (4789, compared in network byte order) marks
 * the fast-path T_VXLAN bit and the L3_TUN computed field.  The second
 * check re-asserts L3_TUN whenever a VXLAN or GRE tunnel header has
 * already been recorded in the header bitmap.
 */
1215 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
/* dst_port is expected in network byte order, as parsed from the item */
1218 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1219 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1220 BNXT_ULP_HDR_BIT_T_VXLAN);
1221 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1224 if (ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1225 BNXT_ULP_HDR_BIT_T_VXLAN) ||
1226 ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1227 BNXT_ULP_HDR_BIT_T_GRE))
1228 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1231 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies the UDP spec/mask fields into params->hdr_field, stores the
 * ports (converted from network byte order) and IPPROTO_UDP into the
 * inner or outer L4 computed fields, sets the I_UDP/O_UDP header bits,
 * and lets ulp_rte_l4_proto_type_update() detect a VXLAN dst port.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1233 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1234 struct ulp_rte_parser_params *params)
1236 const struct rte_flow_item_udp *udp_spec = item->spec;
1237 const struct rte_flow_item_udp *udp_mask = item->mask;
1238 struct ulp_rte_hdr_bitmap *hdr_bitmap = &paramsparams->hdr_bitmap;
1241 uint16_t dport = 0, sport = 0;
/* Only two L4 headers (outer + inner) are supported per flow. */
1244 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1246 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1247 return BNXT_TF_RC_ERROR;
/* Ports kept in network byte order here; converted where consumed. */
1251 sport = udp_spec->hdr.src_port;
1252 dport = udp_spec->hdr.dst_port;
/* Make sure hdr_field has room for every UDP field. */
1255 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1256 BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1257 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1258 return BNXT_TF_RC_ERROR;
1262 * Copy the rte_flow_item for udp into hdr_field using udp
1265 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1266 ulp_rte_prsr_fld_mask(params, &idx, size,
1267 ulp_deference_struct(udp_spec, hdr.src_port),
1268 ulp_deference_struct(udp_mask, hdr.src_port),
1269 ULP_PRSR_ACT_DEFAULT);
1271 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1272 ulp_rte_prsr_fld_mask(params, &idx, size,
1273 ulp_deference_struct(udp_spec, hdr.dst_port),
1274 ulp_deference_struct(udp_mask, hdr.dst_port),
1275 ULP_PRSR_ACT_DEFAULT);
1277 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1278 ulp_rte_prsr_fld_mask(params, &idx, size,
1279 ulp_deference_struct(udp_spec, hdr.dgram_len),
1280 ulp_deference_struct(udp_mask, hdr.dgram_len),
1281 ULP_PRSR_ACT_DEFAULT);
1283 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1284 ulp_rte_prsr_fld_mask(params, &idx, size,
1285 ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1286 ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1287 ULP_PRSR_ACT_DEFAULT);
/* If an outer L4 header was already recorded, this one is inner UDP. */
1289 /* Set the udp header bitmap and computed l4 header bitmaps */
1290 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1291 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1292 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1293 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1294 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1295 (uint32_t)rte_be_to_cpu_16(sport));
1296 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1297 (uint32_t)rte_be_to_cpu_16(dport));
1298 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1300 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
/* FB (field-bitmap) port fields are only valid when a mask was given */
1302 if (udp_mask && udp_mask->hdr.src_port)
1303 ULP_COMP_FLD_IDX_WR(params,
1304 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1306 if (udp_mask && udp_mask->hdr.dst_port)
1307 ULP_COMP_FLD_IDX_WR(params,
1308 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1311 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1312 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1313 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1314 (uint32_t)rte_be_to_cpu_16(sport));
1315 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1316 (uint32_t)rte_be_to_cpu_16(dport));
1317 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1319 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1321 if (udp_mask && udp_mask->hdr.src_port)
1322 ULP_COMP_FLD_IDX_WR(params,
1323 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1325 if (udp_mask && udp_mask->hdr.dst_port)
1326 ULP_COMP_FLD_IDX_WR(params,
1327 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
/* dst_port may identify a VXLAN tunnel (see l4_proto_type_update) */
1330 /* Update the field protocol hdr bitmap */
1331 ulp_rte_l4_proto_type_update(params, dport);
1333 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1334 return BNXT_TF_RC_SUCCESS;
1337 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies the TCP spec/mask fields into params->hdr_field, stores the
 * ports (converted from network byte order) and IPPROTO_TCP into the
 * inner or outer L4 computed fields, and sets the I_TCP/O_TCP header
 * bits.  Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1339 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1340 struct ulp_rte_parser_params *params)
1342 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1343 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1344 struct ulp_rte_hdr_bitmap *hdr_bitmap = &paramsparams->hdr_bitmap;
1346 uint16_t dport = 0, sport = 0;
/* Only two L4 headers (outer + inner) are supported per flow. */
1350 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1352 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1353 return BNXT_TF_RC_ERROR;
/* Ports kept in network byte order here; converted where consumed. */
1357 sport = tcp_spec->hdr.src_port;
1358 dport = tcp_spec->hdr.dst_port;
/* Make sure hdr_field has room for every TCP field. */
1361 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1362 BNXT_ULP_PROTO_HDR_TCP_NUM)) {
1363 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1364 return BNXT_TF_RC_ERROR;
1368 * Copy the rte_flow_item for tcp into hdr_field using tcp
1371 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1372 ulp_rte_prsr_fld_mask(params, &idx, size,
1373 ulp_deference_struct(tcp_spec, hdr.src_port),
1374 ulp_deference_struct(tcp_mask, hdr.src_port),
1375 ULP_PRSR_ACT_DEFAULT);
1377 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1378 ulp_rte_prsr_fld_mask(params, &idx, size,
1379 ulp_deference_struct(tcp_spec, hdr.dst_port),
1380 ulp_deference_struct(tcp_mask, hdr.dst_port),
1381 ULP_PRSR_ACT_DEFAULT);
1383 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1384 ulp_rte_prsr_fld_mask(params, &idx, size,
1385 ulp_deference_struct(tcp_spec, hdr.sent_seq),
1386 ulp_deference_struct(tcp_mask, hdr.sent_seq),
1387 ULP_PRSR_ACT_DEFAULT);
1389 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1390 ulp_rte_prsr_fld_mask(params, &idx, size,
1391 ulp_deference_struct(tcp_spec, hdr.recv_ack),
1392 ulp_deference_struct(tcp_mask, hdr.recv_ack),
1393 ULP_PRSR_ACT_DEFAULT);
1395 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1396 ulp_rte_prsr_fld_mask(params, &idx, size,
1397 ulp_deference_struct(tcp_spec, hdr.data_off),
1398 ulp_deference_struct(tcp_mask, hdr.data_off),
1399 ULP_PRSR_ACT_DEFAULT);
1401 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1402 ulp_rte_prsr_fld_mask(params, &idx, size,
1403 ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1404 ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1405 ULP_PRSR_ACT_DEFAULT);
1407 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1408 ulp_rte_prsr_fld_mask(params, &idx, size,
1409 ulp_deference_struct(tcp_spec, hdr.rx_win),
1410 ulp_deference_struct(tcp_mask, hdr.rx_win),
1411 ULP_PRSR_ACT_DEFAULT);
1413 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1414 ulp_rte_prsr_fld_mask(params, &idx, size,
1415 ulp_deference_struct(tcp_spec, hdr.cksum),
1416 ulp_deference_struct(tcp_mask, hdr.cksum),
1417 ULP_PRSR_ACT_DEFAULT);
1419 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1420 ulp_rte_prsr_fld_mask(params, &idx, size,
1421 ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1422 ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1423 ULP_PRSR_ACT_DEFAULT);
/* If an outer L4 header was already recorded, this one is inner TCP. */
1425 /* Set the tcp header bitmap and computed l4 header bitmaps */
1426 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1427 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1428 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1429 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1430 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1431 (uint32_t)rte_be_to_cpu_16(sport));
1432 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1433 (uint32_t)rte_be_to_cpu_16(dport));
1434 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1436 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
/* FB (field-bitmap) port fields are only valid when a mask was given */
1438 if (tcp_mask && tcp_mask->hdr.src_port)
1439 ULP_COMP_FLD_IDX_WR(params,
1440 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1442 if (tcp_mask && tcp_mask->hdr.dst_port)
1443 ULP_COMP_FLD_IDX_WR(params,
1444 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1447 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1448 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1449 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1450 (uint32_t)rte_be_to_cpu_16(sport));
1451 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1452 (uint32_t)rte_be_to_cpu_16(dport));
1453 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1455 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1457 if (tcp_mask && tcp_mask->hdr.src_port)
1458 ULP_COMP_FLD_IDX_WR(params,
1459 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1461 if (tcp_mask && tcp_mask->hdr.dst_port)
1462 ULP_COMP_FLD_IDX_WR(params,
1463 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1466 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1467 return BNXT_TF_RC_SUCCESS;
1470 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1472 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1473 struct ulp_rte_parser_params *params)
1475 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1476 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1477 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1481 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1482 BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1483 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1484 return BNXT_TF_RC_ERROR;
1488 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1491 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1492 ulp_rte_prsr_fld_mask(params, &idx, size,
1493 ulp_deference_struct(vxlan_spec, flags),
1494 ulp_deference_struct(vxlan_mask, flags),
1495 ULP_PRSR_ACT_DEFAULT);
1497 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1498 ulp_rte_prsr_fld_mask(params, &idx, size,
1499 ulp_deference_struct(vxlan_spec, rsvd0),
1500 ulp_deference_struct(vxlan_mask, rsvd0),
1501 ULP_PRSR_ACT_DEFAULT);
1503 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1504 ulp_rte_prsr_fld_mask(params, &idx, size,
1505 ulp_deference_struct(vxlan_spec, vni),
1506 ulp_deference_struct(vxlan_mask, vni),
1507 ULP_PRSR_ACT_DEFAULT);
1509 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1510 ulp_rte_prsr_fld_mask(params, &idx, size,
1511 ulp_deference_struct(vxlan_spec, rsvd1),
1512 ulp_deference_struct(vxlan_mask, rsvd1),
1513 ULP_PRSR_ACT_DEFAULT);
1515 /* Update the hdr_bitmap with vxlan */
1516 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1517 ulp_rte_l4_proto_type_update(params, 0);
1518 return BNXT_TF_RC_SUCCESS;
1521 /* Function to handle the parsing of RTE Flow item GRE Header. */
1523 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1524 struct ulp_rte_parser_params *params)
1526 const struct rte_flow_item_gre *gre_spec = item->spec;
1527 const struct rte_flow_item_gre *gre_mask = item->mask;
1528 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1532 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1533 BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1534 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1535 return BNXT_TF_RC_ERROR;
1538 size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1539 ulp_rte_prsr_fld_mask(params, &idx, size,
1540 ulp_deference_struct(gre_spec, c_rsvd0_ver),
1541 ulp_deference_struct(gre_mask, c_rsvd0_ver),
1542 ULP_PRSR_ACT_DEFAULT);
1544 size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1545 ulp_rte_prsr_fld_mask(params, &idx, size,
1546 ulp_deference_struct(gre_spec, protocol),
1547 ulp_deference_struct(gre_mask, protocol),
1548 ULP_PRSR_ACT_DEFAULT);
1550 /* Update the hdr_bitmap with GRE */
1551 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1552 ulp_rte_l4_proto_type_update(params, 0);
1553 return BNXT_TF_RC_SUCCESS;
1556 /* Function to handle the parsing of RTE Flow item ANY. */
1558 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1559 struct ulp_rte_parser_params *params __rte_unused)
1561 return BNXT_TF_RC_SUCCESS;
1564 /* Function to handle the parsing of RTE Flow item ICMP Header. */
/*
 * Copies the ICMP spec/mask fields (type, code, cksum, ident, seq_nb)
 * into params->hdr_field and sets the inner or outer ICMP header bit
 * depending on whether an L3 tunnel was already detected.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1566 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1567 struct ulp_rte_parser_params *params)
1569 const struct rte_flow_item_icmp *icmp_spec = item->spec;
1570 const struct rte_flow_item_icmp *icmp_mask = item->mask;
1571 struct ulp_rte_hdr_bitmap *hdr_bitmap = &paramsparams->hdr_bitmap;
/* Make sure hdr_field has room for every ICMP field. */
1575 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1576 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1577 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1578 return BNXT_TF_RC_ERROR;
1581 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1582 ulp_rte_prsr_fld_mask(params, &idx, size,
1583 ulp_deference_struct(icmp_spec, hdr.icmp_type),
1584 ulp_deference_struct(icmp_mask, hdr.icmp_type),
1585 ULP_PRSR_ACT_DEFAULT);
1587 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1588 ulp_rte_prsr_fld_mask(params, &idx, size,
1589 ulp_deference_struct(icmp_spec, hdr.icmp_code),
1590 ulp_deference_struct(icmp_mask, hdr.icmp_code),
1591 ULP_PRSR_ACT_DEFAULT);
1593 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1594 ulp_rte_prsr_fld_mask(params, &idx, size,
1595 ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1596 ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1597 ULP_PRSR_ACT_DEFAULT);
1599 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1600 ulp_rte_prsr_fld_mask(params, &idx, size,
1601 ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1602 ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1603 ULP_PRSR_ACT_DEFAULT);
1605 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1606 ulp_rte_prsr_fld_mask(params, &idx, size,
1607 ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1608 ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1609 ULP_PRSR_ACT_DEFAULT);
/* Inside a tunnel the ICMP header is an inner header, else outer. */
1611 /* Update the hdr_bitmap with ICMP */
1612 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1613 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1615 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1616 return BNXT_TF_RC_SUCCESS;
1619 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
/*
 * Copies the ICMPv6 spec/mask fields (type, code, checksum) into
 * params->hdr_field, rejects ICMPv6 following an outer IPv4 header,
 * and sets the inner or outer ICMP header bit depending on whether an
 * L3 tunnel was detected.  Returns BNXT_TF_RC_SUCCESS or
 * BNXT_TF_RC_ERROR.
 */
1621 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1622 struct ulp_rte_parser_params *params)
1624 const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1625 const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1626 struct ulp_rte_hdr_bitmap *hdr_bitmap = &paramsparams->hdr_bitmap;
/* Make sure hdr_field has room for every ICMPv6 field. */
1630 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1631 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1632 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1633 return BNXT_TF_RC_ERROR;
1636 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1637 ulp_rte_prsr_fld_mask(params, &idx, size,
1638 ulp_deference_struct(icmp_spec, type),
1639 ulp_deference_struct(icmp_mask, type),
1640 ULP_PRSR_ACT_DEFAULT);
1642 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1643 ulp_rte_prsr_fld_mask(params, &idx, size,
1644 ulp_deference_struct(icmp_spec, code),
1645 ulp_deference_struct(icmp_mask, code),
1646 ULP_PRSR_ACT_DEFAULT);
1648 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
1649 ulp_rte_prsr_fld_mask(params, &idx, size,
1650 ulp_deference_struct(icmp_spec, checksum),
1651 ulp_deference_struct(icmp_mask, checksum),
1652 ULP_PRSR_ACT_DEFAULT);
/* ICMPv6 over an outer IPv4 header is an invalid combination. */
1654 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
1655 BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
1656 return BNXT_TF_RC_ERROR;
/* Inside a tunnel the ICMP header is an inner header, else outer. */
1659 /* Update the hdr_bitmap with ICMP */
1660 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1661 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1663 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1664 return BNXT_TF_RC_SUCCESS;
1667 /* Function to handle the parsing of RTE Flow item void Header */
1669 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1670 struct ulp_rte_parser_params *params __rte_unused)
1672 return BNXT_TF_RC_SUCCESS;
1675 /* Function to handle the parsing of RTE Flow action void Header. */
1677 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1678 struct ulp_rte_parser_params *params __rte_unused)
1680 return BNXT_TF_RC_SUCCESS;
1683 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the 32-bit mark id (converted to network byte order) into the
 * action properties and sets ACT_BIT_MARK; errors out when the mark
 * argument is missing.  Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1685 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1686 struct ulp_rte_parser_params *param)
1688 const struct rte_flow_action_mark *mark;
1689 struct ulp_rte_act_bitmap *act = &paramparam->act_bitmap;
1692 mark = action_item->conf;
/* The mark id is stored big-endian in the action property blob. */
1694 mark_id = tfp_cpu_to_be_32(mark->id);
1695 memcpy(&paramparam->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1696 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1698 /* Update the act_bitmap with mark */
1699 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1700 return BNXT_TF_RC_SUCCESS;
/* Reached when the mark configuration pointer is absent. */
1702 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1703 return BNXT_TF_RC_ERROR;
1706 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Copies the RSS configuration (types, level, key_len and key) into the
 * action properties and sets ACT_BIT_RSS.  Rejects a missing
 * configuration or a key longer than the property buffer.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1708 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1709 struct ulp_rte_parser_params *param)
1711 const struct rte_flow_action_rss *rss;
1712 struct ulp_rte_act_prop *ap = &paramparam->act_prop;
1714 if (action_item == NULL || action_item->conf == NULL) {
1715 BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1716 return BNXT_TF_RC_ERROR;
1719 rss = action_item->conf;
1720 /* Copy the rss into the specific action properties */
1721 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1722 BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1723 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1724 BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1725 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1726 &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
/* Bound the key copy to the fixed-size property buffer. */
1728 if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1729 BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1730 return BNXT_TF_RC_ERROR;
1732 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1735 /* set the RSS action header bit */
1736 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1738 return BNXT_TF_RC_SUCCESS;
1741 /* Function to handle the parsing of RTE Flow item eth Header. */
1743 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
1744 const struct rte_flow_item_eth *eth_spec)
1746 struct ulp_rte_hdr_field *field;
1749 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
1750 size = sizeof(eth_spec->dst.addr_bytes);
1751 field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);
1753 size = sizeof(eth_spec->src.addr_bytes);
1754 field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);
1756 size = sizeof(eth_spec->type);
1757 field = ulp_rte_parser_fld_copy(field, ð_spec->type, size);
1759 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
1762 /* Function to handle the parsing of RTE Flow item vlan Header. */
/*
 * Encap parser for a VLAN item inside a vxlan_encap action: copies the
 * TCI and inner ethertype into the outer or inner vlan encap slot and
 * sets the matching OO/OI vlan encap header bit.
 */
1764 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
1765 const struct rte_flow_item_vlan *vlan_spec,
/* NOTE(review): the outer-vs-inner slot selection condition is not
 * visible in this view - confirm it keys off the inner flag parameter.
 */
1768 struct ulp_rte_hdr_field *field;
1772 field = &paramsparams->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
1773 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1774 BNXT_ULP_HDR_BIT_OO_VLAN);
1776 field = &paramsparams->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
1777 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1778 BNXT_ULP_HDR_BIT_OI_VLAN);
/* TCI and inner ethertype land in consecutive encap field slots. */
1781 size = sizeof(vlan_spec->tci);
1782 field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);
1784 size = sizeof(vlan_spec->inner_type);
1785 field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
1788 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
/*
 * Encap parser for the IPv4 item inside a vxlan_encap action: copies
 * the IPv4 header fields into the encap field slots, substituting
 * defaults for a zero version_ihl (RTE_IPV4_VHL_DEF) and a zero TTL
 * (BNXT_ULP_DEFAULT_TTL), then sets the O_IPV4 encap bit.
 */
1790 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
1791 const struct rte_flow_item_ipv4 *ip)
1793 struct ulp_rte_hdr_field *field;
1797 field = &paramsparams->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
/* Fall back to the default version/IHL when the spec leaves it zero */
1798 size = sizeof(ip->hdr.version_ihl);
1799 if (!ip->hdr.version_ihl)
1800 val8 = RTE_IPV4_VHL_DEF;
1802 val8 = ip->hdr.version_ihl;
1803 field = ulp_rte_parser_fld_copy(field, &val8, size);
1805 size = sizeof(ip->hdr.type_of_service);
1806 field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
1808 size = sizeof(ip->hdr.packet_id);
1809 field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
1811 size = sizeof(ip->hdr.fragment_offset);
1812 field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
/* Fall back to the default TTL when the spec leaves it zero */
1814 size = sizeof(ip->hdr.time_to_live);
1815 if (!ip->hdr.time_to_live)
1816 val8 = BNXT_ULP_DEFAULT_TTL;
1818 val8 = ip->hdr.time_to_live;
1819 field = ulp_rte_parser_fld_copy(field, &val8, size);
1821 size = sizeof(ip->hdr.next_proto_id);
1822 field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
1824 size = sizeof(ip->hdr.src_addr);
1825 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1827 size = sizeof(ip->hdr.dst_addr);
1828 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1830 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
1833 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
/*
 * Encap parser for the IPv6 item inside a vxlan_encap action: copies
 * the IPv6 header fields into the encap field slots, substituting
 * defaults for a zero vtc_flow (BNXT_ULP_IPV6_DFLT_VER, big-endian)
 * and a zero hop limit (BNXT_ULP_DEFAULT_TTL), then sets the O_IPV6
 * encap bit.
 */
1835 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
1836 const struct rte_flow_item_ipv6 *ip)
1838 struct ulp_rte_hdr_field *field;
1843 field = &paramsparams->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
/* Fall back to the default version word when the spec leaves it zero */
1844 size = sizeof(ip->hdr.vtc_flow);
1845 if (!ip->hdr.vtc_flow)
1846 val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
1848 val32 = ip->hdr.vtc_flow;
1849 field = ulp_rte_parser_fld_copy(field, &val32, size);
1851 size = sizeof(ip->hdr.proto);
1852 field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
/* Fall back to the default TTL when the spec leaves hop_limits zero */
1854 size = sizeof(ip->hdr.hop_limits);
1855 if (!ip->hdr.hop_limits)
1856 val8 = BNXT_ULP_DEFAULT_TTL;
1858 val8 = ip->hdr.hop_limits;
1859 field = ulp_rte_parser_fld_copy(field, &val8, size);
1861 size = sizeof(ip->hdr.src_addr);
1862 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1864 size = sizeof(ip->hdr.dst_addr);
1865 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1867 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
1870 /* Function to handle the parsing of RTE Flow item UDP Header. */
1872 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
1873 const struct rte_flow_item_udp *udp_spec)
1875 struct ulp_rte_hdr_field *field;
1877 uint8_t type = IPPROTO_UDP;
1879 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
1880 size = sizeof(udp_spec->hdr.src_port);
1881 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
1883 size = sizeof(udp_spec->hdr.dst_port);
1884 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
1886 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
1888 /* Update thhe ip header protocol */
1889 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
1890 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1891 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
1892 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1895 /* Function to handle the parsing of RTE Flow item vxlan Header. */
1897 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
1898 struct rte_flow_item_vxlan *vxlan_spec)
1900 struct ulp_rte_hdr_field *field;
1903 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
1904 size = sizeof(vxlan_spec->flags);
1905 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);
1907 size = sizeof(vxlan_spec->rsvd0);
1908 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);
1910 size = sizeof(vxlan_spec->vni);
1911 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);
1913 size = sizeof(vxlan_spec->rsvd1);
1914 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);
1916 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1919 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walks the item list of a RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action in
 * the fixed order ETH [VLAN [VLAN]] (IPV4|IPV6) UDP VXLAN, dispatching
 * each item to its encap sub-parser, filling the encap action
 * properties (vlan count/size, ip size/type, tunnel size) and setting
 * ACT_BIT_VXLAN_ENCAP.  Any deviation from the expected item sequence
 * returns BNXT_TF_RC_ERROR.
 */
1921 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1922 struct ulp_rte_parser_params *params)
1924 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1925 const struct rte_flow_item *item;
1926 const struct rte_flow_item_ipv4 *ipv4_spec;
1927 const struct rte_flow_item_ipv6 *ipv6_spec;
1928 struct rte_flow_item_vxlan vxlan_spec;
1929 uint32_t vlan_num = 0, vlan_size = 0;
1930 uint32_t ip_size = 0, ip_type = 0;
1931 uint32_t vxlan_size = 0;
1932 struct ulp_rte_act_bitmap *act = &paramsparams->act_bitmap;
1933 struct ulp_rte_act_prop *ap = &paramsparams->act_prop;
1935 vxlan_encap = action_item->conf;
1937 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1938 return BNXT_TF_RC_ERROR;
1941 item = vxlan_encap->definition;
1943 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1944 return BNXT_TF_RC_ERROR;
1947 if (!ulp_rte_item_skip_void(&item, 0))
1948 return BNXT_TF_RC_ERROR;
1950 /* must have ethernet header */
1951 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1952 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1953 return BNXT_TF_RC_ERROR;
1956 /* Parse the ethernet header */
1958 ulp_rte_enc_eth_hdr_handler(params, item->spec)
1960 /* Goto the next item */
1961 if (!ulp_rte_item_skip_void(&item, 1))
1962 return BNXT_TF_RC_ERROR;
1964 /* May have vlan header */
1965 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1968 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
1970 if (!ulp_rte_item_skip_void(&item, 1))
1971 return BNXT_TF_RC_ERROR;
1974 /* may have two vlan headers */
1975 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1978 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
1980 if (!ulp_rte_item_skip_void(&item, 1))
1981 return BNXT_TF_RC_ERROR;
/* vlan count and byte size are stored big-endian in the properties */
1984 /* Update the vlan count and size of more than one */
1986 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1987 vlan_num = tfp_cpu_to_be_32(vlan_num);
1988 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1991 vlan_size = tfp_cpu_to_be_32(vlan_size);
1992 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1997 /* L3 must be IPv4, IPv6 */
1998 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1999 ipv4_spec = item->spec;
2000 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2002 /* Update the ip size details */
2003 ip_size = tfp_cpu_to_be_32(ip_size);
2004 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2005 &ip_size, sizeof(uint32_t));
2007 /* update the ip type */
2008 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2009 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2010 &ip_type, sizeof(uint32_t));
2012 /* update the computed field to notify it is ipv4 header */
2013 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2016 ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2018 if (!ulp_rte_item_skip_void(&item, 1))
2019 return BNXT_TF_RC_ERROR;
2020 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2021 ipv6_spec = item->spec;
2022 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2024 /* Update the ip size details */
2025 ip_size = tfp_cpu_to_be_32(ip_size);
2026 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2027 &ip_size, sizeof(uint32_t));
2029 /* update the ip type */
2030 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2031 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2032 &ip_type, sizeof(uint32_t));
2034 /* update the computed field to notify it is ipv6 header */
2035 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2038 ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2040 if (!ulp_rte_item_skip_void(&item, 1))
2041 return BNXT_TF_RC_ERROR;
2043 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2044 return BNXT_TF_RC_ERROR;
/* UDP must follow the L3 header */
2048 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2049 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
2050 return BNXT_TF_RC_ERROR;
2053 ulp_rte_enc_udp_hdr_handler(params, item->spec);
2055 if (!ulp_rte_item_skip_void(&item, 1))
2056 return BNXT_TF_RC_ERROR;
/* Finally the VXLAN item carrying the VNI */
2059 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2060 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
2061 return BNXT_TF_RC_ERROR;
2063 vxlan_size = sizeof(struct rte_flow_item_vxlan);
2064 /* copy the vxlan details */
2065 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* Force the I (valid-VNI) flag in the vxlan header, per RFC 7348 */
2066 vxlan_spec.flags = 0x08;
2067 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2068 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2069 &vxlan_size, sizeof(uint32_t));
2071 ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2073 /* update the hdr_bitmap with vxlan */
2074 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2075 return BNXT_TF_RC_SUCCESS;
2078 /* Function to handle the parsing of RTE Flow action vxlan_decap Header */
/*
 * Marks the flow as a VXLAN decap flow: sets BNXT_ULP_ACT_BIT_VXLAN_DECAP
 * in the action bitmap and raises the L3 tunnel / tunnel-decap computed
 * fields.  action_item carries no configuration and is not read.
 * Always returns BNXT_TF_RC_SUCCESS.
 * NOTE(review): this listing elides lines (return type, braces) between
 * the numbered lines -- verify against the original file.
 */
2080 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
2082 struct ulp_rte_parser_params *params)
2084 /* update the act_bitmap with vxlan decap */
2085 ULP_BITMAP_SET(params->act_bitmap.bits,
2086 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2087 /* Update computational field with tunnel decap info */
2088 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2089 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
2090 return BNXT_TF_RC_SUCCESS;
2093 /* Function to handle the parsing of RTE Flow action drop Header. */
/*
 * Records a drop action by setting BNXT_ULP_ACT_BIT_DROP in the action
 * bitmap.  The drop action has no configuration, so action_item is
 * unused.  Always returns BNXT_TF_RC_SUCCESS.
 */
2095 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2096 struct ulp_rte_parser_params *params)
2098 /* Update the act_bitmap with drop */
2099 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2100 return BNXT_TF_RC_SUCCESS;
2103 /* Function to handle the parsing of RTE Flow action count. */
/*
 * Copies the count action configuration into the action properties and
 * sets BNXT_ULP_ACT_BIT_COUNT.  Shared counters are rejected with
 * BNXT_TF_RC_PARSE_ERR.
 * NOTE(review): act_count is dereferenced below; a NULL check on
 * action_item->conf and the memcpy source argument appear to be elided
 * from this listing -- confirm against the original file.
 */
2105 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2106 struct ulp_rte_parser_params *params)
2108 const struct rte_flow_action_count *act_count;
2109 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
2111 act_count = action_item->conf;
2113 if (act_count->shared) {
2115 "Parse Error:Shared count not supported\n");
2116 return BNXT_TF_RC_PARSE_ERR;
2118 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2120 BNXT_ULP_ACT_PROP_SZ_COUNT);
2123 /* Update the act_bitmap with count */
2124 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2125 return BNXT_TF_RC_SUCCESS;
2128 /* Function to handle the parsing of action ports. */
/*
 * Resolves the destination for a port-redirect action using the port DB
 * entry at 'ifindex'.  Egress flows are given a vport; ingress flows a
 * default vnic (VF-rep ports use the VF function vnic, otherwise the
 * driver function vnic).  On success the resolved id is stored
 * big-endian in the action properties and ACT_PORT_IS_SET is raised.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR on port DB lookup
 * failure.
 */
2130 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2133 enum bnxt_ulp_direction_type dir;
2136 struct ulp_rte_act_prop *act = ¶m->act_prop;
2137 enum bnxt_ulp_intf_type port_type;
2140 /* Get the direction */
2141 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
2142 if (dir == BNXT_ULP_DIR_EGRESS) {
2143 /* For egress direction, fill vport */
2144 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2145 return BNXT_TF_RC_ERROR;
2148 pid = rte_cpu_to_be_32(pid);
2149 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2150 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2152 /* For ingress direction, fill vnic */
2153 port_type = ULP_COMP_FLD_IDX_RD(param,
2154 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2155 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2156 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2158 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2160 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2162 return BNXT_TF_RC_ERROR;
2165 pid = rte_cpu_to_be_32(pid);
2166 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2167 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2170 /* Update the action port set bit */
2171 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2172 return BNXT_TF_RC_SUCCESS;
2175 /* Function to handle the parsing of RTE Flow action PF. */
/*
 * Redirects the flow to the PF owning the current (incoming) port.
 * Looks up the port DB ifindex for the incoming interface, verifies the
 * interface is a PF, records the port type in the computed fields, then
 * delegates to ulp_rte_parser_act_port_set() to fill vport/vnic.
 * Returns BNXT_TF_RC_ERROR for unknown ports or non-PF interfaces.
 */
2177 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2178 struct ulp_rte_parser_params *params)
2182 enum bnxt_ulp_intf_type intf_type;
2184 /* Get the port id of the current device */
2185 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2187 /* Get the port db ifindex */
2188 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2190 BNXT_TF_DBG(ERR, "Invalid port id\n");
2191 return BNXT_TF_RC_ERROR;
2194 /* Check the port is PF port */
2195 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2196 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2197 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2198 return BNXT_TF_RC_ERROR;
2200 /* Update the action properties */
2201 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2202 return ulp_rte_parser_act_port_set(params, ifindex);
2205 /* Function to handle the parsing of RTE Flow action VF. */
/*
 * Redirects the flow to a VF of the current port.  Rejects a missing
 * conf and the 'original' attribute, fetches the bnxt device private
 * data for the port, translates the logical VF id into a port DB
 * ifindex, verifies the target is a (trusted) VF, records the port type
 * and delegates to ulp_rte_parser_act_port_set().
 * NOTE(review): guard lines (e.g. the NULL checks whose error paths are
 * visible at the "Invalid Argument"/"Invalid bp" messages) are elided
 * from this listing -- confirm against the original file.
 */
2207 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2208 struct ulp_rte_parser_params *params)
2210 const struct rte_flow_action_vf *vf_action;
2211 enum bnxt_ulp_intf_type intf_type;
2215 vf_action = action_item->conf;
2217 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2218 return BNXT_TF_RC_PARSE_ERR;
2221 if (vf_action->original) {
2222 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2223 return BNXT_TF_RC_PARSE_ERR;
2226 bp = bnxt_pmd_get_bp(params->port_id);
2228 BNXT_TF_DBG(ERR, "Invalid bp\n");
2229 return BNXT_TF_RC_ERROR;
2232 /* vf_action->id is a logical number which in this case is an
2233 * offset from the first VF. So, to get the absolute VF id, the
2234 * offset must be added to the absolute first vf id of that port.
2236 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2240 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2241 return BNXT_TF_RC_ERROR;
2243 /* Check the port is VF port */
2244 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2245 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2246 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2247 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2248 return BNXT_TF_RC_ERROR;
2251 /* Update the action properties */
2252 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2253 return ulp_rte_parser_act_port_set(params, ifindex);
2256 /* Function to handle the parsing of RTE Flow action port_id. */
/*
 * Redirects the flow to an arbitrary DPDK ethdev port.  Rejects a
 * missing conf and the 'original' attribute, maps the ethdev port id to
 * a port DB ifindex, validates the interface type, records it in the
 * computed fields and delegates to ulp_rte_parser_act_port_set().
 * NOTE(review): the NULL-conf and invalid-type guard lines are elided
 * from this listing (only their error paths are visible) -- confirm.
 */
2258 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
2259 struct ulp_rte_parser_params *param)
2261 const struct rte_flow_action_port_id *port_id = act_item->conf;
2263 enum bnxt_ulp_intf_type intf_type;
2267 "ParseErr: Invalid Argument\n");
2268 return BNXT_TF_RC_PARSE_ERR;
2270 if (port_id->original) {
2272 "ParseErr:Portid Original not supported\n");
2273 return BNXT_TF_RC_PARSE_ERR;
2276 /* Get the port db ifindex */
2277 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2279 BNXT_TF_DBG(ERR, "Invalid port id\n");
2280 return BNXT_TF_RC_ERROR;
2283 /* Get the intf type */
2284 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2286 BNXT_TF_DBG(ERR, "Invalid port type\n");
2287 return BNXT_TF_RC_ERROR;
2290 /* Set the action port */
2291 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2292 return ulp_rte_parser_act_port_set(param, ifindex);
2295 /* Function to handle the parsing of RTE Flow action phy_port. */
/*
 * Redirects the flow to a physical port.  Only valid for egress flows;
 * rejects a missing conf and the 'original' attribute.  Looks up the
 * vport for the physical port index, stores it big-endian in the VPORT
 * action property, and marks the action port as set with type PHY_PORT.
 * No call into ulp_rte_parser_act_port_set() is needed since the vport
 * is filled directly here.
 */
2297 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2298 struct ulp_rte_parser_params *prm)
2300 const struct rte_flow_action_phy_port *phy_port;
2304 enum bnxt_ulp_direction_type dir;
2306 phy_port = action_item->conf;
2309 "ParseErr: Invalid Argument\n");
2310 return BNXT_TF_RC_PARSE_ERR;
2313 if (phy_port->original) {
2315 "Parse Err:Port Original not supported\n");
2316 return BNXT_TF_RC_PARSE_ERR;
2318 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2319 if (dir != BNXT_ULP_DIR_EGRESS) {
2321 "Parse Err:Phy ports are valid only for egress\n");
2322 return BNXT_TF_RC_PARSE_ERR;
2324 /* Get the physical port details from port db */
2325 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2328 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2333 pid = rte_cpu_to_be_32(pid);
2334 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2335 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2337 /* Update the action port set bit */
2338 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2339 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2340 BNXT_ULP_INTF_TYPE_PHY_PORT);
2341 return BNXT_TF_RC_SUCCESS;
2344 /* Function to handle the parsing of RTE Flow action pop vlan. */
/*
 * Records an OpenFlow pop-vlan action: sets BNXT_ULP_ACT_BIT_POP_VLAN
 * in the action bitmap.  The action carries no configuration.  Always
 * returns BNXT_TF_RC_SUCCESS.
 */
2346 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2347 struct ulp_rte_parser_params *params)
2349 /* Update the act_bitmap with pop */
2350 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2351 return BNXT_TF_RC_SUCCESS;
2354 /* Function to handle the parsing of RTE Flow action push vlan. */
/*
 * Records an OpenFlow push-vlan action.  Only the 802.1Q ethertype
 * (RTE_ETHER_TYPE_VLAN) is supported; any other value is rejected with
 * BNXT_TF_RC_PARSE_ERR.  The ethertype is copied into the PUSH_VLAN
 * action property and BNXT_ULP_ACT_BIT_PUSH_VLAN is set.
 * NOTE(review): the NULL-conf guard whose error path appears at the
 * trailing "Push vlan arg is invalid" message is elided here -- confirm.
 */
2356 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2357 struct ulp_rte_parser_params *params)
2359 const struct rte_flow_action_of_push_vlan *push_vlan;
2361 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2363 push_vlan = action_item->conf;
2365 ethertype = push_vlan->ethertype;
2366 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2368 "Parse Err: Ethertype not supported\n");
2369 return BNXT_TF_RC_PARSE_ERR;
2371 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2372 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2373 /* Update the act_bitmap with push vlan */
2374 ULP_BITMAP_SET(params->act_bitmap.bits,
2375 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2376 return BNXT_TF_RC_SUCCESS;
2378 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2379 return BNXT_TF_RC_ERROR;
2382 /* Function to handle the parsing of RTE Flow action set vlan id. */
/*
 * Records an OpenFlow set-vlan-vid action: copies the VLAN id into the
 * SET_VLAN_VID action property and sets BNXT_ULP_ACT_BIT_SET_VLAN_VID.
 * Returns BNXT_TF_RC_ERROR when conf is NULL or the vid is zero (note:
 * a legitimate vid of 0 is therefore rejected by this check).
 */
2384 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2385 struct ulp_rte_parser_params *params)
2387 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2389 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2391 vlan_vid = action_item->conf;
2392 if (vlan_vid && vlan_vid->vlan_vid) {
2393 vid = vlan_vid->vlan_vid;
2394 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2395 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2396 /* Update the act_bitmap with vlan vid */
2397 ULP_BITMAP_SET(params->act_bitmap.bits,
2398 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2399 return BNXT_TF_RC_SUCCESS;
2401 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2402 return BNXT_TF_RC_ERROR;
2405 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
/*
 * Records an OpenFlow set-vlan-pcp action: copies the priority code
 * point into the SET_VLAN_PCP action property and sets
 * BNXT_ULP_ACT_BIT_SET_VLAN_PCP.  Returns BNXT_TF_RC_ERROR when conf
 * is NULL (the guard line is elided from this listing -- confirm).
 */
2407 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2408 struct ulp_rte_parser_params *params)
2410 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2412 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2414 vlan_pcp = action_item->conf;
2416 pcp = vlan_pcp->vlan_pcp;
2417 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2418 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2419 /* Update the act_bitmap with vlan pcp */
2420 ULP_BITMAP_SET(params->act_bitmap.bits,
2421 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2422 return BNXT_TF_RC_SUCCESS;
2424 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2425 return BNXT_TF_RC_ERROR;
2428 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
/*
 * Records a set-IPv4-source NAT action: copies the address into the
 * SET_IPV4_SRC action property and sets BNXT_ULP_ACT_BIT_SET_IPV4_SRC.
 * Returns BNXT_TF_RC_ERROR when conf is NULL (guard line elided from
 * this listing -- confirm).
 */
2430 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2431 struct ulp_rte_parser_params *params)
2433 const struct rte_flow_action_set_ipv4 *set_ipv4;
2434 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2436 set_ipv4 = action_item->conf;
2438 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2439 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2440 /* Update the act_bitmap with set ipv4 src */
2441 ULP_BITMAP_SET(params->act_bitmap.bits,
2442 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2443 return BNXT_TF_RC_SUCCESS;
2445 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2446 return BNXT_TF_RC_ERROR;
2449 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
/*
 * Records a set-IPv4-destination NAT action: copies the address into
 * the SET_IPV4_DST action property and sets
 * BNXT_ULP_ACT_BIT_SET_IPV4_DST.  Returns BNXT_TF_RC_ERROR when conf
 * is NULL (guard line elided from this listing -- confirm).
 */
2451 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2452 struct ulp_rte_parser_params *params)
2454 const struct rte_flow_action_set_ipv4 *set_ipv4;
2455 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2457 set_ipv4 = action_item->conf;
2459 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2460 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2461 /* Update the act_bitmap with set ipv4 dst */
2462 ULP_BITMAP_SET(params->act_bitmap.bits,
2463 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2464 return BNXT_TF_RC_SUCCESS;
2466 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2467 return BNXT_TF_RC_ERROR;
2470 /* Function to handle the parsing of RTE Flow action set tp src.*/
/*
 * Records a set-transport-source-port NAT action: copies the port into
 * the SET_TP_SRC action property and sets BNXT_ULP_ACT_BIT_SET_TP_SRC.
 * Returns BNXT_TF_RC_ERROR when conf is NULL (guard line elided from
 * this listing -- confirm).
 */
2472 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2473 struct ulp_rte_parser_params *params)
2475 const struct rte_flow_action_set_tp *set_tp;
2476 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2478 set_tp = action_item->conf;
2480 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2481 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2482 /* Update the act_bitmap with set tp src */
2483 ULP_BITMAP_SET(params->act_bitmap.bits,
2484 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2485 return BNXT_TF_RC_SUCCESS;
2488 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2489 return BNXT_TF_RC_ERROR;
2492 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2494 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2495 struct ulp_rte_parser_params *params)
2497 const struct rte_flow_action_set_tp *set_tp;
2498 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2500 set_tp = action_item->conf;
2502 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2503 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2504 /* Update the hdr_bitmap with set tp dst */
2505 ULP_BITMAP_SET(params->act_bitmap.bits,
2506 BNXT_ULP_ACT_BIT_SET_TP_DST);
2507 return BNXT_TF_RC_SUCCESS;
2510 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2511 return BNXT_TF_RC_ERROR;
2514 /* Function to handle the parsing of RTE Flow action dec ttl.*/
/*
 * Records a decrement-TTL action: sets BNXT_ULP_ACT_BIT_DEC_TTL in the
 * action bitmap.  The action carries no configuration.  Always returns
 * BNXT_TF_RC_SUCCESS.
 */
2516 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2517 struct ulp_rte_parser_params *params)
2519 /* Update the act_bitmap with dec ttl */
2520 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2521 return BNXT_TF_RC_SUCCESS;
2524 /* Function to handle the parsing of RTE Flow action JUMP */
/*
 * Records a jump-to-group action: sets BNXT_ULP_ACT_BIT_JUMP in the
 * action bitmap.  The action's target group is not read here.  Always
 * returns BNXT_TF_RC_SUCCESS.
 */
2526 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2527 struct ulp_rte_parser_params *params)
2529 /* Update the act_bitmap with jump */
2530 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2531 return BNXT_TF_RC_SUCCESS;
/*
 * Parses an RTE Flow sample action.  Constraints enforced here:
 * - sample actions must not be nested inside another sample action
 *   (SAMPLE bit already set -> error);
 * - sample is only allowed as a shared action (SHARED bit must be set);
 * - only ratio == 1 (100% sampling) is supported;
 * - the nested action list must be present.
 * The nested actions are parsed recursively; on success the SAMPLE bit
 * is set.  NOTE(review): the trailing "return ret;" and closing brace
 * are elided from this listing -- confirm against the original file.
 */
2535 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2536 struct ulp_rte_parser_params *params)
2538 const struct rte_flow_action_sample *sample;
2541 sample = action_item->conf;
2543 /* if SAMPLE bit is set it means this sample action is nested within the
2544 * actions of another sample action; this is not allowed
2546 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2547 BNXT_ULP_ACT_BIT_SAMPLE))
2548 return BNXT_TF_RC_ERROR;
2550 /* a sample action is only allowed as a shared action */
2551 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2552 BNXT_ULP_ACT_BIT_SHARED))
2553 return BNXT_TF_RC_ERROR;
2555 /* only a ratio of 1 i.e. 100% is supported */
2556 if (sample->ratio != 1)
2557 return BNXT_TF_RC_ERROR;
2559 if (!sample->actions)
2560 return BNXT_TF_RC_ERROR;
2562 /* parse the nested actions for a sample action */
2563 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2564 if (ret == BNXT_TF_RC_SUCCESS)
2565 /* Update the act_bitmap with sample */
2566 ULP_BITMAP_SET(params->act_bitmap.bits,
2567 BNXT_ULP_ACT_BIT_SAMPLE);
2572 /* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
/*
 * Vendor-specific VXLAN decap action: sets the F1 flow header bit and
 * then delegates to the standard ulp_rte_vxlan_decap_act_handler().
 */
2574 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
2575 struct ulp_rte_parser_params *params)
2577 /* Set the F1 flow header bit */
2578 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
2579 return ulp_rte_vxlan_decap_act_handler(action_item, params);
2582 /* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
/*
 * Vendor-specific VXLAN decap match item: sets the F2 flow header bit
 * and reuses the decap action handler (the 'item' argument is not
 * forwarded; NULL is passed since the handler ignores its action arg).
 */
2584 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
2585 struct ulp_rte_parser_params *params)
2588 /* Set the F2 flow header bit */
2589 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
2590 return ulp_rte_vxlan_decap_act_handler(NULL, params);