1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
20 /* Local defines for the parsing functions */
21 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
22 #define ULP_VLAN_PRIORITY_MASK 0x700
23 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
24 #define ULP_UDP_PORT_VXLAN 4789
26 /* Utility function to skip the void items. */
/* Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries.
 * NOTE(review): this chunk is missing interior lines — the return type
 * and the loop body that uses 'increment' to step *item are not visible.
 */
28 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
34 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
41 /* Utility function to update the field_bitmap */
/* Marks hdr_field[idx] as present in the match-field bitmap when its
 * mask is non-zero, otherwise clears the bit. A partial (not all-ones)
 * mask additionally flags the flow as a wildcard match.
 */
43 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
46 struct ulp_rte_hdr_field *field;
48 field = &params->hdr_field[idx];
/* Non-zero mask: this field participates in the match key. */
49 if (ulp_bitmap_notzero(field->mask, field->size)) {
50 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial mask cannot be an exact match -> force wildcard match. */
52 if (!ulp_bitmap_is_ones(field->mask, field->size))
53 ULP_COMP_FLD_IDX_WR(params,
54 BNXT_ULP_CF_IDX_WC_MATCH, 1);
/* All-zero mask: field is not matched; clear its bitmap bit. */
56 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
60 /* Utility function to copy field spec items */
/* Copies an item's spec bytes into field->spec. Presumably returns a
 * pointer to the next hdr_field slot so callers can chain copies —
 * TODO confirm; the return statement is outside the visible lines.
 */
61 static struct ulp_rte_hdr_field *
62 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
/* Copy length is taken from field->size, not from the caller directly. */
67 memcpy(field->spec, buffer, field->size);
72 /* Utility function to copy field masks items */
/* Copies an item's mask bytes into hdr_field[*idx].mask and refreshes
 * the field bitmap for that index. NOTE(review): the increment of *idx
 * is not visible in this chunk — presumably done after the update.
 */
74 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
79 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
81 memcpy(field->mask, buffer, size);
/* Recompute match/wildcard bits now that the mask is populated. */
82 ulp_rte_parser_field_bitmap_update(params, *idx);
86 /* Utility function to ignore field masks items */
/* Deliberate no-op: every argument is __rte_unused. Used by callers
 * (e.g. VLAN priority, IPv4 TOS, IPv6 TC/flow-label paths below) that
 * must skip a mask rather than match on it.
 */
88 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
90 const void *buffer __rte_unused,
91 uint32_t size __rte_unused)
97 * Function to handle the parsing of RTE Flows and placing
98 * the RTE flow items into the ulp structures.
/* Walks the rte_flow pattern array until RTE_FLOW_ITEM_TYPE_END,
 * dispatching each item to its registered handler from ulp_hdr_info[].
 * Returns a BNXT_TF_RC_* code; on success falls through to the
 * implicit SVIF/port match processing.
 */
101 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
102 struct ulp_rte_parser_params *params)
104 const struct rte_flow_item *item = pattern;
105 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots for the SVIF entries. */
107 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
109 /* Set the computed flags for no vlan tags before parsing */
110 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
111 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
113 /* Parse all the items in the pattern */
114 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
115 /* get the header information from the flow_hdr_info table */
116 hdr_info = &ulp_hdr_info[item->type];
117 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
119 "Truflow parser does not support type %d\n",
121 return BNXT_TF_RC_PARSE_ERR;
122 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
123 /* call the registered callback handler */
124 if (hdr_info->proto_hdr_func) {
125 if (hdr_info->proto_hdr_func(item, params) !=
126 BNXT_TF_RC_SUCCESS) {
127 return BNXT_TF_RC_ERROR;
133 /* update the implied SVIF */
134 return ulp_rte_parser_implicit_match_port_process(params);
138 * Function to handle the parsing of RTE Flows and placing
139 * the RTE flow actions into the ulp structures.
/* Mirror of the header parser for actions: walks the action array
 * until RTE_FLOW_ACTION_TYPE_END dispatching via ulp_act_info[], then
 * applies the implicit action-port processing.
 */
142 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
143 struct ulp_rte_parser_params *params)
145 const struct rte_flow_action *action_item = actions;
146 struct bnxt_ulp_rte_act_info *hdr_info;
148 /* Parse all the items in the pattern */
149 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
150 /* get the header information from the flow_hdr_info table */
151 hdr_info = &ulp_act_info[action_item->type];
152 if (hdr_info->act_type ==
153 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
155 "Truflow parser does not support act %u\n",
157 return BNXT_TF_RC_ERROR;
158 } else if (hdr_info->act_type ==
159 BNXT_ULP_ACT_TYPE_SUPPORTED) {
160 /* call the registered callback handler */
161 if (hdr_info->proto_act_func) {
162 if (hdr_info->proto_act_func(action_item,
164 BNXT_TF_RC_SUCCESS) {
165 return BNXT_TF_RC_ERROR;
171 /* update the implied port details */
172 ulp_rte_parser_implicit_act_port_process(params);
173 return BNXT_TF_RC_SUCCESS;
177 * Function to handle the post processing of the computed
178 * fields for the interface.
/* Resolves the incoming dpdk port to a port-db ifindex and fills in the
 * PARIF computed fields. Only the ingress direction is handled in the
 * visible code: sets the phy-port PARIF, and for VF-rep match ports also
 * the VF-func PARIF; otherwise the drv-func PARIF.
 */
181 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
184 uint16_t port_id, parif;
186 enum bnxt_ulp_direction_type dir;
188 /* get the direction details */
189 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
191 /* read the port id details */
192 port_id = ULP_COMP_FLD_IDX_RD(params,
193 BNXT_ULP_CF_IDX_INCOMING_IF);
/* Convert dpdk port id to the driver's port-db ifindex. */
194 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
197 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
201 if (dir == BNXT_ULP_DIR_INGRESS) {
203 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
204 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
205 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
208 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
211 /* Get the match port type */
212 mtype = ULP_COMP_FLD_IDX_RD(params,
213 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
214 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
215 ULP_COMP_FLD_IDX_WR(params,
216 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
218 /* Set VF func PARIF */
219 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
220 BNXT_ULP_VF_FUNC_PARIF,
223 "ParseErr:ifindex is not valid\n");
226 ULP_COMP_FLD_IDX_WR(params,
227 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
231 /* Set DRV func PARIF */
232 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
233 BNXT_ULP_DRV_FUNC_PARIF,
236 "ParseErr:ifindex is not valid\n");
239 ULP_COMP_FLD_IDX_WR(params,
240 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
/* Post-processing for a normal (non-tunnel-F2) flow: propagates the
 * computed direction into the hdr/act bitmaps, derives the VF-to-VF
 * flag, adjusts dec-TTL fields for VXLAN tunnels, merges the fast-path
 * header bits, records the flow id, and updates interface fields.
 */
247 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
249 enum bnxt_ulp_intf_type match_port_type, act_port_type;
250 enum bnxt_ulp_direction_type dir;
251 uint32_t act_port_set;
253 /* Get the computed details */
254 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
255 match_port_type = ULP_COMP_FLD_IDX_RD(params,
256 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
257 act_port_type = ULP_COMP_FLD_IDX_RD(params,
258 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
259 act_port_set = ULP_COMP_FLD_IDX_RD(params,
260 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
262 /* set the flow direction in the proto and action header */
263 if (dir == BNXT_ULP_DIR_EGRESS) {
264 ULP_BITMAP_SET(params->hdr_bitmap.bits,
265 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
266 ULP_BITMAP_SET(params->act_bitmap.bits,
267 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
270 /* calculate the VF to VF flag */
271 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
272 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
273 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
275 /* Update the decrement ttl computational fields */
276 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
277 BNXT_ULP_ACT_BIT_DEC_TTL)) {
279 * Check that vxlan proto is included and vxlan decap
280 * action is not set then decrement tunnel ttl.
281 * Similarly add GRE and NVGRE in future.
283 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
284 BNXT_ULP_HDR_BIT_T_VXLAN) &&
285 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
286 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
287 ULP_COMP_FLD_IDX_WR(params,
288 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
/* Non-tunnel case: plain dec-TTL on the inner/only header. */
290 ULP_COMP_FLD_IDX_WR(params,
291 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
295 /* Merge the hdr_fp_bit into the proto header bit */
296 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
298 /* Update the comp fld fid */
299 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
301 /* Update the computed interface parameters */
302 bnxt_ulp_comp_fld_intf_update(params);
304 /* TBD: Handle the flow rejection scenarios */
309 * Function to handle the post processing of the parsing details
/* Runs the normal-flow post processing, then the tunnel-flow post
 * processing whose result becomes this function's return value.
 */
312 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
314 ulp_post_process_normal_flow(params);
315 return ulp_post_process_tun_flow(params);
319 * Function to compute the flow direction based on the match port details
/* Writes BNXT_ULP_CF_IDX_DIRECTION: an "ingress" attribute matching a
 * VF representor is flipped to egress (VF-rep traffic is reversed);
 * otherwise the direction follows the flow attribute as given.
 */
322 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
324 enum bnxt_ulp_intf_type match_port_type;
326 /* Get the match port type */
327 match_port_type = ULP_COMP_FLD_IDX_RD(params,
328 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
330 /* If ingress flow and matchport is vf rep then dir is egress*/
331 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
332 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
333 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
334 BNXT_ULP_DIR_EGRESS);
336 /* Assign the input direction */
337 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
338 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
339 BNXT_ULP_DIR_INGRESS);
341 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
342 BNXT_ULP_DIR_EGRESS);
346 /* Function to handle the parsing of RTE Flow item PF Header. */
/* Resolves the SVIF for the given ifindex and writes it (big-endian)
 * into the reserved SVIF hdr_field slot along with the caller's mask.
 * Rejects a second source item: only one SVIF may be set per flow.
 * SVIF type selection: ingress -> phy port SVIF; egress -> VF-func SVIF
 * for VF reps, drv-func SVIF otherwise.
 */
348 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
353 enum bnxt_ulp_direction_type dir;
354 struct ulp_rte_hdr_field *hdr_field;
355 enum bnxt_ulp_svif_type svif_type;
356 enum bnxt_ulp_intf_type port_type;
/* Guard: SVIF_FLAG != INVALID means a source item was already seen. */
358 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
359 BNXT_ULP_INVALID_SVIF_VAL) {
361 "SVIF already set,multiple source not support'd\n");
362 return BNXT_TF_RC_ERROR;
365 /* Get port type details */
366 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
367 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
368 BNXT_TF_DBG(ERR, "Invalid port type\n");
369 return BNXT_TF_RC_ERROR;
372 /* Update the match port type */
373 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
375 /* compute the direction */
376 bnxt_ulp_rte_parser_direction_compute(params);
378 /* Get the computed direction */
379 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
380 if (dir == BNXT_ULP_DIR_INGRESS) {
381 svif_type = BNXT_ULP_PHY_PORT_SVIF;
383 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
384 svif_type = BNXT_ULP_VF_FUNC_SVIF;
386 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
388 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* Hardware expects the SVIF in network byte order. */
390 svif = rte_cpu_to_be_16(svif);
391 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
392 memcpy(hdr_field->spec, &svif, sizeof(svif));
393 memcpy(hdr_field->mask, &mask, sizeof(mask));
394 hdr_field->size = sizeof(svif);
/* Record the (host-order) SVIF so later items detect the duplicate. */
395 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
396 rte_be_to_cpu_16(svif));
397 return BNXT_TF_RC_SUCCESS;
400 /* Function to handle the parsing of the RTE port id */
/* If no explicit source item set the SVIF, derive it from the incoming
 * interface with a full 0xFFFF mask (exact match on the port).
 */
402 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
404 uint16_t port_id = 0;
405 uint16_t svif_mask = 0xFFFF;
407 int32_t rc = BNXT_TF_RC_ERROR;
/* Already set by an explicit pf/vf/port-id/phy-port item: nothing to do. */
409 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
410 BNXT_ULP_INVALID_SVIF_VAL)
411 return BNXT_TF_RC_SUCCESS;
413 /* SVIF not set. So get the port id */
414 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
416 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
419 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
423 /* Update the SVIF details */
424 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
428 /* Function to handle the implicit action port id */
/* If no port-id action was present, synthesize one from the incoming
 * interface and run it through the regular port-id action handler,
 * then clear the "port is set" flag so it remains implicit.
 */
430 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
432 struct rte_flow_action action_item = {0};
433 struct rte_flow_action_port_id port_id = {0};
435 /* Read the action port set bit */
436 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
437 /* Already set, so just exit */
438 return BNXT_TF_RC_SUCCESS;
440 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
441 action_item.conf = &port_id;
443 /* Update the action port based on incoming port */
444 ulp_rte_port_id_act_handler(&action_item, params);
446 /* Reset the action port set bit */
447 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
448 return BNXT_TF_RC_SUCCESS;
451 /* Function to handle the parsing of RTE Flow item PF Header. */
/* RTE_FLOW_ITEM_TYPE_PF: uses the incoming dpdk port as the source and
 * sets the SVIF with an exact-match mask; the item itself is unused.
 */
453 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
454 struct ulp_rte_parser_params *params)
456 uint16_t port_id = 0;
457 uint16_t svif_mask = 0xFFFF;
460 /* Get the implicit port id */
461 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
463 /* perform the conversion from dpdk port to bnxt ifindex */
464 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
467 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
468 return BNXT_TF_RC_ERROR;
471 /* Update the SVIF details */
472 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
475 /* Function to handle the parsing of RTE Flow item VF Header. */
/* RTE_FLOW_ITEM_TYPE_VF: validates the item's spec/mask, converts the
 * VF function id to a port-db ifindex and sets the SVIF from it.
 * NOTE(review): the checks on vf_spec/vf_mask are only partly visible.
 */
477 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
478 struct ulp_rte_parser_params *params)
480 const struct rte_flow_item_vf *vf_spec = item->spec;
481 const struct rte_flow_item_vf *vf_mask = item->mask;
484 int32_t rc = BNXT_TF_RC_PARSE_ERR;
486 /* Get VF rte_flow_item for Port details */
488 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
492 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
497 /* perform the conversion from VF Func id to bnxt ifindex */
498 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
501 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
504 /* Update the SVIF details */
505 return ulp_rte_parser_svif_set(params, ifindex, mask);
508 /* Function to handle the parsing of RTE Flow item port id Header. */
/* RTE_FLOW_ITEM_TYPE_PORT_ID: converts the item's dpdk port id to a
 * port-db ifindex and sets the SVIF with the item's id mask.
 */
510 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
511 struct ulp_rte_parser_params *params)
513 const struct rte_flow_item_port_id *port_spec = item->spec;
514 const struct rte_flow_item_port_id *port_mask = item->mask;
516 int32_t rc = BNXT_TF_RC_PARSE_ERR;
520 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
524 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
527 mask = port_mask->id;
529 /* perform the conversion from dpdk port to bnxt ifindex */
530 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
533 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
536 /* Update the SVIF details */
537 return ulp_rte_parser_svif_set(params, ifindex, mask);
540 /* Function to handle the parsing of RTE Flow item phy port Header. */
/* RTE_FLOW_ITEM_TYPE_PHY_PORT: valid only for ingress flows. Looks up
 * the physical port's SVIF directly (bypassing ulp_rte_parser_svif_set)
 * and writes it into the SVIF hdr_field slot in network byte order.
 */
542 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
543 struct ulp_rte_parser_params *params)
545 const struct rte_flow_item_phy_port *port_spec = item->spec;
546 const struct rte_flow_item_phy_port *port_mask = item->mask;
548 int32_t rc = BNXT_TF_RC_ERROR;
550 enum bnxt_ulp_direction_type dir;
551 struct ulp_rte_hdr_field *hdr_field;
553 /* Copy the rte_flow_item for phy port into hdr_field */
555 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
559 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
562 mask = port_mask->index;
564 /* Update the match port type */
565 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
566 BNXT_ULP_INTF_TYPE_PHY_PORT);
568 /* Compute the Hw direction */
569 bnxt_ulp_rte_parser_direction_compute(params);
571 /* Direction validation */
572 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
573 if (dir == BNXT_ULP_DIR_EGRESS) {
575 "Parse Err:Phy ports are valid only for ingress\n");
576 return BNXT_TF_RC_PARSE_ERR;
579 /* Get the physical port details from port db */
580 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
583 BNXT_TF_DBG(ERR, "Failed to get port details\n");
584 return BNXT_TF_RC_PARSE_ERR;
587 /* Update the SVIF details */
588 svif = rte_cpu_to_be_16(svif);
589 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
590 memcpy(hdr_field->spec, &svif, sizeof(svif));
591 memcpy(hdr_field->mask, &mask, sizeof(mask));
592 hdr_field->size = sizeof(svif);
/* Record host-order SVIF so duplicate source items are rejected. */
593 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
594 rte_be_to_cpu_16(svif));
595 return BNXT_TF_RC_SUCCESS;
598 /* Function to handle the update of proto header based on field values */
/* Inspects a big-endian ethertype and sets the inner or outer IPv4/IPv6
 * fast-path header bit plus the matching L3 computed field, selected by
 * in_flag (non-zero -> inner header).
 */
600 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
601 uint16_t type, uint32_t in_flag)
603 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
605 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
606 BNXT_ULP_HDR_BIT_I_IPV4);
607 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
609 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
610 BNXT_ULP_HDR_BIT_O_IPV4);
611 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
613 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
615 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
616 BNXT_ULP_HDR_BIT_I_IPV6);
617 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
619 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
620 BNXT_ULP_HDR_BIT_O_IPV6);
621 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
626 /* Internal Function to identify broadcast or multicast packets */
/* Returns truthy for multicast/broadcast MACs, which the offload path
 * rejects (see the eth handler below). Return statements are outside
 * the visible lines.
 */
628 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
630 if (rte_is_multicast_ether_addr(eth_addr) ||
631 rte_is_broadcast_ether_addr(eth_addr)) {
633 "No support for bcast or mcast addr offload\n");
639 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/* Copies dst MAC, src MAC and ethertype spec/mask into hdr_field,
 * rejects bcast/mcast addresses, advances field_idx by the ETH field
 * count, and classifies the header as inner (I_ETH) when an outer
 * L2/L3/L4 header was already seen, else outer (O_ETH). Finally feeds
 * the ethertype to the L2 proto-type update.
 */
641 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
642 struct ulp_rte_parser_params *params)
644 const struct rte_flow_item_eth *eth_spec = item->spec;
645 const struct rte_flow_item_eth *eth_mask = item->mask;
646 struct ulp_rte_hdr_field *field;
647 uint32_t idx = params->field_idx;
649 uint16_t eth_type = 0;
650 uint32_t inner_flag = 0;
653 * Copy the rte_flow_item for eth into hdr_field using ethernet
657 size = sizeof(eth_spec->dst.addr_bytes);
658 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
659 eth_spec->dst.addr_bytes,
661 /* Todo: work around to avoid multicast and broadcast addr */
662 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
663 return BNXT_TF_RC_PARSE_ERR;
665 size = sizeof(eth_spec->src.addr_bytes);
666 field = ulp_rte_parser_fld_copy(field,
667 eth_spec->src.addr_bytes,
669 /* Todo: work around to avoid multicast and broadcast addr */
670 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
671 return BNXT_TF_RC_PARSE_ERR;
673 field = ulp_rte_parser_fld_copy(field,
675 sizeof(eth_spec->type));
/* Big-endian ethertype, consumed by l2_proto_type_update below. */
676 eth_type = eth_spec->type;
679 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
680 sizeof(eth_mask->dst.addr_bytes));
681 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
682 sizeof(eth_mask->src.addr_bytes));
683 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
684 sizeof(eth_mask->type));
686 /* Add number of Eth header elements */
687 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
689 /* Update the protocol hdr bitmap */
/* Any prior outer header means this eth item is the inner L2. */
690 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
691 BNXT_ULP_HDR_BIT_O_ETH) ||
692 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
693 BNXT_ULP_HDR_BIT_O_IPV4) ||
694 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
695 BNXT_ULP_HDR_BIT_O_IPV6) ||
696 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697 BNXT_ULP_HDR_BIT_O_UDP) ||
698 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699 BNXT_ULP_HDR_BIT_O_TCP)) {
700 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
703 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
705 /* Update the field protocol hdr bitmap */
706 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
708 return BNXT_TF_RC_SUCCESS;
711 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/* Splits the TCI into priority (top 3 bits, currently ignored as a
 * match) and the 12-bit VLAN tag, copies spec/mask into hdr_field, and
 * updates the OO/OI/IO/II VLAN header bits plus the per-side vtag
 * counters based on how many tags have been seen on each side.
 */
713 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
714 struct ulp_rte_parser_params *params)
716 const struct rte_flow_item_vlan *vlan_spec = item->spec;
717 const struct rte_flow_item_vlan *vlan_mask = item->mask;
718 struct ulp_rte_hdr_field *field;
719 struct ulp_rte_hdr_bitmap *hdr_bit;
720 uint32_t idx = params->field_idx;
721 uint16_t vlan_tag, priority;
722 uint32_t outer_vtag_num;
723 uint32_t inner_vtag_num;
724 uint16_t eth_type = 0;
725 uint32_t inner_flag = 0;
728 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* Split the host-order TCI: top 3 bits priority, low 12 bits tag. */
732 vlan_tag = ntohs(vlan_spec->tci);
733 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
734 vlan_tag &= ULP_VLAN_TAG_MASK;
735 vlan_tag = htons(vlan_tag);
737 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
740 field = ulp_rte_parser_fld_copy(field,
743 field = ulp_rte_parser_fld_copy(field,
744 &vlan_spec->inner_type,
745 sizeof(vlan_spec->inner_type));
746 eth_type = vlan_spec->inner_type;
750 vlan_tag = ntohs(vlan_mask->tci);
751 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
755 * the storage for priority and vlan tag is 2 bytes
756 * The mask of priority which is 3 bits if it is all 1's
757 * then make the rest bits 13 bits as 1's
758 * so that it is matched as exact match.
/* Widen an all-ones 3-bit (priority) or 12-bit (tag) mask to the full
 * 16-bit storage so it behaves as an exact match.
 */
760 if (priority == ULP_VLAN_PRIORITY_MASK)
761 priority |= ~ULP_VLAN_PRIORITY_MASK;
762 if (vlan_tag == ULP_VLAN_TAG_MASK)
763 vlan_tag |= ~ULP_VLAN_TAG_MASK;
764 vlan_tag = htons(vlan_tag);
767 * The priority field is ignored since OVS is setting it as
768 * wild card match and it is not supported. This is a work
769 * around and shall be addressed in the future.
771 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
774 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
776 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
777 sizeof(vlan_mask->inner_type));
779 /* Set the field index to new incremented value */
780 params->field_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
782 /* Get the outer tag and inner tag counts */
783 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
784 BNXT_ULP_CF_IDX_O_VTAG_NUM);
785 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
786 BNXT_ULP_CF_IDX_I_VTAG_NUM);
788 /* Update the hdr_bitmap of the vlans */
789 hdr_bit = &params->hdr_bitmap;
/* First VLAN on the outer eth header (no inner eth seen yet). */
790 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
791 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
793 /* Update the vlan tag num */
795 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
797 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
798 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
799 ULP_BITMAP_SET(params->hdr_bitmap.bits,
800 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Second VLAN on the outer eth header. */
801 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
802 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
803 outer_vtag_num == 1) {
804 /* update the vlan tag num */
806 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
808 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
809 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
810 ULP_BITMAP_SET(params->hdr_bitmap.bits,
811 BNXT_ULP_HDR_BIT_OI_VLAN);
/* First VLAN on the inner eth header. */
812 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
813 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
815 /* update the vlan tag num */
817 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
819 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
820 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
821 ULP_BITMAP_SET(params->hdr_bitmap.bits,
822 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Second VLAN on the inner eth header. */
824 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
825 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
826 inner_vtag_num == 1) {
827 /* update the vlan tag num */
829 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
831 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
832 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
833 ULP_BITMAP_SET(params->hdr_bitmap.bits,
834 BNXT_ULP_HDR_BIT_II_VLAN);
837 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
838 return BNXT_TF_RC_ERROR;
840 /* Update the field protocol hdr bitmap */
841 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
842 return BNXT_TF_RC_SUCCESS;
845 /* Function to handle the update of proto header based on field values */
/* Maps an IP protocol number (UDP/TCP) to the inner or outer L4
 * fast-path header bit and sets the corresponding L4 computed field,
 * selected by in_flag (non-zero -> inner header).
 */
847 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
848 uint8_t proto, uint32_t in_flag)
850 if (proto == IPPROTO_UDP) {
852 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
853 BNXT_ULP_HDR_BIT_I_UDP);
854 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
856 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
857 BNXT_ULP_HDR_BIT_O_UDP);
858 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
860 } else if (proto == IPPROTO_TCP) {
862 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
863 BNXT_ULP_HDR_BIT_I_TCP);
864 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
866 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
867 BNXT_ULP_HDR_BIT_O_TCP);
868 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
873 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/* Copies each IPv4 header field's spec and mask into hdr_field (TOS
 * mask deliberately ignored), rejects a third L3 header, compensates
 * field_idx for F2 flows that carry no eth item, classifies the header
 * as inner/outer, flags GRE tunnels by next-proto, and feeds the
 * protocol to the L3->L4 proto-type update.
 */
875 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
876 struct ulp_rte_parser_params *params)
878 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
879 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
880 struct ulp_rte_hdr_field *field;
881 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
882 uint32_t idx = params->field_idx;
885 uint32_t inner_flag = 0;
888 /* validate there are no 3rd L3 header */
889 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
891 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
892 return BNXT_TF_RC_ERROR;
895 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
896 BNXT_ULP_HDR_BIT_O_ETH) &&
897 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
898 BNXT_ULP_HDR_BIT_I_ETH)) {
899 /* Since F2 flow does not include eth item, when parser detects
900 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
901 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
902 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
903 * This will allow the parser post processor to update the
904 * t_dmac in hdr_field[o_eth.dmac]
906 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
907 BNXT_ULP_PROTO_HDR_VLAN_NUM);
908 params->field_idx = idx;
912 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
916 size = sizeof(ipv4_spec->hdr.version_ihl);
917 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
918 &ipv4_spec->hdr.version_ihl,
920 size = sizeof(ipv4_spec->hdr.type_of_service);
921 field = ulp_rte_parser_fld_copy(field,
922 &ipv4_spec->hdr.type_of_service,
924 size = sizeof(ipv4_spec->hdr.total_length);
925 field = ulp_rte_parser_fld_copy(field,
926 &ipv4_spec->hdr.total_length,
928 size = sizeof(ipv4_spec->hdr.packet_id);
929 field = ulp_rte_parser_fld_copy(field,
930 &ipv4_spec->hdr.packet_id,
932 size = sizeof(ipv4_spec->hdr.fragment_offset);
933 field = ulp_rte_parser_fld_copy(field,
934 &ipv4_spec->hdr.fragment_offset,
936 size = sizeof(ipv4_spec->hdr.time_to_live);
937 field = ulp_rte_parser_fld_copy(field,
938 &ipv4_spec->hdr.time_to_live,
940 size = sizeof(ipv4_spec->hdr.next_proto_id);
941 field = ulp_rte_parser_fld_copy(field,
942 &ipv4_spec->hdr.next_proto_id,
/* Saved for GRE detection and the L4 proto-type update below. */
944 proto = ipv4_spec->hdr.next_proto_id;
945 size = sizeof(ipv4_spec->hdr.hdr_checksum);
946 field = ulp_rte_parser_fld_copy(field,
947 &ipv4_spec->hdr.hdr_checksum,
949 size = sizeof(ipv4_spec->hdr.src_addr);
950 field = ulp_rte_parser_fld_copy(field,
951 &ipv4_spec->hdr.src_addr,
953 size = sizeof(ipv4_spec->hdr.dst_addr);
954 field = ulp_rte_parser_fld_copy(field,
955 &ipv4_spec->hdr.dst_addr,
959 ulp_rte_prsr_mask_copy(params, &idx,
960 &ipv4_mask->hdr.version_ihl,
961 sizeof(ipv4_mask->hdr.version_ihl));
963 * The tos field is ignored since OVS is setting it as wild card
964 * match and it is not supported. This is a work around and
965 * shall be addressed in the future.
967 ulp_rte_prsr_mask_ignore(params, &idx,
968 &ipv4_mask->hdr.type_of_service,
969 sizeof(ipv4_mask->hdr.type_of_service)
972 ulp_rte_prsr_mask_copy(params, &idx,
973 &ipv4_mask->hdr.total_length,
974 sizeof(ipv4_mask->hdr.total_length));
975 ulp_rte_prsr_mask_copy(params, &idx,
976 &ipv4_mask->hdr.packet_id,
977 sizeof(ipv4_mask->hdr.packet_id));
978 ulp_rte_prsr_mask_copy(params, &idx,
979 &ipv4_mask->hdr.fragment_offset,
980 sizeof(ipv4_mask->hdr.fragment_offset));
981 ulp_rte_prsr_mask_copy(params, &idx,
982 &ipv4_mask->hdr.time_to_live,
983 sizeof(ipv4_mask->hdr.time_to_live));
984 ulp_rte_prsr_mask_copy(params, &idx,
985 &ipv4_mask->hdr.next_proto_id,
986 sizeof(ipv4_mask->hdr.next_proto_id));
987 ulp_rte_prsr_mask_copy(params, &idx,
988 &ipv4_mask->hdr.hdr_checksum,
989 sizeof(ipv4_mask->hdr.hdr_checksum));
990 ulp_rte_prsr_mask_copy(params, &idx,
991 &ipv4_mask->hdr.src_addr,
992 sizeof(ipv4_mask->hdr.src_addr));
993 ulp_rte_prsr_mask_copy(params, &idx,
994 &ipv4_mask->hdr.dst_addr,
995 sizeof(ipv4_mask->hdr.dst_addr));
997 /* Add the number of ipv4 header elements */
998 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
1000 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
/* A prior outer L3 header means this one is the inner header. */
1001 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1002 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1003 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1004 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1007 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1008 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1011 if (proto == IPPROTO_GRE)
1012 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1014 /* Update the field protocol hdr bitmap */
1015 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1016 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1017 return BNXT_TF_RC_SUCCESS;
1020 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/* IPv6 counterpart of the IPv4 handler: splits vtc_flow into version /
 * TC / flow-label subfields (TC and flow-label masks deliberately
 * ignored), copies the remaining fields' spec and mask into hdr_field,
 * rejects a third L3 header, handles the eth-less F2 offset, classifies
 * inner vs outer, and flags GRE by the next-header value.
 */
1022 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1023 struct ulp_rte_parser_params *params)
1025 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1026 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1027 struct ulp_rte_hdr_field *field;
1028 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1029 uint32_t idx = params->field_idx;
1031 uint32_t vtcf, vtcf_mask;
1033 uint32_t inner_flag = 0;
1036 /* validate there are no 3rd L3 header */
1037 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1039 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1040 return BNXT_TF_RC_ERROR;
1043 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1044 BNXT_ULP_HDR_BIT_O_ETH) &&
1045 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1046 BNXT_ULP_HDR_BIT_I_ETH)) {
1047 /* Since F2 flow does not include eth item, when parser detects
1048 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1049 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
1050 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1051 * This will allow the parser post processor to update the
1052 * t_dmac in hdr_field[o_eth.dmac]
1054 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1055 BNXT_ULP_PROTO_HDR_VLAN_NUM);
1056 params->field_idx = idx;
1060 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1064 size = sizeof(ipv6_spec->hdr.vtc_flow);
/* vtc_flow is split into three separate hdr_field entries. */
1066 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1067 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1071 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1072 field = ulp_rte_parser_fld_copy(field,
1076 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1077 field = ulp_rte_parser_fld_copy(field,
1081 size = sizeof(ipv6_spec->hdr.payload_len);
1082 field = ulp_rte_parser_fld_copy(field,
1083 &ipv6_spec->hdr.payload_len,
1085 size = sizeof(ipv6_spec->hdr.proto);
1086 field = ulp_rte_parser_fld_copy(field,
1087 &ipv6_spec->hdr.proto,
/* Saved for GRE detection and the L4 proto-type update below. */
1089 proto = ipv6_spec->hdr.proto;
1090 size = sizeof(ipv6_spec->hdr.hop_limits);
1091 field = ulp_rte_parser_fld_copy(field,
1092 &ipv6_spec->hdr.hop_limits,
1094 size = sizeof(ipv6_spec->hdr.src_addr);
1095 field = ulp_rte_parser_fld_copy(field,
1096 &ipv6_spec->hdr.src_addr,
1098 size = sizeof(ipv6_spec->hdr.dst_addr);
1099 field = ulp_rte_parser_fld_copy(field,
1100 &ipv6_spec->hdr.dst_addr,
1104 size = sizeof(ipv6_mask->hdr.vtc_flow);
1106 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1107 ulp_rte_prsr_mask_copy(params, &idx,
1111 * The TC and flow label field are ignored since OVS is setting
1112 * it for match and it is not supported.
1113 * This is a work around and
1114 * shall be addressed in the future.
1116 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1117 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1119 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1120 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1122 ulp_rte_prsr_mask_copy(params, &idx,
1123 &ipv6_mask->hdr.payload_len,
1124 sizeof(ipv6_mask->hdr.payload_len));
1125 ulp_rte_prsr_mask_copy(params, &idx,
1126 &ipv6_mask->hdr.proto,
1127 sizeof(ipv6_mask->hdr.proto));
1128 ulp_rte_prsr_mask_copy(params, &idx,
1129 &ipv6_mask->hdr.hop_limits,
1130 sizeof(ipv6_mask->hdr.hop_limits));
1131 ulp_rte_prsr_mask_copy(params, &idx,
1132 &ipv6_mask->hdr.src_addr,
1133 sizeof(ipv6_mask->hdr.src_addr));
1134 ulp_rte_prsr_mask_copy(params, &idx,
1135 &ipv6_mask->hdr.dst_addr,
1136 sizeof(ipv6_mask->hdr.dst_addr));
1138 /* add number of ipv6 header elements */
1139 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1141 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
/* A prior outer L3 header means this one is the inner header. */
1142 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1143 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1144 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1145 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1148 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1149 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1152 if (proto == IPPROTO_GRE)
1153 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1155 /* Update the field protocol hdr bitmap */
1156 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1157 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1159 return BNXT_TF_RC_SUCCESS;
1162 /* Function to handle the update of proto header based on field values */
/*
 * If the L4 destination port equals the VXLAN well-known port (4789,
 * compared in big-endian), set the VXLAN tunnel bit in hdr_fp_bit and
 * mark the L3_TUN computed field.
 * param: parser state to update; dst_port: network-order L4 dest port.
 */
1164 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1167 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1168 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1169 BNXT_ULP_HDR_BIT_T_VXLAN);
1170 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1174 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies the UDP item spec into params->hdr_field and the mask via
 * ulp_rte_prsr_mask_copy, then marks the outer or inner UDP header bit
 * (inner if an outer L4 is already present) and records the L4 source/
 * dest ports in the computed fields. Finally checks dst port for VXLAN.
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR on a third L4 header.
 * NOTE(review): "¶ms" below is mojibake for "&params" — restore
 * before compiling.
 */
1176 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1177 struct ulp_rte_parser_params *params)
1179 const struct rte_flow_item_udp *udp_spec = item->spec;
1180 const struct rte_flow_item_udp *udp_mask = item->mask;
1181 struct ulp_rte_hdr_field *field;
1182 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1183 uint32_t idx = params->field_idx;
1185 uint16_t dport = 0, sport = 0;
1188 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
/* Only two L4 headers (outer + inner) are supported. */
1190 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1191 return BNXT_TF_RC_ERROR;
1195 * Copy the rte_flow_item for udp into hdr_field using udp
/* Ports are kept in network byte order as supplied by the item spec. */
1199 size = sizeof(udp_spec->hdr.src_port);
1200 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1201 &udp_spec->hdr.src_port,
1203 sport = udp_spec->hdr.src_port;
1204 size = sizeof(udp_spec->hdr.dst_port);
1205 field = ulp_rte_parser_fld_copy(field,
1206 &udp_spec->hdr.dst_port,
1208 dport = udp_spec->hdr.dst_port;
1209 size = sizeof(udp_spec->hdr.dgram_len);
1210 field = ulp_rte_parser_fld_copy(field,
1211 &udp_spec->hdr.dgram_len,
1213 size = sizeof(udp_spec->hdr.dgram_cksum);
1214 field = ulp_rte_parser_fld_copy(field,
1215 &udp_spec->hdr.dgram_cksum,
/* Masks update the field bitmap as they are copied (idx advances). */
1219 ulp_rte_prsr_mask_copy(params, &idx,
1220 &udp_mask->hdr.src_port,
1221 sizeof(udp_mask->hdr.src_port));
1222 ulp_rte_prsr_mask_copy(params, &idx,
1223 &udp_mask->hdr.dst_port,
1224 sizeof(udp_mask->hdr.dst_port));
1225 ulp_rte_prsr_mask_copy(params, &idx,
1226 &udp_mask->hdr.dgram_len,
1227 sizeof(udp_mask->hdr.dgram_len));
1228 ulp_rte_prsr_mask_copy(params, &idx,
1229 &udp_mask->hdr.dgram_cksum,
1230 sizeof(udp_mask->hdr.dgram_cksum));
1233 /* Add number of UDP header elements */
1234 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1236 /* Set the udp header bitmap and computed l4 header bitmaps */
/* An existing outer L4 means this UDP item is the inner header. */
1237 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1238 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1239 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1240 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1241 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SPORT, sport);
1242 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DPORT, dport);
1245 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1246 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1247 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SPORT, sport);
1248 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DPORT, dport);
1250 /* Update the field protocol hdr bitmap */
/* Detects VXLAN via the well-known dst port. */
1251 ulp_rte_l4_proto_type_update(params, dport);
1253 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1254 return BNXT_TF_RC_SUCCESS;
1257 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies the TCP item spec into params->hdr_field and the mask via
 * ulp_rte_prsr_mask_copy, then marks the outer or inner TCP header bit
 * (inner if an outer L4 already parsed) and records the L4 ports in the
 * computed fields.
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR on a third L4 header.
 * NOTE(review): "¶ms" below is mojibake for "&params" — restore
 * before compiling.
 */
1259 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1260 struct ulp_rte_parser_params *params)
1262 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1263 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1264 struct ulp_rte_hdr_field *field;
1265 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1266 uint32_t idx = params->field_idx;
1267 uint16_t dport = 0, sport = 0;
1271 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
/* Only two L4 headers (outer + inner) are supported. */
1273 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1274 return BNXT_TF_RC_ERROR;
1278 * Copy the rte_flow_item for tcp into hdr_field using tcp
/* Ports stay in network byte order as supplied by the item spec. */
1282 sport = tcp_spec->hdr.src_port;
1283 size = sizeof(tcp_spec->hdr.src_port);
1284 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1285 &tcp_spec->hdr.src_port,
1287 dport = tcp_spec->hdr.dst_port;
1288 size = sizeof(tcp_spec->hdr.dst_port);
1289 field = ulp_rte_parser_fld_copy(field,
1290 &tcp_spec->hdr.dst_port,
1292 size = sizeof(tcp_spec->hdr.sent_seq);
1293 field = ulp_rte_parser_fld_copy(field,
1294 &tcp_spec->hdr.sent_seq,
1296 size = sizeof(tcp_spec->hdr.recv_ack);
1297 field = ulp_rte_parser_fld_copy(field,
1298 &tcp_spec->hdr.recv_ack,
1300 size = sizeof(tcp_spec->hdr.data_off);
1301 field = ulp_rte_parser_fld_copy(field,
1302 &tcp_spec->hdr.data_off,
1304 size = sizeof(tcp_spec->hdr.tcp_flags);
1305 field = ulp_rte_parser_fld_copy(field,
1306 &tcp_spec->hdr.tcp_flags,
1308 size = sizeof(tcp_spec->hdr.rx_win);
1309 field = ulp_rte_parser_fld_copy(field,
1310 &tcp_spec->hdr.rx_win,
1312 size = sizeof(tcp_spec->hdr.cksum);
1313 field = ulp_rte_parser_fld_copy(field,
1314 &tcp_spec->hdr.cksum,
1316 size = sizeof(tcp_spec->hdr.tcp_urp);
1317 field = ulp_rte_parser_fld_copy(field,
1318 &tcp_spec->hdr.tcp_urp,
/* NOTE(review): idx advances here so the mask copies below land on the
 * TCP mask slots even when the spec branch ran — confirm against the
 * elided control flow around this point.
 */
1321 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1325 ulp_rte_prsr_mask_copy(params, &idx,
1326 &tcp_mask->hdr.src_port,
1327 sizeof(tcp_mask->hdr.src_port));
1328 ulp_rte_prsr_mask_copy(params, &idx,
1329 &tcp_mask->hdr.dst_port,
1330 sizeof(tcp_mask->hdr.dst_port));
1331 ulp_rte_prsr_mask_copy(params, &idx,
1332 &tcp_mask->hdr.sent_seq,
1333 sizeof(tcp_mask->hdr.sent_seq));
1334 ulp_rte_prsr_mask_copy(params, &idx,
1335 &tcp_mask->hdr.recv_ack,
1336 sizeof(tcp_mask->hdr.recv_ack));
1337 ulp_rte_prsr_mask_copy(params, &idx,
1338 &tcp_mask->hdr.data_off,
1339 sizeof(tcp_mask->hdr.data_off));
1340 ulp_rte_prsr_mask_copy(params, &idx,
1341 &tcp_mask->hdr.tcp_flags,
1342 sizeof(tcp_mask->hdr.tcp_flags));
1343 ulp_rte_prsr_mask_copy(params, &idx,
1344 &tcp_mask->hdr.rx_win,
1345 sizeof(tcp_mask->hdr.rx_win));
1346 ulp_rte_prsr_mask_copy(params, &idx,
1347 &tcp_mask->hdr.cksum,
1348 sizeof(tcp_mask->hdr.cksum));
1349 ulp_rte_prsr_mask_copy(params, &idx,
1350 &tcp_mask->hdr.tcp_urp,
1351 sizeof(tcp_mask->hdr.tcp_urp));
1353 /* add number of TCP header elements */
1354 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1356 /* Set the tcp header bitmap and computed l4 header bitmaps */
/* An existing outer L4 means this TCP item is the inner header. */
1357 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1358 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1359 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1360 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1361 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SPORT, sport);
1362 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DPORT, dport);
1364 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1365 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1366 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SPORT, sport);
1367 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DPORT, dport);
1369 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1370 return BNXT_TF_RC_SUCCESS;
1373 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies the VXLAN item spec (flags/rsvd0/vni/rsvd1) into
 * params->hdr_field, copies the mask, advances field_idx and sets the
 * VXLAN tunnel bit in the header bitmap.
 * NOTE(review): "¶ms" below is mojibake for "&params" — restore
 * before compiling.
 */
1375 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1376 struct ulp_rte_parser_params *params)
1378 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1379 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1380 struct ulp_rte_hdr_field *field;
1381 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1382 uint32_t idx = params->field_idx;
1386 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1390 size = sizeof(vxlan_spec->flags);
1391 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1394 size = sizeof(vxlan_spec->rsvd0);
1395 field = ulp_rte_parser_fld_copy(field,
1398 size = sizeof(vxlan_spec->vni);
1399 field = ulp_rte_parser_fld_copy(field,
1402 size = sizeof(vxlan_spec->rsvd1);
1403 field = ulp_rte_parser_fld_copy(field,
1408 ulp_rte_prsr_mask_copy(params, &idx,
1410 sizeof(vxlan_mask->flags));
1411 ulp_rte_prsr_mask_copy(params, &idx,
1413 sizeof(vxlan_mask->rsvd0));
1414 ulp_rte_prsr_mask_copy(params, &idx,
1416 sizeof(vxlan_mask->vni));
1417 ulp_rte_prsr_mask_copy(params, &idx,
1419 sizeof(vxlan_mask->rsvd1));
1421 /* Add number of vxlan header elements */
1422 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1424 /* Update the hdr_bitmap with vxlan */
1425 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1426 return BNXT_TF_RC_SUCCESS;
1429 /* Function to handle the parsing of RTE Flow item GRE Header. */
/*
 * Validates that the GRE item has a spec or mask, copies c_rsvd0_ver and
 * protocol into params->hdr_field plus their masks, advances field_idx
 * and sets the GRE tunnel bit in the header bitmap.
 * NOTE(review): "¶ms" below is mojibake for "&params" — restore
 * before compiling.
 */
1431 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1432 struct ulp_rte_parser_params *params)
1434 const struct rte_flow_item_gre *gre_spec = item->spec;
1435 const struct rte_flow_item_gre *gre_mask = item->mask;
1436 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1437 uint32_t idx = params->field_idx;
1439 struct ulp_rte_hdr_field *field;
/* A GRE item with neither spec nor mask carries no match data. */
1441 if (!gre_spec && !gre_mask) {
1442 BNXT_TF_DBG(ERR, "Parse Error: GRE item is invalid\n");
1443 return BNXT_TF_RC_ERROR;
1447 size = sizeof(gre_spec->c_rsvd0_ver);
1448 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1449 &gre_spec->c_rsvd0_ver,
1451 size = sizeof(gre_spec->protocol);
1452 field = ulp_rte_parser_fld_copy(field,
1453 &gre_spec->protocol,
1457 ulp_rte_prsr_mask_copy(params, &idx,
1458 &gre_mask->c_rsvd0_ver,
1459 sizeof(gre_mask->c_rsvd0_ver));
1460 ulp_rte_prsr_mask_copy(params, &idx,
1461 &gre_mask->protocol,
1462 sizeof(gre_mask->protocol));
1464 /* Add number of GRE header elements */
1465 params->field_idx += BNXT_ULP_PROTO_HDR_GRE_NUM;
1467 /* Update the hdr_bitmap with GRE */
1468 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1469 return BNXT_TF_RC_SUCCESS;
1472 /* Function to handle the parsing of RTE Flow item ANY. */
/* The ANY item matches everything; nothing to record — always succeeds. */
1474 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1475 struct ulp_rte_parser_params *params __rte_unused)
1477 return BNXT_TF_RC_SUCCESS;
1480 /* Function to handle the parsing of RTE Flow item void Header */
/* VOID items carry no match criteria; no-op that always succeeds. */
1482 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1483 struct ulp_rte_parser_params *params __rte_unused)
1485 return BNXT_TF_RC_SUCCESS;
1488 /* Function to handle the parsing of RTE Flow action void Header. */
/* VOID actions have no effect; no-op that always succeeds. */
1490 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1491 struct ulp_rte_parser_params *params __rte_unused)
1493 return BNXT_TF_RC_SUCCESS;
1496 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the mark id (converted to big-endian) into the MARK action
 * property and sets the MARK bit in the action bitmap. Fails when the
 * action has no configuration.
 * NOTE(review): "¶m" below is mojibake for "&param" — restore
 * before compiling.
 */
1498 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1499 struct ulp_rte_parser_params *param)
1501 const struct rte_flow_action_mark *mark;
1502 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1505 mark = action_item->conf;
1507 mark_id = tfp_cpu_to_be_32(mark->id);
1508 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1509 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1511 /* Update the act_bitmap with mark */
1512 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1513 return BNXT_TF_RC_SUCCESS;
1515 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1516 return BNXT_TF_RC_ERROR;
1519 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Sets the RSS bit in the action bitmap when the action carries a
 * configuration; the RSS parameters themselves are not copied here.
 * Fails when the action has no configuration.
 */
1521 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1522 struct ulp_rte_parser_params *param)
1524 const struct rte_flow_action_rss *rss = action_item->conf;
1527 /* Update the act_bitmap with rss */
1528 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1529 return BNXT_TF_RC_SUCCESS;
1531 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1532 return BNXT_TF_RC_ERROR;
1535 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walks the item list in the vxlan_encap action definition —
 * ETH [VLAN [VLAN]] (IPv4|IPv6) UDP VXLAN — and flattens each header
 * into the encap action-property buffers (L2 MACs, VLAN tags, IP, UDP,
 * tunnel), recording sizes/types as big-endian property fields, then
 * sets the VXLAN_ENCAP action bit. Returns BNXT_TF_RC_ERROR if any
 * mandatory header is missing or malformed.
 * NOTE(review): "¶ms" below is mojibake for "&params" — restore
 * before compiling.
 */
1537 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1538 struct ulp_rte_parser_params *params)
1540 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1541 const struct rte_flow_item *item;
1542 const struct rte_flow_item_eth *eth_spec;
1543 const struct rte_flow_item_ipv4 *ipv4_spec;
1544 const struct rte_flow_item_ipv6 *ipv6_spec;
1545 struct rte_flow_item_vxlan vxlan_spec;
1546 uint32_t vlan_num = 0, vlan_size = 0;
1547 uint32_t ip_size = 0, ip_type = 0;
1548 uint32_t vxlan_size = 0;
1550 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1551 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1553 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1554 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1556 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1557 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1558 const uint8_t *tmp_buff;
1560 vxlan_encap = action_item->conf;
1562 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1563 return BNXT_TF_RC_ERROR;
1566 item = vxlan_encap->definition;
1568 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1569 return BNXT_TF_RC_ERROR;
1572 if (!ulp_rte_item_skip_void(&item, 0))
1573 return BNXT_TF_RC_ERROR;
1575 /* must have ethernet header */
1576 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1577 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1578 return BNXT_TF_RC_ERROR;
/* Copy dst/src MACs 8-byte-aligned into the encap L2 properties. */
1580 eth_spec = item->spec;
1581 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1582 ulp_encap_buffer_copy(buff,
1583 eth_spec->dst.addr_bytes,
1584 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1585 ULP_BUFFER_ALIGN_8_BYTE);
1587 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1588 ulp_encap_buffer_copy(buff,
1589 eth_spec->src.addr_bytes,
1590 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1591 ULP_BUFFER_ALIGN_8_BYTE);
1593 /* Goto the next item */
1594 if (!ulp_rte_item_skip_void(&item, 1))
1595 return BNXT_TF_RC_ERROR;
1597 /* May have vlan header */
1598 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1600 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1601 ulp_encap_buffer_copy(buff,
1603 sizeof(struct rte_flow_item_vlan),
1604 ULP_BUFFER_ALIGN_8_BYTE);
1606 if (!ulp_rte_item_skip_void(&item, 1))
1607 return BNXT_TF_RC_ERROR;
1610 /* may have two vlan headers */
1611 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
/* Second tag is stored directly after the first in the VTAG buffer. */
1613 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1614 sizeof(struct rte_flow_item_vlan)],
1616 sizeof(struct rte_flow_item_vlan));
1617 if (!ulp_rte_item_skip_void(&item, 1))
1618 return BNXT_TF_RC_ERROR;
1620 /* Update the vlan count and size of more than one */
1622 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1623 vlan_num = tfp_cpu_to_be_32(vlan_num);
1624 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1627 vlan_size = tfp_cpu_to_be_32(vlan_size);
1628 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1633 /* L3 must be IPv4, IPv6 */
1634 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1635 ipv4_spec = item->spec;
1636 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1638 /* copy the ipv4 details */
/* Empty ver/hlen+TOS in the spec means use the default IPv4 header. */
1639 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1640 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1641 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1642 ulp_encap_buffer_copy(buff,
1644 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1645 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1646 ULP_BUFFER_ALIGN_8_BYTE);
1648 /* Total length being ignored in the ip hdr. */
/* Spec provided: copy id/proto first, then ver/hlen+TOS after it. */
1649 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1650 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1651 ulp_encap_buffer_copy(buff,
1653 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1654 ULP_BUFFER_ALIGN_8_BYTE);
1655 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1656 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1657 ulp_encap_buffer_copy(buff,
1658 &ipv4_spec->hdr.version_ihl,
1659 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1660 ULP_BUFFER_ALIGN_8_BYTE);
1663 /* Update the dst ip address in ip encap buffer */
1664 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1665 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1666 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1667 ulp_encap_buffer_copy(buff,
1668 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1669 sizeof(ipv4_spec->hdr.dst_addr),
1670 ULP_BUFFER_ALIGN_8_BYTE);
1672 /* Update the src ip address */
/* Src address is right-justified at the end of the IP_SRC property. */
1673 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1674 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1675 sizeof(ipv4_spec->hdr.src_addr)];
1676 ulp_encap_buffer_copy(buff,
1677 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1678 sizeof(ipv4_spec->hdr.src_addr),
1679 ULP_BUFFER_ALIGN_8_BYTE);
1681 /* Update the ip size details */
1682 ip_size = tfp_cpu_to_be_32(ip_size);
1683 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1684 &ip_size, sizeof(uint32_t));
1686 /* update the ip type */
1687 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1688 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1689 &ip_type, sizeof(uint32_t));
1691 /* update the computed field to notify it is ipv4 header */
1692 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1695 if (!ulp_rte_item_skip_void(&item, 1))
1696 return BNXT_TF_RC_ERROR;
1697 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1698 ipv6_spec = item->spec;
1699 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1701 /* copy the ipv6 details */
/* Empty vtc_flow in the spec means use the default IPv6 header. */
1702 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1703 if (ulp_buffer_is_empty(tmp_buff,
1704 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1705 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1706 ulp_encap_buffer_copy(buff,
1708 sizeof(def_ipv6_hdr),
1709 ULP_BUFFER_ALIGN_8_BYTE);
1711 /* The payload length being ignored in the ip hdr. */
/* Spec provided: proto/ttl first, then vtc_flow after it. */
1712 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1713 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1714 ulp_encap_buffer_copy(buff,
1716 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1717 ULP_BUFFER_ALIGN_8_BYTE);
1718 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1719 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1720 BNXT_ULP_ENCAP_IPV6_DO];
1721 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1722 ulp_encap_buffer_copy(buff,
1724 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1725 ULP_BUFFER_ALIGN_8_BYTE);
1727 /* Update the dst ip address in ip encap buffer */
1728 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1729 sizeof(def_ipv6_hdr)];
1730 ulp_encap_buffer_copy(buff,
1731 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1732 sizeof(ipv6_spec->hdr.dst_addr),
1733 ULP_BUFFER_ALIGN_8_BYTE);
1735 /* Update the src ip address */
1736 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1737 ulp_encap_buffer_copy(buff,
1738 (const uint8_t *)ipv6_spec->hdr.src_addr,
1739 sizeof(ipv6_spec->hdr.src_addr),
1740 ULP_BUFFER_ALIGN_16_BYTE);
1742 /* Update the ip size details */
1743 ip_size = tfp_cpu_to_be_32(ip_size);
1744 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1745 &ip_size, sizeof(uint32_t));
1747 /* update the ip type */
1748 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1749 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1750 &ip_type, sizeof(uint32_t));
1752 /* update the computed field to notify it is ipv6 header */
1753 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1756 if (!ulp_rte_item_skip_void(&item, 1))
1757 return BNXT_TF_RC_ERROR;
1759 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1760 return BNXT_TF_RC_ERROR;
1764 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1765 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1766 return BNXT_TF_RC_ERROR;
1768 /* copy the udp details */
1769 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1770 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1771 ULP_BUFFER_ALIGN_8_BYTE);
1773 if (!ulp_rte_item_skip_void(&item, 1))
1774 return BNXT_TF_RC_ERROR;
1777 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1778 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1779 return BNXT_TF_RC_ERROR;
1781 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1782 /* copy the vxlan details */
/* Force the VXLAN "I" (valid VNI) flag per RFC 7348. */
1783 memcpy(&vxlan_spec, item->spec, vxlan_size);
1784 vxlan_spec.flags = 0x08;
1785 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
/* Non-IPv4 outer: copy the vxlan header in two aligned halves. */
1786 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1787 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1788 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1790 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1791 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1792 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1793 (const uint8_t *)&vxlan_spec.vni,
1794 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1796 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1797 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1798 &vxlan_size, sizeof(uint32_t));
1800 /* update the hdr_bitmap with vxlan */
1801 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
1802 return BNXT_TF_RC_SUCCESS;
1805 /* Function to handle the parsing of RTE Flow action vxlan_decap Header */
/*
 * Sets the VXLAN_DECAP action bit and marks the tunnel-decap computed
 * fields; the action carries no configuration to parse.
 */
1807 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1809 struct ulp_rte_parser_params *params)
1811 /* update the act_bitmap with vxlan decap */
1812 ULP_BITMAP_SET(params->act_bitmap.bits,
1813 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
1814 /* Update computational field with tunnel decap info */
1815 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1816 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1817 return BNXT_TF_RC_SUCCESS;
1820 /* Function to handle the parsing of RTE Flow action drop Header. */
/* Sets the DROP bit in the action bitmap; no configuration to parse. */
1822 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1823 struct ulp_rte_parser_params *params)
1825 /* Update the act_bitmap with drop */
1826 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
1827 return BNXT_TF_RC_SUCCESS;
1830 /* Function to handle the parsing of RTE Flow action count. */
/*
 * Rejects shared counters, copies the count configuration into the COUNT
 * action property and sets the COUNT action bit.
 * Returns BNXT_TF_RC_PARSE_ERR for shared counters.
 * NOTE(review): "¶ms" below is mojibake for "&params" — restore
 * before compiling.
 */
1832 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1833 struct ulp_rte_parser_params *params)
1836 const struct rte_flow_action_count *act_count;
1837 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1839 act_count = action_item->conf;
1841 if (act_count->shared) {
1843 "Parse Error:Shared count not supported\n");
1844 return BNXT_TF_RC_PARSE_ERR;
1846 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1848 BNXT_ULP_ACT_PROP_SZ_COUNT);
1851 /* Update the hdr_bitmap with count */
1852 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
1853 return BNXT_TF_RC_SUCCESS;
1856 /* Function to handle the parsing of action ports. */
/*
 * Resolves the destination for a port action: for egress flows it looks
 * up the vport for ifindex in the port DB and stores it in the VPORT
 * action property; for ingress it picks the VF or DRV function vnic
 * (based on the action port type) and stores it in the VNIC property.
 * Both are written big-endian. Sets ACT_PORT_IS_SET on success.
 * NOTE(review): "¶m" below is mojibake for "&param" — restore
 * before compiling.
 */
1858 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1861 enum bnxt_ulp_direction_type dir;
1864 struct ulp_rte_act_prop *act = ¶m->act_prop;
1865 enum bnxt_ulp_intf_type port_type;
1868 /* Get the direction */
1869 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1870 if (dir == BNXT_ULP_DIR_EGRESS) {
1871 /* For egress direction, fill vport */
1872 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1873 return BNXT_TF_RC_ERROR;
1876 pid = rte_cpu_to_be_32(pid);
1877 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1878 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1880 /* For ingress direction, fill vnic */
/* VF reps use the VF function's vnic, everything else the driver's. */
1881 port_type = ULP_COMP_FLD_IDX_RD(param,
1882 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1883 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1884 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1886 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1888 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1890 return BNXT_TF_RC_ERROR;
1893 pid = rte_cpu_to_be_32(pid);
1894 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1895 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1898 /* Update the action port set bit */
1899 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1900 return BNXT_TF_RC_SUCCESS;
1903 /* Function to handle the parsing of RTE Flow action PF. */
/*
 * Redirects to the PF that received the flow: maps the incoming port id
 * to a port-DB ifindex, verifies it is a PF, records the port type and
 * delegates to ulp_rte_parser_act_port_set to fill vport/vnic.
 */
1905 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1906 struct ulp_rte_parser_params *params)
1910 enum bnxt_ulp_intf_type intf_type;
1912 /* Get the port id of the current device */
1913 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1915 /* Get the port db ifindex */
1916 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1918 BNXT_TF_DBG(ERR, "Invalid port id\n");
1919 return BNXT_TF_RC_ERROR;
1922 /* Check the port is PF port */
1923 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1924 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1925 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1926 return BNXT_TF_RC_ERROR;
1928 /* Update the action properties */
1929 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1930 return ulp_rte_parser_act_port_set(params, ifindex);
1933 /* Function to handle the parsing of RTE Flow action VF. */
/*
 * Redirects to a VF given by logical id in the action conf: validates
 * the conf (original-port redirect unsupported), translates the logical
 * VF id to an absolute one using the port's first_vf_id, maps it to a
 * port-DB ifindex, verifies the port is a (trusted) VF, then delegates
 * to ulp_rte_parser_act_port_set.
 */
1935 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1936 struct ulp_rte_parser_params *params)
1938 const struct rte_flow_action_vf *vf_action;
1939 enum bnxt_ulp_intf_type intf_type;
1943 vf_action = action_item->conf;
1945 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1946 return BNXT_TF_RC_PARSE_ERR;
1949 if (vf_action->original) {
1950 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1951 return BNXT_TF_RC_PARSE_ERR;
1954 bp = bnxt_get_bp(params->port_id);
1956 BNXT_TF_DBG(ERR, "Invalid bp\n");
1957 return BNXT_TF_RC_ERROR;
1960 /* vf_action->id is a logical number which in this case is an
1961 * offset from the first VF. So, to get the absolute VF id, the
1962 * offset must be added to the absolute first vf id of that port.
1964 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
1965 bp->first_vf_id + vf_action->id,
1967 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1968 return BNXT_TF_RC_ERROR;
1970 /* Check the port is VF port */
1971 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1972 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1973 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1974 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1975 return BNXT_TF_RC_ERROR;
1978 /* Update the action properties */
1979 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1980 return ulp_rte_parser_act_port_set(params, ifindex);
1983 /* Function to handle the parsing of RTE Flow action port_id. */
/*
 * Redirects to an arbitrary DPDK port id: validates the conf
 * (original-port redirect unsupported), maps the port id to a port-DB
 * ifindex, records the interface type and delegates to
 * ulp_rte_parser_act_port_set.
 */
1985 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1986 struct ulp_rte_parser_params *param)
1988 const struct rte_flow_action_port_id *port_id = act_item->conf;
1990 enum bnxt_ulp_intf_type intf_type;
1994 "ParseErr: Invalid Argument\n");
1995 return BNXT_TF_RC_PARSE_ERR;
1997 if (port_id->original) {
1999 "ParseErr:Portid Original not supported\n");
2000 return BNXT_TF_RC_PARSE_ERR;
2003 /* Get the port db ifindex */
2004 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2006 BNXT_TF_DBG(ERR, "Invalid port id\n");
2007 return BNXT_TF_RC_ERROR;
2010 /* Get the intf type */
2011 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2013 BNXT_TF_DBG(ERR, "Invalid port type\n");
2014 return BNXT_TF_RC_ERROR;
2017 /* Set the action port */
2018 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2019 return ulp_rte_parser_act_port_set(param, ifindex);
2022 /* Function to handle the parsing of RTE Flow action phy_port. */
/*
 * Redirects to a physical port (egress only): validates the conf,
 * rejects ingress flows, looks up the vport for phy_port->index in the
 * port DB, stores it big-endian in the VPORT action property and marks
 * the action-port computed fields.
 */
2024 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2025 struct ulp_rte_parser_params *prm)
2027 const struct rte_flow_action_phy_port *phy_port;
2031 enum bnxt_ulp_direction_type dir;
2033 phy_port = action_item->conf;
2036 "ParseErr: Invalid Argument\n");
2037 return BNXT_TF_RC_PARSE_ERR;
2040 if (phy_port->original) {
2042 "Parse Err:Port Original not supported\n");
2043 return BNXT_TF_RC_PARSE_ERR;
/* Physical-port redirect only makes sense for egress traffic. */
2045 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2046 if (dir != BNXT_ULP_DIR_EGRESS) {
2048 "Parse Err:Phy ports are valid only for egress\n");
2049 return BNXT_TF_RC_PARSE_ERR;
2051 /* Get the physical port details from port db */
2052 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2055 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2060 pid = rte_cpu_to_be_32(pid);
2061 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2062 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2064 /* Update the action port set bit */
2065 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2066 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2067 BNXT_ULP_INTF_TYPE_PHY_PORT);
2068 return BNXT_TF_RC_SUCCESS;
2071 /* Function to handle the parsing of RTE Flow action pop vlan. */
/* Sets the POP_VLAN action bit; the action has no configuration. */
2073 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2074 struct ulp_rte_parser_params *params)
2076 /* Update the act_bitmap with pop */
2077 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2078 return BNXT_TF_RC_SUCCESS;
2081 /* Function to handle the parsing of RTE Flow action push vlan. */
/*
 * Stores the push-vlan ethertype in the PUSH_VLAN action property and
 * sets the PUSH_VLAN action bit. Only the 802.1Q ethertype is accepted.
 * NOTE(review): "¶ms"/"ðertype" below are mojibake for
 * "&params"/"&ethertype" — restore before compiling.
 */
2083 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2084 struct ulp_rte_parser_params *params)
2086 const struct rte_flow_action_of_push_vlan *push_vlan;
2088 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2090 push_vlan = action_item->conf;
2092 ethertype = push_vlan->ethertype;
/* NOTE(review): ethertype from the action conf is already big-endian;
 * confirm the tfp_cpu_to_be_16() on it is the intended comparison.
 */
2093 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2095 "Parse Err: Ethertype not supported\n");
2096 return BNXT_TF_RC_PARSE_ERR;
2098 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2099 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2100 /* Update the hdr_bitmap with push vlan */
2101 ULP_BITMAP_SET(params->act_bitmap.bits,
2102 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2103 return BNXT_TF_RC_SUCCESS;
2105 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2106 return BNXT_TF_RC_ERROR;
2109 /* Function to handle the parsing of RTE Flow action set vlan id. */
/*
 * Stores the vlan vid in the SET_VLAN_VID action property and sets the
 * SET_VLAN_VID action bit. Rejects a missing conf or a zero vid.
 * NOTE(review): "¶ms" below is mojibake for "&params" — restore
 * before compiling.
 */
2111 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2112 struct ulp_rte_parser_params *params)
2114 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2116 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2118 vlan_vid = action_item->conf;
2119 if (vlan_vid && vlan_vid->vlan_vid) {
2120 vid = vlan_vid->vlan_vid;
2121 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2122 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2123 /* Update the hdr_bitmap with vlan vid */
2124 ULP_BITMAP_SET(params->act_bitmap.bits,
2125 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2126 return BNXT_TF_RC_SUCCESS;
2128 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2129 return BNXT_TF_RC_ERROR;
2132 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
/*
 * Stores the vlan pcp in the SET_VLAN_PCP action property and sets the
 * SET_VLAN_PCP action bit. Fails when the action has no configuration.
 * NOTE(review): "¶ms" below is mojibake for "&params" — restore
 * before compiling.
 */
2134 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2135 struct ulp_rte_parser_params *params)
2137 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2139 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2141 vlan_pcp = action_item->conf;
2143 pcp = vlan_pcp->vlan_pcp;
2144 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2145 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2146 /* Update the hdr_bitmap with vlan pcp */
2147 ULP_BITMAP_SET(params->act_bitmap.bits,
2148 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2149 return BNXT_TF_RC_SUCCESS;
2151 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2152 return BNXT_TF_RC_ERROR;
2155 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2157 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2158 struct ulp_rte_parser_params *params)
2160 const struct rte_flow_action_set_ipv4 *set_ipv4;
2161 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2163 set_ipv4 = action_item->conf;
2165 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2166 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2167 /* Update the hdr_bitmap with set ipv4 src */
2168 ULP_BITMAP_SET(params->act_bitmap.bits,
2169 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2170 return BNXT_TF_RC_SUCCESS;
2172 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2173 return BNXT_TF_RC_ERROR;
2176 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2178 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2179 struct ulp_rte_parser_params *params)
2181 const struct rte_flow_action_set_ipv4 *set_ipv4;
2182 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2184 set_ipv4 = action_item->conf;
2186 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2187 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2188 /* Update the hdr_bitmap with set ipv4 dst */
2189 ULP_BITMAP_SET(params->act_bitmap.bits,
2190 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2191 return BNXT_TF_RC_SUCCESS;
2193 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2194 return BNXT_TF_RC_ERROR;
2197 /* Function to handle the parsing of RTE Flow action set tp src.*/
2199 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2200 struct ulp_rte_parser_params *params)
2202 const struct rte_flow_action_set_tp *set_tp;
2203 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2205 set_tp = action_item->conf;
2207 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2208 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2209 /* Update the hdr_bitmap with set tp src */
2210 ULP_BITMAP_SET(params->act_bitmap.bits,
2211 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2212 return BNXT_TF_RC_SUCCESS;
2215 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2216 return BNXT_TF_RC_ERROR;
2219 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2221 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2222 struct ulp_rte_parser_params *params)
2224 const struct rte_flow_action_set_tp *set_tp;
2225 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2227 set_tp = action_item->conf;
2229 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2230 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2231 /* Update the hdr_bitmap with set tp dst */
2232 ULP_BITMAP_SET(params->act_bitmap.bits,
2233 BNXT_ULP_ACT_BIT_SET_TP_DST);
2234 return BNXT_TF_RC_SUCCESS;
2237 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2238 return BNXT_TF_RC_ERROR;
2241 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2243 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2244 struct ulp_rte_parser_params *params)
2246 /* Update the act_bitmap with dec ttl */
2247 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2248 return BNXT_TF_RC_SUCCESS;
2251 /* Function to handle the parsing of RTE Flow action JUMP */
2253 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2254 struct ulp_rte_parser_params *params)
2256 /* Update the act_bitmap with dec ttl */
2257 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2258 return BNXT_TF_RC_SUCCESS;