1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
19 #include "ulp_template_db_tbl.h"
21 /* Local defines for the parsing functions */
22 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
23 #define ULP_VLAN_PRIORITY_MASK 0x700
24 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
25 #define ULP_UDP_PORT_VXLAN 4789
27 /* Utility function to skip the void items. */
/*
 * Advances the item cursor (*item) past RTE_FLOW_ITEM_TYPE_VOID entries so
 * callers operate on the next meaningful flow item.
 *
 * item      - in/out cursor into the rte_flow item array
 * increment - step used while skipping; NOTE(review): the advancing
 *             statement is not visible in this truncated listing --
 *             presumably *item is advanced by 'increment' per void entry;
 *             confirm against the full source.
 */
29 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
/* Loop while the current item exists and is a VOID placeholder. */
35 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
42 /* Utility function to update the field_bitmap */
/*
 * Sets bit 'idx' in params->fld_bitmap when hdr_field[idx] has a non-zero
 * mask (the field participates in the match); otherwise clears the bit.
 * A partial (not all-ones) mask additionally marks the flow as a
 * wild-card match via BNXT_ULP_CF_IDX_WC_MATCH.
 */
44 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
47 struct ulp_rte_hdr_field *field;
/* NOTE(review): '¶ms' is a mis-encoding of '&params' -- repair encoding. */
49 field = ¶ms->hdr_field[idx];
50 if (ulp_bitmap_notzero(field->mask, field->size)) {
51 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial mask => not an exact match; record the wild-card indicator. */
53 if (!ulp_bitmap_is_ones(field->mask, field->size))
54 ULP_COMP_FLD_IDX_WR(params,
55 BNXT_ULP_CF_IDX_WC_MATCH, 1);
/* All-zero mask: field does not participate in the match. */
57 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
61 /* Utility function to copy field spec items */
62 static struct ulp_rte_hdr_field *
/*
 * Copies 'buffer' into field->spec (field->size bytes) and returns a
 * hdr_field pointer for the caller to chain the next copy.
 * NOTE(review): the size assignment and return statement are not visible
 * in this truncated listing -- presumably 'return field + 1'; confirm.
 */
63 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
68 memcpy(field->spec, buffer, field->size);
73 /* Utility function to copy field masks items */
/*
 * Copies 'size' bytes of mask data into hdr_field[*idx].mask and refreshes
 * the field bitmap for that index. NOTE(review): the increment of *idx is
 * not visible in this truncated listing -- confirm against the full source.
 */
75 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
/* NOTE(review): '¶ms' is a mis-encoding of '&params' -- repair encoding. */
80 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
82 memcpy(field->mask, buffer, size);
/* Recompute match/wild-card state now that this mask is populated. */
83 ulp_rte_parser_field_bitmap_update(params, *idx);
87 /* Utility function to ignore field masks items */
/*
 * Deliberate no-op counterpart of ulp_rte_prsr_mask_copy(): used where a
 * mask must be skipped (e.g. unsupported wild-card fields) while keeping
 * the field index sequence intact. All data parameters are unused;
 * presumably only the index is advanced in the non-visible body --
 * TODO confirm.
 */
89 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
91 const void *buffer __rte_unused,
92 uint32_t size __rte_unused)
98 * Function to handle the parsing of RTE Flows and placing
99 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow item array until RTE_FLOW_ITEM_TYPE_END, dispatching
 * each supported item to its registered handler from ulp_hdr_info[].
 * Returns BNXT_TF_RC_PARSE_ERR for unsupported items, BNXT_TF_RC_ERROR on
 * handler failure, else the result of the implicit SVIF match update.
 */
102 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
103 struct ulp_rte_parser_params *params)
105 const struct rte_flow_item *item = pattern;
106 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots for the SVIF match fields. */
108 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
110 /* Set the computed flags for no vlan tags before parsing */
111 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
112 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
114 /* Parse all the items in the pattern */
115 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
116 /* get the header information from the flow_hdr_info table */
117 hdr_info = &ulp_hdr_info[item->type];
118 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
120 "Truflow parser does not support type %d\n",
122 return BNXT_TF_RC_PARSE_ERR;
123 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
124 /* call the registered callback handler */
125 if (hdr_info->proto_hdr_func) {
126 if (hdr_info->proto_hdr_func(item, params) !=
127 BNXT_TF_RC_SUCCESS) {
128 return BNXT_TF_RC_ERROR;
/* NOTE(review): the item++ advance is not visible in this listing. */
134 /* update the implied SVIF */
135 return ulp_rte_parser_implicit_match_port_process(params);
139 * Function to handle the parsing of RTE Flows and placing
140 * the RTE flow actions into the ulp structures.
/*
 * Walks the rte_flow action array until RTE_FLOW_ACTION_TYPE_END,
 * dispatching each supported action to its registered handler from
 * ulp_act_info[]. Returns BNXT_TF_RC_ERROR for unsupported actions or
 * handler failure, else BNXT_TF_RC_SUCCESS after updating the implicit
 * action port.
 */
143 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
144 struct ulp_rte_parser_params *params)
146 const struct rte_flow_action *action_item = actions;
147 struct bnxt_ulp_rte_act_info *hdr_info;
149 /* Parse all the items in the pattern */
150 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
151 /* get the header information from the flow_hdr_info table */
152 hdr_info = &ulp_act_info[action_item->type];
153 if (hdr_info->act_type ==
154 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
156 "Truflow parser does not support act %u\n",
158 return BNXT_TF_RC_ERROR;
159 } else if (hdr_info->act_type ==
160 BNXT_ULP_ACT_TYPE_SUPPORTED) {
161 /* call the registered callback handler */
162 if (hdr_info->proto_act_func) {
163 if (hdr_info->proto_act_func(action_item,
165 BNXT_TF_RC_SUCCESS) {
166 return BNXT_TF_RC_ERROR;
/* NOTE(review): the action_item++ advance is not visible in this listing. */
172 /* update the implied port details */
173 ulp_rte_parser_implicit_act_port_process(params);
174 return BNXT_TF_RC_SUCCESS;
178 * Function to handle the post processing of the computed
179 * fields for the interface.
/*
 * Resolves the flow's port id to a port-db ifindex and, for ingress flows,
 * publishes the relevant PARIF values into the computed fields:
 * PHY_PORT_PARIF always, VF_FUNC_PARIF for VF-rep matches (also setting
 * MATCH_PORT_IS_VFREP), DRV_FUNC_PARIF otherwise (also MATCH_PORT_IS_PF
 * for PF matches). Egress flows fall through with no PARIF updates.
 */
182 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
185 uint16_t port_id, parif;
187 enum bnxt_ulp_direction_type dir;
189 /* get the direction details */
190 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
192 /* read the port id details */
193 port_id = ULP_COMP_FLD_IDX_RD(params,
194 BNXT_ULP_CF_IDX_INCOMING_IF);
/* Translate the DPDK port id into the driver's port-db ifindex. */
195 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
198 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
202 if (dir == BNXT_ULP_DIR_INGRESS) {
/* Physical-port PARIF applies to every ingress flow. */
204 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
205 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
206 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
209 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
212 /* Get the match port type */
213 mtype = ULP_COMP_FLD_IDX_RD(params,
214 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
215 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
216 ULP_COMP_FLD_IDX_WR(params,
217 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
219 /* Set VF func PARIF */
220 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
221 BNXT_ULP_VF_FUNC_PARIF,
224 "ParseErr:ifindex is not valid\n");
227 ULP_COMP_FLD_IDX_WR(params,
228 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
232 /* Set DRV func PARIF */
233 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
234 BNXT_ULP_DRV_FUNC_PARIF,
237 "ParseErr:ifindex is not valid\n");
240 ULP_COMP_FLD_IDX_WR(params,
241 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
244 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
245 ULP_COMP_FLD_IDX_WR(params,
246 BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
/*
 * Post-processes a parsed (non-tunnel) flow: stamps the direction bit into
 * the header and action bitmaps, derives the VF-to-VF flag, translates the
 * DEC_TTL action into tunnel vs. plain TTL-decrement computed fields,
 * merges the field-derived protocol bits into the header bitmap, records
 * the flow id, and updates interface-related computed fields.
 */
253 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
255 enum bnxt_ulp_intf_type match_port_type, act_port_type;
256 enum bnxt_ulp_direction_type dir;
257 uint32_t act_port_set;
259 /* Get the computed details */
260 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
261 match_port_type = ULP_COMP_FLD_IDX_RD(params,
262 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
263 act_port_type = ULP_COMP_FLD_IDX_RD(params,
264 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
265 act_port_set = ULP_COMP_FLD_IDX_RD(params,
266 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
268 /* set the flow direction in the proto and action header */
269 if (dir == BNXT_ULP_DIR_EGRESS) {
270 ULP_BITMAP_SET(params->hdr_bitmap.bits,
271 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
272 ULP_BITMAP_SET(params->act_bitmap.bits,
273 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
276 /* calculate the VF to VF flag */
277 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
278 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
279 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
281 /* Update the decrement ttl computational fields */
282 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
283 BNXT_ULP_ACT_BIT_DEC_TTL)) {
285 * Check that vxlan proto is included and vxlan decap
286 * action is not set then decrement tunnel ttl.
287 * Similarly add GRE and NVGRE in future.
289 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
290 BNXT_ULP_HDR_BIT_T_VXLAN) &&
291 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
292 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
293 ULP_COMP_FLD_IDX_WR(params,
294 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
/* Non-tunnel case: decrement the inner/plain packet TTL instead. */
296 ULP_COMP_FLD_IDX_WR(params,
297 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
301 /* Merge the hdr_fp_bit into the proto header bit */
302 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
304 /* Update the comp fld fid */
305 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
307 /* Update the computed interface parameters */
308 bnxt_ulp_comp_fld_intf_update(params);
310 /* TBD: Handle the flow rejection scenarios */
315 * Function to handle the post processing of the parsing details
/*
 * Runs normal-flow post processing first, then returns the result of the
 * tunnel-flow post processing (the final status comes from the tunnel
 * pass; the normal pass's outcome is not propagated here).
 */
318 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
320 ulp_post_process_normal_flow(params);
321 return ulp_post_process_tun_flow(params);
325 * Function to compute the flow direction based on the match port details
/*
 * Writes BNXT_ULP_CF_IDX_DIRECTION: an ingress-attributed flow that
 * matches a VF representor is treated as egress (traffic is headed toward
 * the VF); otherwise the direction follows the flow's dir_attr bits.
 */
328 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
330 enum bnxt_ulp_intf_type match_port_type;
332 /* Get the match port type */
333 match_port_type = ULP_COMP_FLD_IDX_RD(params,
334 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
336 /* If ingress flow and matchport is vf rep then dir is egress*/
337 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
338 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
339 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
340 BNXT_ULP_DIR_EGRESS);
342 /* Assign the input direction */
343 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
344 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
345 BNXT_ULP_DIR_INGRESS);
/* Non-ingress attribute: default to egress direction. */
347 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
348 BNXT_ULP_DIR_EGRESS);
352 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolves and stores the source VIF (SVIF) match for the flow:
 * records the match port type, computes the direction, picks the SVIF
 * type (PHY for ingress; VF_FUNC for VF-rep else DRV_FUNC), looks up the
 * SVIF in the port db, and writes big-endian spec/mask into the SVIF
 * hdr_field slot. Fails if an SVIF was already set (only one source
 * allowed) or the port type is invalid.
 */
354 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
359 enum bnxt_ulp_direction_type dir;
360 struct ulp_rte_hdr_field *hdr_field;
361 enum bnxt_ulp_svif_type svif_type;
362 enum bnxt_ulp_intf_type port_type;
/* Reject a second source item: SVIF may be assigned only once per flow. */
364 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
365 BNXT_ULP_INVALID_SVIF_VAL) {
367 "SVIF already set,multiple source not support'd\n");
368 return BNXT_TF_RC_ERROR;
371 /* Get port type details */
372 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
373 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
374 BNXT_TF_DBG(ERR, "Invalid port type\n");
375 return BNXT_TF_RC_ERROR;
378 /* Update the match port type */
379 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
381 /* compute the direction */
382 bnxt_ulp_rte_parser_direction_compute(params);
384 /* Get the computed direction */
385 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
386 if (dir == BNXT_ULP_DIR_INGRESS) {
387 svif_type = BNXT_ULP_PHY_PORT_SVIF;
/* Egress: VF representors use the VF function SVIF, others the driver's. */
389 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
390 svif_type = BNXT_ULP_VF_FUNC_SVIF;
392 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
394 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* Hardware expects the SVIF in network byte order. */
396 svif = rte_cpu_to_be_16(svif);
/* NOTE(review): '¶ms' is a mis-encoding of '&params' -- repair encoding. */
397 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
398 memcpy(hdr_field->spec, &svif, sizeof(svif));
399 memcpy(hdr_field->mask, &mask, sizeof(mask));
400 hdr_field->size = sizeof(svif);
/* Flag SVIF as set (stored host-order) so later items cannot re-set it. */
401 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
402 rte_be_to_cpu_16(svif));
403 return BNXT_TF_RC_SUCCESS;
406 /* Function to handle the parsing of the RTE port id */
/*
 * Applies an implicit SVIF match when the pattern contained no explicit
 * source item: succeeds immediately if SVIF was already set, otherwise
 * resolves the incoming port id to an ifindex and sets a full-mask
 * (0xFFFF) SVIF match on it.
 */
408 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
410 uint16_t port_id = 0;
411 uint16_t svif_mask = 0xFFFF;
413 int32_t rc = BNXT_TF_RC_ERROR;
/* An explicit source item already set the SVIF; nothing to do. */
415 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
416 BNXT_ULP_INVALID_SVIF_VAL)
417 return BNXT_TF_RC_SUCCESS;
419 /* SVIF not set. So get the port id */
420 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
/* Convert the DPDK port id to the driver's port-db ifindex. */
422 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
425 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
429 /* Update the SVIF details */
430 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
434 /* Function to handle the implicit action port id */
/*
 * If no explicit port action was parsed, synthesizes a port-id action from
 * the incoming interface and runs it through the normal port-id action
 * handler, then clears ACT_PORT_IS_SET so the port counts as implicit.
 * Always returns BNXT_TF_RC_SUCCESS.
 */
436 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
438 struct rte_flow_action action_item = {0};
439 struct rte_flow_action_port_id port_id = {0};
441 /* Read the action port set bit */
442 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
443 /* Already set, so just exit */
444 return BNXT_TF_RC_SUCCESS;
/* Build a synthetic port-id action pointing at the incoming interface. */
446 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
447 action_item.conf = &port_id;
449 /* Update the action port based on incoming port */
450 ulp_rte_port_id_act_handler(&action_item, params);
452 /* Reset the action port set bit */
453 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
454 return BNXT_TF_RC_SUCCESS;
457 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Handler for RTE_FLOW_ITEM_TYPE_PF: the item carries no data, so the
 * match is derived from the incoming interface -- resolve it to an
 * ifindex and set a full-mask SVIF match on it.
 */
459 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
460 struct ulp_rte_parser_params *params)
462 uint16_t port_id = 0;
463 uint16_t svif_mask = 0xFFFF;
466 /* Get the implicit port id */
467 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
469 /* perform the conversion from dpdk port to bnxt ifindex */
470 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
473 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
474 return BNXT_TF_RC_ERROR;
477 /* Update the SVIF details */
478 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
481 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * Handler for RTE_FLOW_ITEM_TYPE_VF: validates the VF spec/mask, converts
 * the VF function id to a port-db ifindex, and sets the SVIF match.
 * Returns BNXT_TF_RC_PARSE_ERR on invalid spec/mask or port lookup
 * failure (error paths truncated in this listing).
 */
483 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
484 struct ulp_rte_parser_params *params)
486 const struct rte_flow_item_vf *vf_spec = item->spec;
487 const struct rte_flow_item_vf *vf_mask = item->mask;
490 int32_t rc = BNXT_TF_RC_PARSE_ERR;
492 /* Get VF rte_flow_item for Port details */
494 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
498 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
503 /* perform the conversion from VF Func id to bnxt ifindex */
504 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
507 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
510 /* Update the SVIF details */
511 return ulp_rte_parser_svif_set(params, ifindex, mask);
514 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * Handler for RTE_FLOW_ITEM_TYPE_PORT_ID: validates spec/mask, converts
 * the DPDK port id to a port-db ifindex, and sets the SVIF match using
 * the item's mask (mask taken from port_mask->id).
 */
516 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
517 struct ulp_rte_parser_params *params)
519 const struct rte_flow_item_port_id *port_spec = item->spec;
520 const struct rte_flow_item_port_id *port_mask = item->mask;
522 int32_t rc = BNXT_TF_RC_PARSE_ERR;
526 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
530 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
533 mask = port_mask->id;
535 /* perform the conversion from dpdk port to bnxt ifindex */
536 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
539 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
542 /* Update the SVIF details */
543 return ulp_rte_parser_svif_set(params, ifindex, mask);
546 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * Handler for RTE_FLOW_ITEM_TYPE_PHY_PORT: validates spec/mask, rejects
 * egress flows (physical-port matches are ingress only), looks up the
 * physical port's SVIF directly from the port db, and writes the
 * big-endian SVIF spec/mask into the SVIF hdr_field slot.
 */
548 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
549 struct ulp_rte_parser_params *params)
551 const struct rte_flow_item_phy_port *port_spec = item->spec;
552 const struct rte_flow_item_phy_port *port_mask = item->mask;
554 int32_t rc = BNXT_TF_RC_ERROR;
556 enum bnxt_ulp_direction_type dir;
557 struct ulp_rte_hdr_field *hdr_field;
559 /* Copy the rte_flow_item for phy port into hdr_field */
561 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
565 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
568 mask = port_mask->index;
570 /* Update the match port type */
571 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
572 BNXT_ULP_INTF_TYPE_PHY_PORT);
574 /* Compute the Hw direction */
575 bnxt_ulp_rte_parser_direction_compute(params);
577 /* Direction validation */
578 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
579 if (dir == BNXT_ULP_DIR_EGRESS) {
581 "Parse Err:Phy ports are valid only for ingress\n");
582 return BNXT_TF_RC_PARSE_ERR;
585 /* Get the physical port details from port db */
586 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
589 BNXT_TF_DBG(ERR, "Failed to get port details\n");
590 return BNXT_TF_RC_PARSE_ERR;
593 /* Update the SVIF details */
/* Hardware expects the SVIF in network byte order. */
594 svif = rte_cpu_to_be_16(svif);
/* NOTE(review): '¶ms' is a mis-encoding of '&params' -- repair encoding. */
595 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
596 memcpy(hdr_field->spec, &svif, sizeof(svif));
597 memcpy(hdr_field->mask, &mask, sizeof(mask));
598 hdr_field->size = sizeof(svif);
/* Mark SVIF as set (host order) so no other source item may override it. */
599 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
600 rte_be_to_cpu_16(svif));
601 return BNXT_TF_RC_SUCCESS;
604 /* Function to handle the update of proto header based on field values */
/*
 * Maps an Ethernet 'type' (big-endian ethertype) onto the fast-path
 * header bitmap: IPv4/IPv6 bits for inner (in_flag set) or outer headers,
 * plus the matching I_L3/O_L3 computed-field flag. Other ethertypes are
 * left untouched.
 */
606 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
607 uint16_t type, uint32_t in_flag)
609 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
/* in_flag selects the inner-header variant of the IPv4 bits. */
611 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
612 BNXT_ULP_HDR_BIT_I_IPV4);
613 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
615 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
616 BNXT_ULP_HDR_BIT_O_IPV4);
617 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
619 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
621 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
622 BNXT_ULP_HDR_BIT_I_IPV6);
623 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
625 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
626 BNXT_ULP_HDR_BIT_O_IPV6);
627 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
632 /* Internal Function to identify broadcast or multicast packets */
/*
 * Returns a truthy value when 'eth_addr' is a multicast or broadcast MAC
 * (such addresses cannot be offloaded and the caller rejects the flow);
 * the return statements are not visible in this truncated listing.
 */
634 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
636 if (rte_is_multicast_ether_addr(eth_addr) ||
637 rte_is_broadcast_ether_addr(eth_addr)) {
639 "No support for bcast or mcast addr offload\n");
645 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Handler for RTE_FLOW_ITEM_TYPE_ETH: copies dst MAC, src MAC, and
 * ethertype spec/mask into consecutive hdr_field slots, rejects
 * broadcast/multicast addresses, advances field_idx by
 * BNXT_ULP_PROTO_HDR_ETH_NUM, and sets the inner (I_ETH) or outer (O_ETH)
 * Ethernet bit depending on which protocol bits were already seen.
 * Finally feeds the ethertype into the L2 proto-type update.
 */
647 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
648 struct ulp_rte_parser_params *params)
650 const struct rte_flow_item_eth *eth_spec = item->spec;
651 const struct rte_flow_item_eth *eth_mask = item->mask;
652 struct ulp_rte_hdr_field *field;
653 uint32_t idx = params->field_idx;
655 uint16_t eth_type = 0;
656 uint32_t inner_flag = 0;
659 * Copy the rte_flow_item for eth into hdr_field using ethernet
/* NOTE(review): '¶ms' below is a mis-encoding of '&params', and
 * 'ð_spec' is a mis-encoding of '&eth_spec' -- repair encoding.
 */
663 size = sizeof(eth_spec->dst.addr_bytes);
664 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
665 eth_spec->dst.addr_bytes,
667 /* Todo: work around to avoid multicast and broadcast addr */
668 if (ulp_rte_parser_is_bcmc_addr(ð_spec->dst))
669 return BNXT_TF_RC_PARSE_ERR;
671 size = sizeof(eth_spec->src.addr_bytes);
672 field = ulp_rte_parser_fld_copy(field,
673 eth_spec->src.addr_bytes,
675 /* Todo: work around to avoid multicast and broadcast addr */
676 if (ulp_rte_parser_is_bcmc_addr(ð_spec->src))
677 return BNXT_TF_RC_PARSE_ERR;
679 field = ulp_rte_parser_fld_copy(field,
681 sizeof(eth_spec->type));
/* Keep the (big-endian) ethertype for the L2 proto-type update below. */
682 eth_type = eth_spec->type;
685 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
686 sizeof(eth_mask->dst.addr_bytes));
687 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
688 sizeof(eth_mask->src.addr_bytes));
/* NOTE(review): 'ð_mask' is a mis-encoding of '&eth_mask'. */
689 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
690 sizeof(eth_mask->type));
692 /* Add number of Eth header elements */
693 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
695 /* Update the protocol hdr bitmap */
/* Any prior outer L2/L3/L4 bit means this ETH item is an inner header. */
696 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697 BNXT_ULP_HDR_BIT_O_ETH) ||
698 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699 BNXT_ULP_HDR_BIT_O_IPV4) ||
700 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
701 BNXT_ULP_HDR_BIT_O_IPV6) ||
702 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
703 BNXT_ULP_HDR_BIT_O_UDP) ||
704 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
705 BNXT_ULP_HDR_BIT_O_TCP)) {
706 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
/* First Ethernet header seen: mark it as the outer one. */
709 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
711 /* Update the field protocol hdr bitmap */
712 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
714 return BNXT_TF_RC_SUCCESS;
717 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Handler for RTE_FLOW_ITEM_TYPE_VLAN: splits the TCI into priority and
 * 12-bit VLAN tag (both kept big-endian), copies spec values and masks
 * into hdr_field slots (the priority mask is deliberately ignored -- see
 * the OVS note below), then classifies this VLAN as outer-outer (OO),
 * outer-inner (OI), inner-outer (IO), or inner-inner (II) based on which
 * ETH headers were seen and the running vtag counts, updating the vtag
 * computed fields accordingly. Errors out if no preceding ETH item.
 */
719 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
720 struct ulp_rte_parser_params *params)
722 const struct rte_flow_item_vlan *vlan_spec = item->spec;
723 const struct rte_flow_item_vlan *vlan_mask = item->mask;
724 struct ulp_rte_hdr_field *field;
725 struct ulp_rte_hdr_bitmap *hdr_bit;
726 uint32_t idx = params->field_idx;
727 uint16_t vlan_tag, priority;
728 uint32_t outer_vtag_num;
729 uint32_t inner_vtag_num;
730 uint16_t eth_type = 0;
731 uint32_t inner_flag = 0;
734 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* Split TCI: top 3 bits are PCP priority, low 12 bits the VLAN id. */
738 vlan_tag = ntohs(vlan_spec->tci);
739 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
740 vlan_tag &= ULP_VLAN_TAG_MASK;
741 vlan_tag = htons(vlan_tag);
/* NOTE(review): '¶ms' below is a mis-encoding of '&params'. */
743 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
746 field = ulp_rte_parser_fld_copy(field,
750 field = ulp_rte_parser_fld_copy(field,
751 &vlan_spec->inner_type,
752 sizeof(vlan_spec->inner_type));
/* Keep the encapsulated ethertype for the L2 proto-type update below. */
753 eth_type = vlan_spec->inner_type;
757 vlan_tag = ntohs(vlan_mask->tci);
758 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
762 * the storage for priority and vlan tag is 2 bytes
763 * The mask of priority which is 3 bits if it is all 1's
764 * then make the rest bits 13 bits as 1's
765 * so that it is matched as exact match.
767 if (priority == ULP_VLAN_PRIORITY_MASK)
768 priority |= ~ULP_VLAN_PRIORITY_MASK;
769 if (vlan_tag == ULP_VLAN_TAG_MASK)
770 vlan_tag |= ~ULP_VLAN_TAG_MASK;
771 vlan_tag = htons(vlan_tag);
774 * The priority field is ignored since OVS is setting it as
775 * wild card match and it is not supported. This is a work
776 * around and shall be addressed in the future.
778 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
781 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
783 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
784 sizeof(vlan_mask->inner_type));
786 /* Set the field index to new incremented value */
787 params->field_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
789 /* Get the outer tag and inner tag counts */
790 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
791 BNXT_ULP_CF_IDX_O_VTAG_NUM);
792 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
793 BNXT_ULP_CF_IDX_I_VTAG_NUM);
795 /* Update the hdr_bitmap of the vlans */
/* NOTE(review): '¶ms' is a mis-encoding of '&params'. */
796 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first VLAN after the outer ETH only -> outer-outer (OO) VLAN. */
797 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
798 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
800 /* Update the vlan tag num */
802 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
804 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
805 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
806 ULP_BITMAP_SET(params->hdr_bitmap.bits,
807 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Case 2: second VLAN on the outer ETH -> outer-inner (OI) VLAN. */
808 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
809 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
810 outer_vtag_num == 1) {
811 /* update the vlan tag num */
813 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
815 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
816 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
817 ULP_BITMAP_SET(params->hdr_bitmap.bits,
818 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Case 3: first VLAN after the inner ETH -> inner-outer (IO) VLAN. */
819 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
820 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
822 /* update the vlan tag num */
824 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
826 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
827 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
828 ULP_BITMAP_SET(params->hdr_bitmap.bits,
829 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Case 4: second VLAN on the inner ETH -> inner-inner (II) VLAN. */
831 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
832 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
833 inner_vtag_num == 1) {
834 /* update the vlan tag num */
836 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
838 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
839 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
840 ULP_BITMAP_SET(params->hdr_bitmap.bits,
841 BNXT_ULP_HDR_BIT_II_VLAN);
/* NOTE(review): 'withtout' is a typo for 'without' in this runtime
 * debug string; fix in a code change, not a comment.
 */
844 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
845 return BNXT_TF_RC_ERROR;
847 /* Update the field protocol hdr bitmap */
848 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
849 return BNXT_TF_RC_SUCCESS;
852 /* Function to handle the update of proto header based on field values */
/*
 * Maps an IP 'proto' value onto the fast-path header bitmap: UDP/TCP bits
 * for inner (in_flag set) or outer headers, plus the matching I_L4/O_L4
 * computed-field flag. Other protocols are left untouched.
 */
854 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
855 uint8_t proto, uint32_t in_flag)
857 if (proto == IPPROTO_UDP) {
/* in_flag selects the inner-header variant of the UDP bits. */
859 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
860 BNXT_ULP_HDR_BIT_I_UDP);
861 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
863 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
864 BNXT_ULP_HDR_BIT_O_UDP);
865 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
867 } else if (proto == IPPROTO_TCP) {
869 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
870 BNXT_ULP_HDR_BIT_I_TCP);
871 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
873 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
874 BNXT_ULP_HDR_BIT_O_TCP);
875 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
880 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * Handler for RTE_FLOW_ITEM_TYPE_IPV4: rejects a third L3 header, adjusts
 * the field index for eth-less (F2) flows, copies every IPv4 header field
 * spec and mask into hdr_field slots (ToS mask deliberately ignored --
 * OVS work-around), advances field_idx by BNXT_ULP_PROTO_HDR_IPV4_NUM,
 * sets the inner/outer IPv4 bitmap bits, detects GRE from the (masked)
 * next_proto_id, feeds the protocol into the L4 update, and bumps the L3
 * header count.
 */
882 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
883 struct ulp_rte_parser_params *params)
885 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
886 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
887 struct ulp_rte_hdr_field *field;
/* NOTE(review): '¶ms' here and below is a mis-encoding of '&params'. */
888 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
889 uint32_t idx = params->field_idx;
892 uint32_t inner_flag = 0;
895 /* validate there are no 3rd L3 header */
896 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
898 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
899 return BNXT_TF_RC_ERROR;
902 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
903 BNXT_ULP_HDR_BIT_O_ETH) &&
904 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
905 BNXT_ULP_HDR_BIT_I_ETH)) {
906 /* Since F2 flow does not include eth item, when parser detects
907 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
908 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
909 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
910 * This will allow the parser post processor to update the
911 * t_dmac in hdr_field[o_eth.dmac]
913 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
914 BNXT_ULP_PROTO_HDR_VLAN_NUM);
915 params->field_idx = idx;
919 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
923 size = sizeof(ipv4_spec->hdr.version_ihl);
924 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
925 &ipv4_spec->hdr.version_ihl,
927 size = sizeof(ipv4_spec->hdr.type_of_service);
928 field = ulp_rte_parser_fld_copy(field,
929 &ipv4_spec->hdr.type_of_service,
931 size = sizeof(ipv4_spec->hdr.total_length);
932 field = ulp_rte_parser_fld_copy(field,
933 &ipv4_spec->hdr.total_length,
935 size = sizeof(ipv4_spec->hdr.packet_id);
936 field = ulp_rte_parser_fld_copy(field,
937 &ipv4_spec->hdr.packet_id,
939 size = sizeof(ipv4_spec->hdr.fragment_offset);
940 field = ulp_rte_parser_fld_copy(field,
941 &ipv4_spec->hdr.fragment_offset,
943 size = sizeof(ipv4_spec->hdr.time_to_live);
944 field = ulp_rte_parser_fld_copy(field,
945 &ipv4_spec->hdr.time_to_live,
947 size = sizeof(ipv4_spec->hdr.next_proto_id);
948 field = ulp_rte_parser_fld_copy(field,
949 &ipv4_spec->hdr.next_proto_id,
/* Keep the L4 protocol for the GRE check and L3 proto update below. */
951 proto = ipv4_spec->hdr.next_proto_id;
952 size = sizeof(ipv4_spec->hdr.hdr_checksum);
953 field = ulp_rte_parser_fld_copy(field,
954 &ipv4_spec->hdr.hdr_checksum,
956 size = sizeof(ipv4_spec->hdr.src_addr);
957 field = ulp_rte_parser_fld_copy(field,
958 &ipv4_spec->hdr.src_addr,
960 size = sizeof(ipv4_spec->hdr.dst_addr);
961 field = ulp_rte_parser_fld_copy(field,
962 &ipv4_spec->hdr.dst_addr,
966 ulp_rte_prsr_mask_copy(params, &idx,
967 &ipv4_mask->hdr.version_ihl,
968 sizeof(ipv4_mask->hdr.version_ihl));
970 * The tos field is ignored since OVS is setting it as wild card
971 * match and it is not supported. This is a work around and
972 * shall be addressed in the future.
974 ulp_rte_prsr_mask_ignore(params, &idx,
975 &ipv4_mask->hdr.type_of_service,
976 sizeof(ipv4_mask->hdr.type_of_service)
979 ulp_rte_prsr_mask_copy(params, &idx,
980 &ipv4_mask->hdr.total_length,
981 sizeof(ipv4_mask->hdr.total_length));
982 ulp_rte_prsr_mask_copy(params, &idx,
983 &ipv4_mask->hdr.packet_id,
984 sizeof(ipv4_mask->hdr.packet_id));
985 ulp_rte_prsr_mask_copy(params, &idx,
986 &ipv4_mask->hdr.fragment_offset,
987 sizeof(ipv4_mask->hdr.fragment_offset));
988 ulp_rte_prsr_mask_copy(params, &idx,
989 &ipv4_mask->hdr.time_to_live,
990 sizeof(ipv4_mask->hdr.time_to_live));
991 ulp_rte_prsr_mask_copy(params, &idx,
992 &ipv4_mask->hdr.next_proto_id,
993 sizeof(ipv4_mask->hdr.next_proto_id));
994 ulp_rte_prsr_mask_copy(params, &idx,
995 &ipv4_mask->hdr.hdr_checksum,
996 sizeof(ipv4_mask->hdr.hdr_checksum));
997 ulp_rte_prsr_mask_copy(params, &idx,
998 &ipv4_mask->hdr.src_addr,
999 sizeof(ipv4_mask->hdr.src_addr));
1000 ulp_rte_prsr_mask_copy(params, &idx,
1001 &ipv4_mask->hdr.dst_addr,
1002 sizeof(ipv4_mask->hdr.dst_addr));
1004 /* Add the number of ipv4 header elements */
1005 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
1007 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
/* A prior outer L3 header makes this IPv4 item the inner header. */
1008 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1009 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1010 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1011 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1014 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1015 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1018 /* Some of the PMD applications may set the protocol field
1019 * in the IPv4 spec but don't set the mask. So, consider
1020 * the mask in the proto value calculation.
1023 proto &= ipv4_mask->hdr.next_proto_id;
1025 if (proto == IPPROTO_GRE)
1026 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1028 /* Update the field protocol hdr bitmap */
1029 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1030 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1031 return BNXT_TF_RC_SUCCESS;
1034 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1036 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1037 struct ulp_rte_parser_params *params)
1039 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1040 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1041 struct ulp_rte_hdr_field *field;
1042 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1043 uint32_t idx = params->field_idx;
1045 uint32_t vtcf, vtcf_mask;
1047 uint32_t inner_flag = 0;
1050 /* validate there are no 3rd L3 header */
1051 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1053 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1054 return BNXT_TF_RC_ERROR;
1057 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1058 BNXT_ULP_HDR_BIT_O_ETH) &&
1059 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1060 BNXT_ULP_HDR_BIT_I_ETH)) {
1061 /* Since F2 flow does not include eth item, when parser detects
1062 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1063 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
1064 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1065 * This will allow the parser post processor to update the
1066 * t_dmac in hdr_field[o_eth.dmac]
1068 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1069 BNXT_ULP_PROTO_HDR_VLAN_NUM);
1070 params->field_idx = idx;
1074 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1078 size = sizeof(ipv6_spec->hdr.vtc_flow);
1080 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1081 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1085 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1086 field = ulp_rte_parser_fld_copy(field,
1090 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1091 field = ulp_rte_parser_fld_copy(field,
1095 size = sizeof(ipv6_spec->hdr.payload_len);
1096 field = ulp_rte_parser_fld_copy(field,
1097 &ipv6_spec->hdr.payload_len,
1099 size = sizeof(ipv6_spec->hdr.proto);
1100 field = ulp_rte_parser_fld_copy(field,
1101 &ipv6_spec->hdr.proto,
1103 proto = ipv6_spec->hdr.proto;
1104 size = sizeof(ipv6_spec->hdr.hop_limits);
1105 field = ulp_rte_parser_fld_copy(field,
1106 &ipv6_spec->hdr.hop_limits,
1108 size = sizeof(ipv6_spec->hdr.src_addr);
1109 field = ulp_rte_parser_fld_copy(field,
1110 &ipv6_spec->hdr.src_addr,
1112 size = sizeof(ipv6_spec->hdr.dst_addr);
1113 field = ulp_rte_parser_fld_copy(field,
1114 &ipv6_spec->hdr.dst_addr,
1118 size = sizeof(ipv6_mask->hdr.vtc_flow);
1120 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1121 ulp_rte_prsr_mask_copy(params, &idx,
1125 * The TC and flow label field are ignored since OVS is setting
1126 * it for match and it is not supported.
1127 * This is a work around and
1128 * shall be addressed in the future.
1130 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1131 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1133 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1134 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1136 ulp_rte_prsr_mask_copy(params, &idx,
1137 &ipv6_mask->hdr.payload_len,
1138 sizeof(ipv6_mask->hdr.payload_len));
1139 ulp_rte_prsr_mask_copy(params, &idx,
1140 &ipv6_mask->hdr.proto,
1141 sizeof(ipv6_mask->hdr.proto));
1142 ulp_rte_prsr_mask_copy(params, &idx,
1143 &ipv6_mask->hdr.hop_limits,
1144 sizeof(ipv6_mask->hdr.hop_limits));
1145 ulp_rte_prsr_mask_copy(params, &idx,
1146 &ipv6_mask->hdr.src_addr,
1147 sizeof(ipv6_mask->hdr.src_addr));
1148 ulp_rte_prsr_mask_copy(params, &idx,
1149 &ipv6_mask->hdr.dst_addr,
1150 sizeof(ipv6_mask->hdr.dst_addr));
1152 /* add number of ipv6 header elements */
1153 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1155 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1156 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1157 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1158 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1159 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1162 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1163 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1166 /* Some of the PMD applications may set the protocol field
1167 * in the IPv6 spec but don't set the mask. So, consider
1168 * the mask in proto value calculation.
1171 proto &= ipv6_mask->hdr.proto;
1173 if (proto == IPPROTO_GRE)
1174 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1176 /* Update the field protocol hdr bitmap */
1177 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1178 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1180 return BNXT_TF_RC_SUCCESS;
1183 /* Function to handle the update of proto header based on field values */
1185 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1188 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1189 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1190 BNXT_ULP_HDR_BIT_T_VXLAN);
1191 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1195 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies the UDP spec fields (src/dst port, length, checksum) into
 * params->hdr_field, copies the mask bytes, advances field_idx by the
 * UDP field count, and sets the outer or inner UDP bitmap bit depending
 * on whether an outer L4 header was already seen.
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR on a third L4 header.
 * NOTE(review): this chunk has extraction gaps (null-checks on spec/mask,
 * the cnt limit test, braces and some literal arguments are missing from
 * this view); code lines below are preserved byte-for-byte — reconcile
 * against the upstream file before editing.
 */
1197 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1198 struct ulp_rte_parser_params *params)
1200 const struct rte_flow_item_udp *udp_spec = item->spec;
1201 const struct rte_flow_item_udp *udp_mask = item->mask;
1202 struct ulp_rte_hdr_field *field;
1203 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1204 uint32_t idx = params->field_idx;
/* reject more than two stacked L4 headers (outer + inner only) */
1209 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1211 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1212 return BNXT_TF_RC_ERROR;
1216 * Copy the rte_flow_item for udp into hdr_field using udp
1220 size = sizeof(udp_spec->hdr.src_port);
1221 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1222 &udp_spec->hdr.src_port,
1224 size = sizeof(udp_spec->hdr.dst_port);
1225 field = ulp_rte_parser_fld_copy(field,
1226 &udp_spec->hdr.dst_port,
/* remember dst port (BE) for VXLAN detection at the end */
1228 dport = udp_spec->hdr.dst_port;
1229 size = sizeof(udp_spec->hdr.dgram_len);
1230 field = ulp_rte_parser_fld_copy(field,
1231 &udp_spec->hdr.dgram_len,
1233 size = sizeof(udp_spec->hdr.dgram_cksum);
1234 field = ulp_rte_parser_fld_copy(field,
1235 &udp_spec->hdr.dgram_cksum,
/* mask copies also update the field bitmap via the helper */
1239 ulp_rte_prsr_mask_copy(params, &idx,
1240 &udp_mask->hdr.src_port,
1241 sizeof(udp_mask->hdr.src_port));
1242 ulp_rte_prsr_mask_copy(params, &idx,
1243 &udp_mask->hdr.dst_port,
1244 sizeof(udp_mask->hdr.dst_port));
1245 ulp_rte_prsr_mask_copy(params, &idx,
1246 &udp_mask->hdr.dgram_len,
1247 sizeof(udp_mask->hdr.dgram_len));
1248 ulp_rte_prsr_mask_copy(params, &idx,
1249 &udp_mask->hdr.dgram_cksum,
1250 sizeof(udp_mask->hdr.dgram_cksum));
1253 /* Add number of UDP header elements */
1254 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1256 /* Set the udp header bitmap and computed l4 header bitmaps */
/* an existing outer L4 header means this UDP item is the inner one */
1257 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1258 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1259 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1260 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1261 if (udp_mask && udp_mask->hdr.src_port)
1262 ULP_COMP_FLD_IDX_WR(params,
1263 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1265 if (udp_mask && udp_mask->hdr.dst_port)
1266 ULP_COMP_FLD_IDX_WR(params,
1267 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1271 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1272 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1273 if (udp_mask && udp_mask->hdr.src_port)
1274 ULP_COMP_FLD_IDX_WR(params,
1275 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1277 if (udp_mask && udp_mask->hdr.dst_port)
1278 ULP_COMP_FLD_IDX_WR(params,
1279 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1282 /* Update the field protocol hdr bitmap */
1283 ulp_rte_l4_proto_type_update(params, dport);
1285 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1286 return BNXT_TF_RC_SUCCESS;
1289 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies the TCP spec fields (ports, seq/ack, data offset, flags,
 * window, checksum, urgent pointer) into params->hdr_field, copies the
 * masks, advances field_idx, and sets the outer or inner TCP bitmap bit.
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR on a third L4 header.
 * NOTE(review): extraction gaps — null-checks on spec/mask, the cnt
 * limit test, braces and some literal arguments are missing from this
 * view; code lines are preserved byte-for-byte.
 */
1291 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1292 struct ulp_rte_parser_params *params)
1294 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1295 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1296 struct ulp_rte_hdr_field *field;
1297 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1298 uint32_t idx = params->field_idx;
/* reject more than two stacked L4 headers (outer + inner only) */
1302 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1304 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1305 return BNXT_TF_RC_ERROR;
1309 * Copy the rte_flow_item for tcp into hdr_field using tcp
1313 size = sizeof(tcp_spec->hdr.src_port);
1314 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1315 &tcp_spec->hdr.src_port,
1317 size = sizeof(tcp_spec->hdr.dst_port);
1318 field = ulp_rte_parser_fld_copy(field,
1319 &tcp_spec->hdr.dst_port,
1321 size = sizeof(tcp_spec->hdr.sent_seq);
1322 field = ulp_rte_parser_fld_copy(field,
1323 &tcp_spec->hdr.sent_seq,
1325 size = sizeof(tcp_spec->hdr.recv_ack);
1326 field = ulp_rte_parser_fld_copy(field,
1327 &tcp_spec->hdr.recv_ack,
1329 size = sizeof(tcp_spec->hdr.data_off);
1330 field = ulp_rte_parser_fld_copy(field,
1331 &tcp_spec->hdr.data_off,
1333 size = sizeof(tcp_spec->hdr.tcp_flags);
1334 field = ulp_rte_parser_fld_copy(field,
1335 &tcp_spec->hdr.tcp_flags,
1337 size = sizeof(tcp_spec->hdr.rx_win);
1338 field = ulp_rte_parser_fld_copy(field,
1339 &tcp_spec->hdr.rx_win,
1341 size = sizeof(tcp_spec->hdr.cksum);
1342 field = ulp_rte_parser_fld_copy(field,
1343 &tcp_spec->hdr.cksum,
1345 size = sizeof(tcp_spec->hdr.tcp_urp);
1346 field = ulp_rte_parser_fld_copy(field,
1347 &tcp_spec->hdr.tcp_urp,
/* when spec is absent the idx is advanced past the TCP field slots
 * — presumably in an else branch lost to extraction; TODO confirm */
1350 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1354 ulp_rte_prsr_mask_copy(params, &idx,
1355 &tcp_mask->hdr.src_port,
1356 sizeof(tcp_mask->hdr.src_port));
1357 ulp_rte_prsr_mask_copy(params, &idx,
1358 &tcp_mask->hdr.dst_port,
1359 sizeof(tcp_mask->hdr.dst_port));
1360 ulp_rte_prsr_mask_copy(params, &idx,
1361 &tcp_mask->hdr.sent_seq,
1362 sizeof(tcp_mask->hdr.sent_seq));
1363 ulp_rte_prsr_mask_copy(params, &idx,
1364 &tcp_mask->hdr.recv_ack,
1365 sizeof(tcp_mask->hdr.recv_ack));
1366 ulp_rte_prsr_mask_copy(params, &idx,
1367 &tcp_mask->hdr.data_off,
1368 sizeof(tcp_mask->hdr.data_off));
1369 ulp_rte_prsr_mask_copy(params, &idx,
1370 &tcp_mask->hdr.tcp_flags,
1371 sizeof(tcp_mask->hdr.tcp_flags));
1372 ulp_rte_prsr_mask_copy(params, &idx,
1373 &tcp_mask->hdr.rx_win,
1374 sizeof(tcp_mask->hdr.rx_win));
1375 ulp_rte_prsr_mask_copy(params, &idx,
1376 &tcp_mask->hdr.cksum,
1377 sizeof(tcp_mask->hdr.cksum));
1378 ulp_rte_prsr_mask_copy(params, &idx,
1379 &tcp_mask->hdr.tcp_urp,
1380 sizeof(tcp_mask->hdr.tcp_urp));
1382 /* add number of TCP header elements */
1383 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1385 /* Set the tcp header bitmap and computed l4 header bitmaps */
/* an existing outer L4 header means this TCP item is the inner one */
1386 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1387 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1388 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1389 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1390 if (tcp_mask && tcp_mask->hdr.src_port)
1391 ULP_COMP_FLD_IDX_WR(params,
1392 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1394 if (tcp_mask && tcp_mask->hdr.dst_port)
1395 ULP_COMP_FLD_IDX_WR(params,
1396 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1399 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1400 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1401 if (tcp_mask && tcp_mask->hdr.src_port)
1402 ULP_COMP_FLD_IDX_WR(params,
1403 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1405 if (tcp_mask && tcp_mask->hdr.dst_port)
1406 ULP_COMP_FLD_IDX_WR(params,
1407 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1410 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1411 return BNXT_TF_RC_SUCCESS;
1414 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies the VXLAN spec fields (flags, rsvd0, vni, rsvd1) into
 * params->hdr_field, copies the masks, advances field_idx, and sets the
 * tunnel VXLAN bit in the header bitmap. Always succeeds.
 * NOTE(review): extraction gaps — null-checks on spec/mask and several
 * argument lines are missing from this view; code lines are preserved
 * byte-for-byte.
 */
1416 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1417 struct ulp_rte_parser_params *params)
1419 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1420 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1421 struct ulp_rte_hdr_field *field;
1422 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1423 uint32_t idx = params->field_idx;
1427 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1431 size = sizeof(vxlan_spec->flags);
1432 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1435 size = sizeof(vxlan_spec->rsvd0);
1436 field = ulp_rte_parser_fld_copy(field,
1439 size = sizeof(vxlan_spec->vni);
1440 field = ulp_rte_parser_fld_copy(field,
1443 size = sizeof(vxlan_spec->rsvd1);
1444 field = ulp_rte_parser_fld_copy(field,
1449 ulp_rte_prsr_mask_copy(params, &idx,
1451 sizeof(vxlan_mask->flags));
1452 ulp_rte_prsr_mask_copy(params, &idx,
1454 sizeof(vxlan_mask->rsvd0));
1455 ulp_rte_prsr_mask_copy(params, &idx,
1457 sizeof(vxlan_mask->vni));
1458 ulp_rte_prsr_mask_copy(params, &idx,
1460 sizeof(vxlan_mask->rsvd1));
1462 /* Add number of vxlan header elements */
1463 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1465 /* Update the hdr_bitmap with vxlan */
1466 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1467 return BNXT_TF_RC_SUCCESS;
1470 /* Function to handle the parsing of RTE Flow item GRE Header. */
/*
 * Copies the GRE spec fields (c_rsvd0_ver, protocol) into
 * params->hdr_field, copies the masks, advances field_idx, and sets the
 * tunnel GRE bit. Rejects an item with neither spec nor mask.
 * NOTE(review): extraction gaps — spec/mask guard braces and some
 * argument lines are missing from this view; code lines are preserved
 * byte-for-byte.
 */
1472 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1473 struct ulp_rte_parser_params *params)
1475 const struct rte_flow_item_gre *gre_spec = item->spec;
1476 const struct rte_flow_item_gre *gre_mask = item->mask;
1477 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1478 uint32_t idx = params->field_idx;
1480 struct ulp_rte_hdr_field *field;
/* a GRE item with no spec and no mask gives us nothing to match on */
1482 if (!gre_spec && !gre_mask) {
1483 BNXT_TF_DBG(ERR, "Parse Error: GRE item is invalid\n");
1484 return BNXT_TF_RC_ERROR;
1488 size = sizeof(gre_spec->c_rsvd0_ver);
1489 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1490 &gre_spec->c_rsvd0_ver,
1492 size = sizeof(gre_spec->protocol);
1493 field = ulp_rte_parser_fld_copy(field,
1494 &gre_spec->protocol,
1498 ulp_rte_prsr_mask_copy(params, &idx,
1499 &gre_mask->c_rsvd0_ver,
1500 sizeof(gre_mask->c_rsvd0_ver));
1501 ulp_rte_prsr_mask_copy(params, &idx,
1502 &gre_mask->protocol,
1503 sizeof(gre_mask->protocol));
1505 /* Add number of GRE header elements */
1506 params->field_idx += BNXT_ULP_PROTO_HDR_GRE_NUM;
1508 /* Update the hdr_bitmap with GRE */
1509 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1510 return BNXT_TF_RC_SUCCESS;
1513 /* Function to handle the parsing of RTE Flow item ANY. */
1515 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1516 struct ulp_rte_parser_params *params __rte_unused)
1518 return BNXT_TF_RC_SUCCESS;
1521 /* Function to handle the parsing of RTE Flow item void Header */
1523 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1524 struct ulp_rte_parser_params *params __rte_unused)
1526 return BNXT_TF_RC_SUCCESS;
1529 /* Function to handle the parsing of RTE Flow action void Header. */
1531 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1532 struct ulp_rte_parser_params *params __rte_unused)
1534 return BNXT_TF_RC_SUCCESS;
1537 /* Function to handle the parsing of RTE Flow action Mark Header. */
1539 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1540 struct ulp_rte_parser_params *param)
1542 const struct rte_flow_action_mark *mark;
1543 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1546 mark = action_item->conf;
1548 mark_id = tfp_cpu_to_be_32(mark->id);
1549 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1550 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1552 /* Update the hdr_bitmap with vxlan */
1553 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1554 return BNXT_TF_RC_SUCCESS;
1556 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1557 return BNXT_TF_RC_ERROR;
1560 /* Function to handle the parsing of RTE Flow action RSS Header. */
1562 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1563 struct ulp_rte_parser_params *param)
1565 const struct rte_flow_action_rss *rss = action_item->conf;
1568 /* Update the hdr_bitmap with vxlan */
1569 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1570 return BNXT_TF_RC_SUCCESS;
1572 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1573 return BNXT_TF_RC_ERROR;
1576 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walks the vxlan_encap definition item list (ETH, optional VLANs,
 * IPv4 or IPv6, UDP, VXLAN) and serializes each header into the encap
 * action-property buffers (L2 MACs, VLAN tags, IP header, UDP, tunnel).
 * Missing IP spec bytes are filled from default header templates.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR on a malformed list.
 * NOTE(review): heavy extraction gaps in this chunk — null-checks,
 * several argument lines, closing braces and the default-header tail
 * bytes are missing from this view; code lines are preserved
 * byte-for-byte — reconcile against the upstream file before editing.
 */
1578 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1579 struct ulp_rte_parser_params *params)
1581 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1582 const struct rte_flow_item *item;
1583 const struct rte_flow_item_eth *eth_spec;
1584 const struct rte_flow_item_ipv4 *ipv4_spec;
1585 const struct rte_flow_item_ipv6 *ipv6_spec;
1586 struct rte_flow_item_vxlan vxlan_spec;
1587 uint32_t vlan_num = 0, vlan_size = 0;
1588 uint32_t ip_size = 0, ip_type = 0;
1589 uint32_t vxlan_size = 0;
1591 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1592 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1594 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1595 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1597 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1598 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1599 const uint8_t *tmp_buff;
1601 vxlan_encap = action_item->conf;
1603 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1604 return BNXT_TF_RC_ERROR;
1607 item = vxlan_encap->definition;
1609 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1610 return BNXT_TF_RC_ERROR;
1613 if (!ulp_rte_item_skip_void(&item, 0))
1614 return BNXT_TF_RC_ERROR;
1616 /* must have ethernet header */
1617 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1618 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1619 return BNXT_TF_RC_ERROR;
1621 eth_spec = item->spec;
1622 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1623 ulp_encap_buffer_copy(buff,
1624 eth_spec->dst.addr_bytes,
1625 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1626 ULP_BUFFER_ALIGN_8_BYTE);
1628 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1629 ulp_encap_buffer_copy(buff,
1630 eth_spec->src.addr_bytes,
1631 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1632 ULP_BUFFER_ALIGN_8_BYTE);
1634 /* Goto the next item */
1635 if (!ulp_rte_item_skip_void(&item, 1))
1636 return BNXT_TF_RC_ERROR;
1638 /* May have vlan header */
1639 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1641 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1642 ulp_encap_buffer_copy(buff,
1644 sizeof(struct rte_flow_item_vlan),
1645 ULP_BUFFER_ALIGN_8_BYTE);
1647 if (!ulp_rte_item_skip_void(&item, 1))
1648 return BNXT_TF_RC_ERROR;
1651 /* may have two vlan headers */
1652 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
/* second tag is appended right after the first in the VTAG buffer */
1654 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1655 sizeof(struct rte_flow_item_vlan)],
1657 sizeof(struct rte_flow_item_vlan));
1658 if (!ulp_rte_item_skip_void(&item, 1))
1659 return BNXT_TF_RC_ERROR;
1661 /* Update the vlan count and size of more than one */
1663 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1664 vlan_num = tfp_cpu_to_be_32(vlan_num);
1665 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1668 vlan_size = tfp_cpu_to_be_32(vlan_size);
1669 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1674 /* L3 must be IPv4, IPv6 */
1675 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1676 ipv4_spec = item->spec;
1677 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1679 /* copy the ipv4 details */
/* empty leading spec bytes -> use the default header template */
1680 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1681 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1682 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1683 ulp_encap_buffer_copy(buff,
1685 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1686 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1687 ULP_BUFFER_ALIGN_8_BYTE);
1689 /* Total length being ignored in the ip hdr. */
1690 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1691 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1692 ulp_encap_buffer_copy(buff,
1694 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1695 ULP_BUFFER_ALIGN_8_BYTE);
1696 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1697 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1698 ulp_encap_buffer_copy(buff,
1699 &ipv4_spec->hdr.version_ihl,
1700 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1701 ULP_BUFFER_ALIGN_8_BYTE);
1704 /* Update the dst ip address in ip encap buffer */
1705 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1706 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1707 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1708 ulp_encap_buffer_copy(buff,
1709 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1710 sizeof(ipv4_spec->hdr.dst_addr),
1711 ULP_BUFFER_ALIGN_8_BYTE);
1713 /* Update the src ip address */
1714 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1715 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1716 sizeof(ipv4_spec->hdr.src_addr)];
1717 ulp_encap_buffer_copy(buff,
1718 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1719 sizeof(ipv4_spec->hdr.src_addr),
1720 ULP_BUFFER_ALIGN_8_BYTE);
1722 /* Update the ip size details */
1723 ip_size = tfp_cpu_to_be_32(ip_size);
1724 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1725 &ip_size, sizeof(uint32_t));
1727 /* update the ip type */
1728 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1729 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1730 &ip_type, sizeof(uint32_t));
1732 /* update the computed field to notify it is ipv4 header */
1733 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1736 if (!ulp_rte_item_skip_void(&item, 1))
1737 return BNXT_TF_RC_ERROR;
1738 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1739 ipv6_spec = item->spec;
1740 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1742 /* copy the ipv6 details */
1743 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
/* empty leading spec bytes -> use the default header template */
1744 if (ulp_buffer_is_empty(tmp_buff,
1745 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1746 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1747 ulp_encap_buffer_copy(buff,
1749 sizeof(def_ipv6_hdr),
1750 ULP_BUFFER_ALIGN_8_BYTE);
1752 /* The payload length being ignored in the ip hdr. */
1753 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1754 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1755 ulp_encap_buffer_copy(buff,
1757 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1758 ULP_BUFFER_ALIGN_8_BYTE);
1759 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1760 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1761 BNXT_ULP_ENCAP_IPV6_DO];
1762 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1763 ulp_encap_buffer_copy(buff,
1765 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1766 ULP_BUFFER_ALIGN_8_BYTE);
1768 /* Update the dst ip address in ip encap buffer */
1769 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1770 sizeof(def_ipv6_hdr)];
1771 ulp_encap_buffer_copy(buff,
1772 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1773 sizeof(ipv6_spec->hdr.dst_addr),
1774 ULP_BUFFER_ALIGN_8_BYTE);
1776 /* Update the src ip address */
1777 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1778 ulp_encap_buffer_copy(buff,
1779 (const uint8_t *)ipv6_spec->hdr.src_addr,
1780 sizeof(ipv6_spec->hdr.src_addr),
1781 ULP_BUFFER_ALIGN_16_BYTE);
1783 /* Update the ip size details */
1784 ip_size = tfp_cpu_to_be_32(ip_size);
1785 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1786 &ip_size, sizeof(uint32_t));
1788 /* update the ip type */
1789 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1790 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1791 &ip_type, sizeof(uint32_t));
1793 /* update the computed field to notify it is ipv6 header */
1794 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1797 if (!ulp_rte_item_skip_void(&item, 1))
1798 return BNXT_TF_RC_ERROR;
1800 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1801 return BNXT_TF_RC_ERROR;
1805 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1806 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1807 return BNXT_TF_RC_ERROR;
1809 /* copy the udp details */
1810 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1811 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1812 ULP_BUFFER_ALIGN_8_BYTE);
1814 if (!ulp_rte_item_skip_void(&item, 1))
1815 return BNXT_TF_RC_ERROR;
1818 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1819 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1820 return BNXT_TF_RC_ERROR;
1822 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1823 /* copy the vxlan details */
/* force the I (valid VNI) flag per RFC 7348 */
1824 memcpy(&vxlan_spec, item->spec, vxlan_size);
1825 vxlan_spec.flags = 0x08;
1826 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
1827 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1828 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1829 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
/* presumably the IPv6 path copies the header in two aligned halves
 * — the else line is lost to extraction; TODO confirm */
1831 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1832 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1833 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1834 (const uint8_t *)&vxlan_spec.vni,
1835 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1837 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1838 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1839 &vxlan_size, sizeof(uint32_t));
1841 /* update the hdr_bitmap with vxlan */
1842 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
1843 return BNXT_TF_RC_SUCCESS;
1846 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1848 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1850 struct ulp_rte_parser_params *params)
1852 /* update the hdr_bitmap with vxlan */
1853 ULP_BITMAP_SET(params->act_bitmap.bits,
1854 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
1855 /* Update computational field with tunnel decap info */
1856 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1857 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1858 return BNXT_TF_RC_SUCCESS;
1861 /* Function to handle the parsing of RTE Flow action drop Header. */
1863 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1864 struct ulp_rte_parser_params *params)
1866 /* Update the hdr_bitmap with drop */
1867 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
1868 return BNXT_TF_RC_SUCCESS;
1871 /* Function to handle the parsing of RTE Flow action count. */
1873 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1874 struct ulp_rte_parser_params *params)
1877 const struct rte_flow_action_count *act_count;
1878 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1880 act_count = action_item->conf;
1882 if (act_count->shared) {
1884 "Parse Error:Shared count not supported\n");
1885 return BNXT_TF_RC_PARSE_ERR;
1887 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1889 BNXT_ULP_ACT_PROP_SZ_COUNT);
1892 /* Update the hdr_bitmap with count */
1893 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
1894 return BNXT_TF_RC_SUCCESS;
1897 /* Function to handle the parsing of action ports. */
/*
 * Resolves the destination for a port action: for egress flows the
 * physical vport of ifindex is written to the VPORT action property;
 * for ingress flows the default VNIC (VF-func VNIC for VF-rep targets,
 * driver-func VNIC otherwise) is written to the VNIC property. Finally
 * marks the "action port set" computed field.
 * NOTE(review): extraction gaps — declarations of ifindex/pid/pid_s/
 * vnic_type, braces and some assignments are missing from this view;
 * code lines are preserved byte-for-byte.
 */
1899 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1902 enum bnxt_ulp_direction_type dir;
1905 struct ulp_rte_act_prop *act = ¶m->act_prop;
1906 enum bnxt_ulp_intf_type port_type;
1909 /* Get the direction */
1910 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1911 if (dir == BNXT_ULP_DIR_EGRESS) {
1912 /* For egress direction, fill vport */
1913 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1914 return BNXT_TF_RC_ERROR;
1917 pid = rte_cpu_to_be_32(pid);
1918 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1919 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1921 /* For ingress direction, fill vnic */
1922 port_type = ULP_COMP_FLD_IDX_RD(param,
1923 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1924 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1925 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1927 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1929 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1931 return BNXT_TF_RC_ERROR;
1934 pid = rte_cpu_to_be_32(pid);
1935 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1936 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1939 /* Update the action port set bit */
1940 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1941 return BNXT_TF_RC_SUCCESS;
1944 /* Function to handle the parsing of RTE Flow action PF. */
/*
 * Redirect-to-PF action: resolves the current device's port db index,
 * verifies it really is a PF, records the port type and delegates the
 * vport/vnic programming to ulp_rte_parser_act_port_set().
 * NOTE(review): extraction gaps — declarations of port_id/ifindex and
 * some braces are missing from this view; code lines are preserved
 * byte-for-byte.
 */
1946 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1947 struct ulp_rte_parser_params *params)
1951 enum bnxt_ulp_intf_type intf_type;
1953 /* Get the port id of the current device */
1954 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1956 /* Get the port db ifindex */
1957 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1959 BNXT_TF_DBG(ERR, "Invalid port id\n");
1960 return BNXT_TF_RC_ERROR;
1963 /* Check the port is PF port */
1964 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1965 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1966 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1967 return BNXT_TF_RC_ERROR;
1969 /* Update the action properties */
1970 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1971 return ulp_rte_parser_act_port_set(params, ifindex);
1974 /* Function to handle the parsing of RTE Flow action VF. */
/*
 * Redirect-to-VF action: converts the logical VF id in the action conf
 * (an offset from the port's first VF) to an absolute function id,
 * resolves and validates the port db ifindex as a (trusted) VF, records
 * the port type and delegates to ulp_rte_parser_act_port_set().
 * Rejects NULL conf and the "original" attribute.
 * NOTE(review): extraction gaps — declarations of ifindex and bp, and
 * some null-check braces are missing from this view; code lines are
 * preserved byte-for-byte.
 */
1976 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1977 struct ulp_rte_parser_params *params)
1979 const struct rte_flow_action_vf *vf_action;
1980 enum bnxt_ulp_intf_type intf_type;
1984 vf_action = action_item->conf;
1986 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1987 return BNXT_TF_RC_PARSE_ERR;
1990 if (vf_action->original) {
1991 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1992 return BNXT_TF_RC_PARSE_ERR;
1995 bp = bnxt_get_bp(params->port_id);
1997 BNXT_TF_DBG(ERR, "Invalid bp\n");
1998 return BNXT_TF_RC_ERROR;
2001 /* vf_action->id is a logical number which in this case is an
2002 * offset from the first VF. So, to get the absolute VF id, the
2003 * offset must be added to the absolute first vf id of that port.
2005 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2006 bp->first_vf_id + vf_action->id,
2008 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2009 return BNXT_TF_RC_ERROR;
2011 /* Check the port is VF port */
2012 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2013 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2014 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2015 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2016 return BNXT_TF_RC_ERROR;
2019 /* Update the action properties */
2020 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2021 return ulp_rte_parser_act_port_set(params, ifindex);
2024 /* Function to handle the parsing of RTE Flow action port_id. */
/*
 * Redirect-to-port_id action: resolves the DPDK port id in the action
 * conf to a port db ifindex, validates its interface type, records the
 * type and delegates to ulp_rte_parser_act_port_set(). Rejects NULL
 * conf and the "original" attribute.
 * NOTE(review): extraction gaps — declaration of ifindex and several
 * guard braces are missing from this view; code lines are preserved
 * byte-for-byte.
 */
2026 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
2027 struct ulp_rte_parser_params *param)
2029 const struct rte_flow_action_port_id *port_id = act_item->conf;
2031 enum bnxt_ulp_intf_type intf_type;
2035 "ParseErr: Invalid Argument\n");
2036 return BNXT_TF_RC_PARSE_ERR;
2038 if (port_id->original) {
2040 "ParseErr:Portid Original not supported\n");
2041 return BNXT_TF_RC_PARSE_ERR;
2044 /* Get the port db ifindex */
2045 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2047 BNXT_TF_DBG(ERR, "Invalid port id\n");
2048 return BNXT_TF_RC_ERROR;
2051 /* Get the intf type */
2052 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2054 BNXT_TF_DBG(ERR, "Invalid port type\n");
2055 return BNXT_TF_RC_ERROR;
2058 /* Set the action port */
2059 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2060 return ulp_rte_parser_act_port_set(param, ifindex);
2063 /* Function to handle the parsing of RTE Flow action phy_port. */
/*
 * Redirect-to-physical-port action: only valid on egress flows.
 * Resolves the phy port index to a vport via the port db, writes it to
 * the VPORT action property (big-endian) and marks the action port as
 * set with type PHY_PORT. Rejects NULL conf and "original".
 * NOTE(review): extraction gaps — declarations of pid_s/pid/rc and some
 * guard braces are missing from this view; code lines are preserved
 * byte-for-byte.
 */
2065 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2066 struct ulp_rte_parser_params *prm)
2068 const struct rte_flow_action_phy_port *phy_port;
2072 enum bnxt_ulp_direction_type dir;
2074 phy_port = action_item->conf;
2077 "ParseErr: Invalid Argument\n");
2078 return BNXT_TF_RC_PARSE_ERR;
2081 if (phy_port->original) {
2083 "Parse Err:Port Original not supported\n");
2084 return BNXT_TF_RC_PARSE_ERR;
2086 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2087 if (dir != BNXT_ULP_DIR_EGRESS) {
2089 "Parse Err:Phy ports are valid only for egress\n");
2090 return BNXT_TF_RC_PARSE_ERR;
2092 /* Get the physical port details from port db */
2093 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2096 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2101 pid = rte_cpu_to_be_32(pid);
2102 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2103 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2105 /* Update the action port set bit */
2106 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2107 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2108 BNXT_ULP_INTF_TYPE_PHY_PORT);
2109 return BNXT_TF_RC_SUCCESS;
2112 /* Function to handle the parsing of RTE Flow action pop vlan. */
2114 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2115 struct ulp_rte_parser_params *params)
2117 /* Update the act_bitmap with pop */
2118 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2119 return BNXT_TF_RC_SUCCESS;
2122 /* Function to handle the parsing of RTE Flow action push vlan. */
2124 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2125 struct ulp_rte_parser_params *params)
2127 const struct rte_flow_action_of_push_vlan *push_vlan;
2129 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2131 push_vlan = action_item->conf;
2133 ethertype = push_vlan->ethertype;
2134 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2136 "Parse Err: Ethertype not supported\n");
2137 return BNXT_TF_RC_PARSE_ERR;
2139 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2140 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2141 /* Update the hdr_bitmap with push vlan */
2142 ULP_BITMAP_SET(params->act_bitmap.bits,
2143 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2144 return BNXT_TF_RC_SUCCESS;
2146 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2147 return BNXT_TF_RC_ERROR;
2150 /* Function to handle the parsing of RTE Flow action set vlan id. */
2152 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2153 struct ulp_rte_parser_params *params)
2155 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2157 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2159 vlan_vid = action_item->conf;
2160 if (vlan_vid && vlan_vid->vlan_vid) {
2161 vid = vlan_vid->vlan_vid;
2162 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2163 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2164 /* Update the hdr_bitmap with vlan vid */
2165 ULP_BITMAP_SET(params->act_bitmap.bits,
2166 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2167 return BNXT_TF_RC_SUCCESS;
2169 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2170 return BNXT_TF_RC_ERROR;
2173 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2175 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2176 struct ulp_rte_parser_params *params)
2178 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2180 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2182 vlan_pcp = action_item->conf;
2184 pcp = vlan_pcp->vlan_pcp;
2185 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2186 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2187 /* Update the hdr_bitmap with vlan vid */
2188 ULP_BITMAP_SET(params->act_bitmap.bits,
2189 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2190 return BNXT_TF_RC_SUCCESS;
2192 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2193 return BNXT_TF_RC_ERROR;
2196 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2198 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2199 struct ulp_rte_parser_params *params)
2201 const struct rte_flow_action_set_ipv4 *set_ipv4;
2202 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2204 set_ipv4 = action_item->conf;
2206 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2207 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2208 /* Update the hdr_bitmap with set ipv4 src */
2209 ULP_BITMAP_SET(params->act_bitmap.bits,
2210 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2211 return BNXT_TF_RC_SUCCESS;
2213 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2214 return BNXT_TF_RC_ERROR;
2217 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2219 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2220 struct ulp_rte_parser_params *params)
2222 const struct rte_flow_action_set_ipv4 *set_ipv4;
2223 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2225 set_ipv4 = action_item->conf;
2227 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2228 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2229 /* Update the hdr_bitmap with set ipv4 dst */
2230 ULP_BITMAP_SET(params->act_bitmap.bits,
2231 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2232 return BNXT_TF_RC_SUCCESS;
2234 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2235 return BNXT_TF_RC_ERROR;
2238 /* Function to handle the parsing of RTE Flow action set tp src.*/
2240 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2241 struct ulp_rte_parser_params *params)
2243 const struct rte_flow_action_set_tp *set_tp;
2244 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2246 set_tp = action_item->conf;
2248 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2249 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2250 /* Update the hdr_bitmap with set tp src */
2251 ULP_BITMAP_SET(params->act_bitmap.bits,
2252 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2253 return BNXT_TF_RC_SUCCESS;
2256 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2257 return BNXT_TF_RC_ERROR;
2260 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2262 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2263 struct ulp_rte_parser_params *params)
2265 const struct rte_flow_action_set_tp *set_tp;
2266 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2268 set_tp = action_item->conf;
2270 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2271 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2272 /* Update the hdr_bitmap with set tp dst */
2273 ULP_BITMAP_SET(params->act_bitmap.bits,
2274 BNXT_ULP_ACT_BIT_SET_TP_DST);
2275 return BNXT_TF_RC_SUCCESS;
2278 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2279 return BNXT_TF_RC_ERROR;
2282 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2284 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2285 struct ulp_rte_parser_params *params)
2287 /* Update the act_bitmap with dec ttl */
2288 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2289 return BNXT_TF_RC_SUCCESS;
2292 /* Function to handle the parsing of RTE Flow action JUMP */
2294 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2295 struct ulp_rte_parser_params *params)
2297 /* Update the act_bitmap with dec ttl */
2298 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2299 return BNXT_TF_RC_SUCCESS;
2303 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2304 struct ulp_rte_parser_params *params)
2306 const struct rte_flow_action_sample *sample;
2309 sample = action_item->conf;
2311 /* if SAMPLE bit is set it means this sample action is nested within the
2312 * actions of another sample action; this is not allowed
2314 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2315 BNXT_ULP_ACT_BIT_SAMPLE))
2316 return BNXT_TF_RC_ERROR;
2318 /* a sample action is only allowed as a shared action */
2319 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2320 BNXT_ULP_ACT_BIT_SHARED))
2321 return BNXT_TF_RC_ERROR;
2323 /* only a ratio of 1 i.e. 100% is supported */
2324 if (sample->ratio != 1)
2325 return BNXT_TF_RC_ERROR;
2327 if (!sample->actions)
2328 return BNXT_TF_RC_ERROR;
2330 /* parse the nested actions for a sample action */
2331 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2332 if (ret == BNXT_TF_RC_SUCCESS)
2333 /* Update the act_bitmap with sample */
2334 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SAMPLE);