1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
19 #include "ulp_template_db_tbl.h"
21 /* Local defines for the parsing functions */
22 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
23 #define ULP_VLAN_PRIORITY_MASK 0x700
24 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
25 #define ULP_UDP_PORT_VXLAN 4789
27 /* Utility function to skip the void items. */
/*
 * NOTE(review): this listing elides lines (embedded numbering jumps), so the
 * return type and part of the body are not visible. Advances *item past
 * RTE_FLOW_ITEM_TYPE_VOID entries; 'increment' presumably is the step applied
 * per skipped item -- confirm against the full source.
 */
29 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
35 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
42 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] as matched in params->fld_bitmap when its mask is
 * non-zero; if the mask is not all-ones it also records a wildcard match
 * (BNXT_ULP_CF_IDX_WC_MATCH = 1). A zero mask clears the bitmap bit.
 * FIX(review): "&params" had been mis-encoded as "¶ms" (HTML entity
 * &para; rendered as the pilcrow); restored below.
 * NOTE(review): some lines of this listing are elided (numbering jumps).
 */
44 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
47 struct ulp_rte_hdr_field *field;
49 field = &params->hdr_field[idx];
50 if (ulp_bitmap_notzero(field->mask, field->size)) {
51 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
53 if (!ulp_bitmap_is_ones(field->mask, field->size))
54 ULP_COMP_FLD_IDX_WR(params,
55 BNXT_ULP_CF_IDX_WC_MATCH, 1);
57 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
61 /* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec. Returns a struct ulp_rte_hdr_field *
 * per the declared return type -- presumably the next field slot, so calls
 * can be chained (callers below chain the return value); the lines setting
 * field->size and the return statement are elided from this listing.
 */
62 static struct ulp_rte_hdr_field *
63 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
68 memcpy(field->spec, buffer, field->size);
73 /* Utility function to copy field masks items */
/*
 * Copies 'buffer' into the mask of hdr_field[*idx] and updates the field
 * bitmap for that index via ulp_rte_parser_field_bitmap_update().
 * FIX(review): "&params" had been mis-encoded as "¶ms" (HTML entity
 * &para;); restored below. Some lines of this listing are elided --
 * presumably *idx is incremented after the copy; confirm in full source.
 */
75 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
80 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
82 memcpy(field->mask, buffer, size);
83 ulp_rte_parser_field_bitmap_update(params, *idx);
87 /* Utility function to ignore field masks items */
/*
 * Deliberate no-op counterpart to ulp_rte_prsr_mask_copy(): all parameters
 * are __rte_unused. Used where a mask must be consumed positionally but not
 * matched (e.g. VLAN priority, IPv4 ToS below).
 */
89 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
91 const void *buffer __rte_unused,
92 uint32_t size __rte_unused)
98 * Function to handle the parsing of RTE Flows and placing
99 * the RTE flow items into the ulp structures.
/*
 * Walks the pattern until RTE_FLOW_ITEM_TYPE_END, dispatching each item to
 * its registered proto_hdr_func from ulp_hdr_info[]. Unsupported items fail
 * with BNXT_TF_RC_PARSE_ERR; a failing handler returns BNXT_TF_RC_ERROR.
 * Finishes by resolving the implicit SVIF match port.
 * NOTE(review): lines are elided from this listing (numbering jumps), e.g.
 * the item-advance step of the loop is not visible here.
 */
102 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
103 struct ulp_rte_parser_params *params)
105 const struct rte_flow_item *item = pattern;
106 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Field index starts after the reserved SVIF header slots. */
108 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
110 /* Set the computed flags for no vlan tags before parsing */
111 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
112 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
114 /* Parse all the items in the pattern */
115 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
116 /* get the header information from the flow_hdr_info table */
117 hdr_info = &ulp_hdr_info[item->type];
118 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
120 "Truflow parser does not support type %d\n",
122 return BNXT_TF_RC_PARSE_ERR;
123 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
124 /* call the registered callback handler */
125 if (hdr_info->proto_hdr_func) {
126 if (hdr_info->proto_hdr_func(item, params) !=
127 BNXT_TF_RC_SUCCESS) {
128 return BNXT_TF_RC_ERROR;
134 /* update the implied SVIF */
135 return ulp_rte_parser_implicit_match_port_process(params);
139 * Function to handle the parsing of RTE Flows and placing
140 * the RTE flow actions into the ulp structures.
/*
 * Mirror of the header parser for actions: walks the action list until
 * RTE_FLOW_ACTION_TYPE_END, dispatching each to its proto_act_func from
 * ulp_act_info[]. Finishes by applying the implicit action port.
 * NOTE(review): the variable is named 'hdr_info' although it holds a
 * bnxt_ulp_rte_act_info pointer; list-advance lines are elided here.
 */
143 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
144 struct ulp_rte_parser_params *params)
146 const struct rte_flow_action *action_item = actions;
147 struct bnxt_ulp_rte_act_info *hdr_info;
149 /* Parse all the items in the pattern */
150 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
151 /* get the header information from the flow_hdr_info table */
152 hdr_info = &ulp_act_info[action_item->type];
153 if (hdr_info->act_type ==
154 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
156 "Truflow parser does not support act %u\n",
158 return BNXT_TF_RC_ERROR;
159 } else if (hdr_info->act_type ==
160 BNXT_ULP_ACT_TYPE_SUPPORTED) {
161 /* call the registered callback handler */
162 if (hdr_info->proto_act_func) {
163 if (hdr_info->proto_act_func(action_item,
165 BNXT_TF_RC_SUCCESS) {
166 return BNXT_TF_RC_ERROR;
172 /* update the implied port details */
173 ulp_rte_parser_implicit_act_port_process(params);
174 return BNXT_TF_RC_SUCCESS;
178 * Function to handle the post processing of the computed
179 * fields for the interface.
/*
 * Resolves the dpdk port id (BNXT_ULP_CF_IDX_INCOMING_IF) to a port-db
 * ifindex, then populates the PARIF computed fields: for ingress the
 * physical-port PARIF, plus VF-func PARIF and the VFREP flag when the match
 * port is a VF representor; otherwise the driver-func PARIF and, for PF
 * match ports, the IS_PF flag.
 * NOTE(review): several lines are elided from this listing (error returns,
 * the egress/else branch, and some closing braces are not visible).
 */
182 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
185 uint16_t port_id, parif;
187 enum bnxt_ulp_direction_type dir;
189 /* get the direction details */
190 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
192 /* read the port id details */
193 port_id = ULP_COMP_FLD_IDX_RD(params,
194 BNXT_ULP_CF_IDX_INCOMING_IF);
195 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
198 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
202 if (dir == BNXT_ULP_DIR_INGRESS) {
204 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
205 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
206 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
209 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
212 /* Get the match port type */
213 mtype = ULP_COMP_FLD_IDX_RD(params,
214 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
215 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
216 ULP_COMP_FLD_IDX_WR(params,
217 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
219 /* Set VF func PARIF */
220 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
221 BNXT_ULP_VF_FUNC_PARIF,
224 "ParseErr:ifindex is not valid\n");
227 ULP_COMP_FLD_IDX_WR(params,
228 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
232 /* Set DRV func PARIF */
233 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
234 BNXT_ULP_DRV_FUNC_PARIF,
237 "ParseErr:ifindex is not valid\n");
240 ULP_COMP_FLD_IDX_WR(params,
241 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
244 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
245 ULP_COMP_FLD_IDX_WR(params,
246 BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
/*
 * Post-processes a normal (non-tunnel-offload) flow: stamps the flow
 * direction bit into both header and action bitmaps, derives the VF-to-VF
 * flag, expands the DEC_TTL action into tunnel/non-tunnel computed fields,
 * merges hdr_fp_bit into hdr_bitmap, records the fid, and updates the
 * interface computed fields.
 * NOTE(review): lines are elided from this listing (the return type,
 * ingress branch, closing braces and final return are not visible).
 */
253 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
255 enum bnxt_ulp_intf_type match_port_type, act_port_type;
256 enum bnxt_ulp_direction_type dir;
257 uint32_t act_port_set;
259 /* Get the computed details */
260 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
261 match_port_type = ULP_COMP_FLD_IDX_RD(params,
262 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
263 act_port_type = ULP_COMP_FLD_IDX_RD(params,
264 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
265 act_port_set = ULP_COMP_FLD_IDX_RD(params,
266 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
268 /* set the flow direction in the proto and action header */
269 if (dir == BNXT_ULP_DIR_EGRESS) {
270 ULP_BITMAP_SET(params->hdr_bitmap.bits,
271 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
272 ULP_BITMAP_SET(params->act_bitmap.bits,
273 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
276 /* calculate the VF to VF flag */
277 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
278 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
279 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
281 /* Update the decrement ttl computational fields */
282 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
283 BNXT_ULP_ACT_BIT_DEC_TTL)) {
285 * Check that vxlan proto is included and vxlan decap
286 * action is not set then decrement tunnel ttl.
287 * Similarly add GRE and NVGRE in future.
289 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
290 BNXT_ULP_HDR_BIT_T_VXLAN) &&
291 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
292 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
293 ULP_COMP_FLD_IDX_WR(params,
294 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
296 ULP_COMP_FLD_IDX_WR(params,
297 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
301 /* Merge the hdr_fp_bit into the proto header bit */
302 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
304 /* Update the comp fld fid */
305 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
307 /* Update the computed interface parameters */
308 bnxt_ulp_comp_fld_intf_update(params);
310 /* TBD: Handle the flow rejection scenarios */
315 * Function to handle the post processing of the parsing details
/*
 * Runs the normal-flow post-processing first, then returns the result of the
 * tunnel-flow post-processing (the normal-flow return value is not used).
 */
318 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
320 ulp_post_process_normal_flow(params);
321 return ulp_post_process_tun_flow(params);
325 * Function to compute the flow direction based on the match port details
/*
 * Writes BNXT_ULP_CF_IDX_DIRECTION: an ingress-attributed flow matching a
 * VF representor is treated as egress; otherwise the direction follows the
 * flow attribute (ingress when BNXT_ULP_FLOW_ATTR_INGRESS, else egress).
 * NOTE(review): the early-return / else structure between the branches is
 * elided from this listing.
 */
328 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
330 enum bnxt_ulp_intf_type match_port_type;
332 /* Get the match port type */
333 match_port_type = ULP_COMP_FLD_IDX_RD(params,
334 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
336 /* If ingress flow and matchport is vf rep then dir is egress*/
337 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
338 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
339 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
340 BNXT_ULP_DIR_EGRESS);
342 /* Assign the input direction */
343 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
344 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
345 BNXT_ULP_DIR_INGRESS);
347 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
348 BNXT_ULP_DIR_EGRESS);
352 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolves and records the source VIF for the match port: rejects a second
 * SVIF, validates the port type, computes the flow direction, selects the
 * SVIF type (phy-port for ingress; VF-func for VF reps, else drv-func),
 * fetches the svif from the port db and stores spec/mask into the reserved
 * SVIF hdr_field slot plus the SVIF_FLAG computed field.
 * FIX(review): "&params" had been mis-encoded as "¶ms" (HTML entity
 * &para;); restored below. Some lines are elided from this listing.
 */
354 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
359 enum bnxt_ulp_direction_type dir;
360 struct ulp_rte_hdr_field *hdr_field;
361 enum bnxt_ulp_svif_type svif_type;
362 enum bnxt_ulp_intf_type port_type;
364 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
365 BNXT_ULP_INVALID_SVIF_VAL) {
367 "SVIF already set,multiple source not support'd\n");
368 return BNXT_TF_RC_ERROR;
371 /* Get port type details */
372 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
373 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
374 BNXT_TF_DBG(ERR, "Invalid port type\n");
375 return BNXT_TF_RC_ERROR;
378 /* Update the match port type */
379 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
381 /* compute the direction */
382 bnxt_ulp_rte_parser_direction_compute(params);
384 /* Get the computed direction */
385 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
386 if (dir == BNXT_ULP_DIR_INGRESS) {
387 svif_type = BNXT_ULP_PHY_PORT_SVIF;
389 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
390 svif_type = BNXT_ULP_VF_FUNC_SVIF;
392 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
394 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
396 svif = rte_cpu_to_be_16(svif);
397 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
398 memcpy(hdr_field->spec, &svif, sizeof(svif));
399 memcpy(hdr_field->mask, &mask, sizeof(mask));
400 hdr_field->size = sizeof(svif);
401 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
402 rte_be_to_cpu_16(svif));
403 return BNXT_TF_RC_SUCCESS;
406 /* Function to handle the parsing of the RTE port id */
/*
 * If no explicit SVIF was set by a port item, derives one from the incoming
 * interface: converts the dpdk port id to a port-db ifindex and applies a
 * full-mask SVIF via ulp_rte_parser_svif_set().
 * NOTE(review): lines are elided from this listing (the final return of
 * 'rc' and some braces are not visible).
 */
408 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
410 uint16_t port_id = 0;
411 uint16_t svif_mask = 0xFFFF;
413 int32_t rc = BNXT_TF_RC_ERROR;
415 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
416 BNXT_ULP_INVALID_SVIF_VAL)
417 return BNXT_TF_RC_SUCCESS;
419 /* SVIF not set. So get the port id */
420 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
422 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
425 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
429 /* Update the SVIF details */
430 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
434 /* Function to handle the implicit action port id */
/*
 * If no explicit action port was set, synthesizes a port-id action from the
 * incoming interface and feeds it through ulp_rte_port_id_act_handler(),
 * then clears ACT_PORT_IS_SET so the port counts as implicit.
 */
436 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
438 struct rte_flow_action action_item = {0};
439 struct rte_flow_action_port_id port_id = {0};
441 /* Read the action port set bit */
442 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
443 /* Already set, so just exit */
444 return BNXT_TF_RC_SUCCESS;
446 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
447 action_item.conf = &port_id;
449 /* Update the action port based on incoming port */
450 ulp_rte_port_id_act_handler(&action_item, params);
452 /* Reset the action port set bit */
453 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
454 return BNXT_TF_RC_SUCCESS;
457 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item handler: the item itself carries no data, so the SVIF comes from
 * the implicit incoming interface, resolved to a port-db ifindex and set
 * with a full mask.
 */
459 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
460 struct ulp_rte_parser_params *params)
462 uint16_t port_id = 0;
463 uint16_t svif_mask = 0xFFFF;
466 /* Get the implicit port id */
467 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
469 /* perform the conversion from dpdk port to bnxt ifindex */
470 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
473 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
474 return BNXT_TF_RC_ERROR;
477 /* Update the SVIF details */
478 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
481 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF item handler: validates the item's spec/mask (elided checks below log
 * the errors shown), converts the VF function id to a port-db ifindex, and
 * sets the SVIF with the item's mask.
 * NOTE(review): the spec/mask null checks and 'mask' assignment are elided
 * from this listing.
 */
483 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
484 struct ulp_rte_parser_params *params)
486 const struct rte_flow_item_vf *vf_spec = item->spec;
487 const struct rte_flow_item_vf *vf_mask = item->mask;
490 int32_t rc = BNXT_TF_RC_PARSE_ERR;
492 /* Get VF rte_flow_item for Port details */
494 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
498 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
503 /* perform the conversion from VF Func id to bnxt ifindex */
504 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
507 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
510 /* Update the SVIF details */
511 return ulp_rte_parser_svif_set(params, ifindex, mask);
514 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * PORT_ID item handler: validates spec/mask, converts the dpdk port id to a
 * port-db ifindex and sets the SVIF with the item's id mask.
 * NOTE(review): the spec/mask null-check bodies are elided; the second
 * error string says "Phy Port mask" although this is the port-id item --
 * presumably a copy/paste from the phy-port handler.
 */
516 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
517 struct ulp_rte_parser_params *params)
519 const struct rte_flow_item_port_id *port_spec = item->spec;
520 const struct rte_flow_item_port_id *port_mask = item->mask;
522 int32_t rc = BNXT_TF_RC_PARSE_ERR;
526 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
530 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
533 mask = port_mask->id;
535 /* perform the conversion from dpdk port to bnxt ifindex */
536 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
539 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
542 /* Update the SVIF details */
543 return ulp_rte_parser_svif_set(params, ifindex, mask);
546 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * PHY_PORT item handler: validates spec/mask, marks the match port type as
 * PHY_PORT, computes direction (phy ports are only valid for ingress),
 * looks up the physical port's svif and writes spec/mask directly into the
 * reserved SVIF hdr_field slot plus the SVIF_FLAG computed field.
 * FIX(review): "&params" had been mis-encoded as "¶ms" (HTML entity
 * &para;); restored below. Some lines are elided from this listing.
 */
548 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
549 struct ulp_rte_parser_params *params)
551 const struct rte_flow_item_phy_port *port_spec = item->spec;
552 const struct rte_flow_item_phy_port *port_mask = item->mask;
554 int32_t rc = BNXT_TF_RC_ERROR;
556 enum bnxt_ulp_direction_type dir;
557 struct ulp_rte_hdr_field *hdr_field;
559 /* Copy the rte_flow_item for phy port into hdr_field */
561 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
565 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
568 mask = port_mask->index;
570 /* Update the match port type */
571 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
572 BNXT_ULP_INTF_TYPE_PHY_PORT);
574 /* Compute the Hw direction */
575 bnxt_ulp_rte_parser_direction_compute(params);
577 /* Direction validation */
578 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
579 if (dir == BNXT_ULP_DIR_EGRESS) {
581 "Parse Err:Phy ports are valid only for ingress\n");
582 return BNXT_TF_RC_PARSE_ERR;
585 /* Get the physical port details from port db */
586 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
589 BNXT_TF_DBG(ERR, "Failed to get port details\n");
590 return BNXT_TF_RC_PARSE_ERR;
593 /* Update the SVIF details */
594 svif = rte_cpu_to_be_16(svif);
595 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
596 memcpy(hdr_field->spec, &svif, sizeof(svif));
597 memcpy(hdr_field->mask, &mask, sizeof(mask));
598 hdr_field->size = sizeof(svif);
599 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
600 rte_be_to_cpu_16(svif));
601 return BNXT_TF_RC_SUCCESS;
604 /* Function to handle the update of proto header based on field values */
/*
 * Maps an ethertype (big-endian) onto the fast-path header bitmap: IPv4 or
 * IPv6, inner vs outer selected by 'in_flag', also setting the matching
 * I_L3/O_L3 computed field.
 * NOTE(review): the if (in_flag) / else lines are elided from this listing;
 * the pairing below is inferred from the I_/O_ constants used.
 */
606 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
607 uint16_t type, uint32_t in_flag)
609 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
611 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
612 BNXT_ULP_HDR_BIT_I_IPV4);
613 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
615 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
616 BNXT_ULP_HDR_BIT_O_IPV4);
617 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
619 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
621 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
622 BNXT_ULP_HDR_BIT_I_IPV6);
623 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
625 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
626 BNXT_ULP_HDR_BIT_O_IPV6);
627 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
632 /* Internal Function to identify broadcast or multicast packets */
/*
 * Returns a truthy value when the MAC is multicast or broadcast (logging
 * that such addresses are not offloadable). The return statements are
 * elided from this listing.
 */
634 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
636 if (rte_is_multicast_ether_addr(eth_addr) ||
637 rte_is_broadcast_ether_addr(eth_addr)) {
639 "No support for bcast or mcast addr offload\n");
645 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * ETH item handler: copies dst/src MAC and ethertype spec into consecutive
 * hdr_field slots (rejecting bcast/mcast addresses), copies the masks,
 * advances field_idx by BNXT_ULP_PROTO_HDR_ETH_NUM, sets I_ETH when any
 * outer L2/L3/L4 bit is already present (else O_ETH), and finally updates
 * the L3 fast-path bits from the ethertype.
 * FIX(review): "&params"/"&eth_spec"/"&eth_mask" had been mis-encoded as
 * "¶ms"/"ð_spec"/"ð_mask" (HTML entities &para;/&eth;); restored
 * below. Some lines are elided from this listing (e.g. the inner_flag
 * assignment and spec/mask null guards).
 */
647 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
648 struct ulp_rte_parser_params *params)
650 const struct rte_flow_item_eth *eth_spec = item->spec;
651 const struct rte_flow_item_eth *eth_mask = item->mask;
652 struct ulp_rte_hdr_field *field;
653 uint32_t idx = params->field_idx;
655 uint16_t eth_type = 0;
656 uint32_t inner_flag = 0;
659 * Copy the rte_flow_item for eth into hdr_field using ethernet
663 size = sizeof(eth_spec->dst.addr_bytes);
664 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
665 eth_spec->dst.addr_bytes,
667 /* Todo: work around to avoid multicast and broadcast addr */
668 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
669 return BNXT_TF_RC_PARSE_ERR;
671 size = sizeof(eth_spec->src.addr_bytes);
672 field = ulp_rte_parser_fld_copy(field,
673 eth_spec->src.addr_bytes,
675 /* Todo: work around to avoid multicast and broadcast addr */
676 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
677 return BNXT_TF_RC_PARSE_ERR;
679 field = ulp_rte_parser_fld_copy(field,
681 sizeof(eth_spec->type));
682 eth_type = eth_spec->type;
685 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
686 sizeof(eth_mask->dst.addr_bytes));
687 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
688 sizeof(eth_mask->src.addr_bytes));
689 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
690 sizeof(eth_mask->type));
692 /* Add number of Eth header elements */
693 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
695 /* Update the protocol hdr bitmap */
696 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697 BNXT_ULP_HDR_BIT_O_ETH) ||
698 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699 BNXT_ULP_HDR_BIT_O_IPV4) ||
700 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
701 BNXT_ULP_HDR_BIT_O_IPV6) ||
702 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
703 BNXT_ULP_HDR_BIT_O_UDP) ||
704 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
705 BNXT_ULP_HDR_BIT_O_TCP)) {
706 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
709 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
711 /* Update the field protocol hdr bitmap */
712 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
714 return BNXT_TF_RC_SUCCESS;
717 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * VLAN item handler: splits the TCI into priority (top 3 bits) and tag
 * (low 12 bits), copies priority/tag/inner-ethertype spec into hdr_field,
 * widens all-ones priority/tag masks to full 16-bit exact-match masks,
 * ignores the priority mask (OVS wildcard workaround), copies the tag and
 * inner-type masks, then classifies the tag as OO/OI/IO/II VLAN from the
 * current O_ETH/I_ETH bits and the outer/inner vtag counters, updating the
 * corresponding computed fields. Errors out if no eth header preceded.
 * FIX(review): "&params" had been mis-encoded as "¶ms" (HTML entity
 * &para;); restored below. Some lines are elided from this listing (spec/
 * mask null guards, vtag counter increments, inner_flag assignment).
 */
719 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
720 struct ulp_rte_parser_params *params)
722 const struct rte_flow_item_vlan *vlan_spec = item->spec;
723 const struct rte_flow_item_vlan *vlan_mask = item->mask;
724 struct ulp_rte_hdr_field *field;
725 struct ulp_rte_hdr_bitmap *hdr_bit;
726 uint32_t idx = params->field_idx;
727 uint16_t vlan_tag, priority;
728 uint32_t outer_vtag_num;
729 uint32_t inner_vtag_num;
730 uint16_t eth_type = 0;
731 uint32_t inner_flag = 0;
734 * Copy the rte_flow_item for vlan into hdr_field using Vlan
738 vlan_tag = ntohs(vlan_spec->tci);
739 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
740 vlan_tag &= ULP_VLAN_TAG_MASK;
741 vlan_tag = htons(vlan_tag);
743 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
746 field = ulp_rte_parser_fld_copy(field,
750 field = ulp_rte_parser_fld_copy(field,
751 &vlan_spec->inner_type,
752 sizeof(vlan_spec->inner_type));
753 eth_type = vlan_spec->inner_type;
757 vlan_tag = ntohs(vlan_mask->tci);
758 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
762 * the storage for priority and vlan tag is 2 bytes
763 * The mask of priority which is 3 bits if it is all 1's
764 * then make the rest bits 13 bits as 1's
765 * so that it is matched as exact match.
767 if (priority == ULP_VLAN_PRIORITY_MASK)
768 priority |= ~ULP_VLAN_PRIORITY_MASK;
769 if (vlan_tag == ULP_VLAN_TAG_MASK)
770 vlan_tag |= ~ULP_VLAN_TAG_MASK;
771 vlan_tag = htons(vlan_tag);
774 * The priority field is ignored since OVS is setting it as
775 * wild card match and it is not supported. This is a work
776 * around and shall be addressed in the future.
778 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
781 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
783 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
784 sizeof(vlan_mask->inner_type));
786 /* Set the field index to new incremented value */
787 params->field_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
789 /* Get the outer tag and inner tag counts */
790 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
791 BNXT_ULP_CF_IDX_O_VTAG_NUM);
792 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
793 BNXT_ULP_CF_IDX_I_VTAG_NUM);
795 /* Update the hdr_bitmap of the vlans */
796 hdr_bit = &params->hdr_bitmap;
797 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
798 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
800 /* Update the vlan tag num */
802 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
804 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
805 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
806 ULP_BITMAP_SET(params->hdr_bitmap.bits,
807 BNXT_ULP_HDR_BIT_OO_VLAN);
808 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
809 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
810 outer_vtag_num == 1) {
811 /* update the vlan tag num */
813 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
815 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
816 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
817 ULP_BITMAP_SET(params->hdr_bitmap.bits,
818 BNXT_ULP_HDR_BIT_OI_VLAN);
819 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
820 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
822 /* update the vlan tag num */
824 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
826 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
827 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
828 ULP_BITMAP_SET(params->hdr_bitmap.bits,
829 BNXT_ULP_HDR_BIT_IO_VLAN);
831 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
832 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
833 inner_vtag_num == 1) {
834 /* update the vlan tag num */
836 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
838 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
839 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
840 ULP_BITMAP_SET(params->hdr_bitmap.bits,
841 BNXT_ULP_HDR_BIT_II_VLAN);
844 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
845 return BNXT_TF_RC_ERROR;
847 /* Update the field protocol hdr bitmap */
848 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
849 return BNXT_TF_RC_SUCCESS;
852 /* Function to handle the update of proto header based on field values */
/*
 * Maps an L3 next-protocol value onto the header bitmaps: UDP/TCP set the
 * inner or outer fast-path L4 bits (selected by 'in_flag') plus I_L4/O_L4;
 * GRE sets the tunnel-GRE bit; ICMP picks inner vs outer by the L3_TUN
 * computed field. The trailing writes record the protocol id into the
 * I_/O_ L3 proto computed fields.
 * NOTE(review): the if (in_flag)/else lines and the guard around the
 * trailing proto-id writes are elided from this listing.
 */
854 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
855 uint8_t proto, uint32_t in_flag)
857 if (proto == IPPROTO_UDP) {
859 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
860 BNXT_ULP_HDR_BIT_I_UDP);
861 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
863 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
864 BNXT_ULP_HDR_BIT_O_UDP);
865 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
867 } else if (proto == IPPROTO_TCP) {
869 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
870 BNXT_ULP_HDR_BIT_I_TCP);
871 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
873 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
874 BNXT_ULP_HDR_BIT_O_TCP);
875 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
877 } else if (proto == IPPROTO_GRE) {
878 ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
879 } else if (proto == IPPROTO_ICMP) {
880 if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
881 ULP_BITMAP_SET(param->hdr_bitmap.bits,
882 BNXT_ULP_HDR_BIT_I_ICMP);
884 ULP_BITMAP_SET(param->hdr_bitmap.bits,
885 BNXT_ULP_HDR_BIT_O_ICMP);
889 ULP_COMP_FLD_IDX_WR(param,
890 BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
892 ULP_COMP_FLD_IDX_WR(param,
893 BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
896 ULP_COMP_FLD_IDX_WR(param,
897 BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
899 ULP_COMP_FLD_IDX_WR(param,
900 BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
906 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * IPV4 item handler: rejects a third L3 header; if no eth item preceded
 * (F2 tunnel flows) skips the eth/vlan field slots so post-processing can
 * still patch the tunnel dmac; copies each ipv4 spec field and mask into
 * hdr_field (ToS mask deliberately ignored -- OVS wildcard workaround),
 * advances field_idx by BNXT_ULP_PROTO_HDR_IPV4_NUM, sets I_IPV4 when an
 * outer L3 bit already exists (else O_IPV4), masks the next-proto with its
 * mask before classifying L4, and bumps the L3 header count.
 * FIX(review): "&params" had been mis-encoded as "¶ms" (HTML entity
 * &para;); restored below. Some lines are elided from this listing (null
 * guards, size arguments on several copies, inner_flag assignment).
 */
908 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
909 struct ulp_rte_parser_params *params)
911 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
912 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
913 struct ulp_rte_hdr_field *field;
914 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
915 uint32_t idx = params->field_idx;
918 uint32_t inner_flag = 0;
921 /* validate there are no 3rd L3 header */
922 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
924 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
925 return BNXT_TF_RC_ERROR;
928 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
929 BNXT_ULP_HDR_BIT_O_ETH) &&
930 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
931 BNXT_ULP_HDR_BIT_I_ETH)) {
932 /* Since F2 flow does not include eth item, when parser detects
933 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
934 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
935 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
936 * This will allow the parser post processor to update the
937 * t_dmac in hdr_field[o_eth.dmac]
939 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
940 BNXT_ULP_PROTO_HDR_VLAN_NUM);
941 params->field_idx = idx;
945 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
949 size = sizeof(ipv4_spec->hdr.version_ihl);
950 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
951 &ipv4_spec->hdr.version_ihl,
953 size = sizeof(ipv4_spec->hdr.type_of_service);
954 field = ulp_rte_parser_fld_copy(field,
955 &ipv4_spec->hdr.type_of_service,
957 size = sizeof(ipv4_spec->hdr.total_length);
958 field = ulp_rte_parser_fld_copy(field,
959 &ipv4_spec->hdr.total_length,
961 size = sizeof(ipv4_spec->hdr.packet_id);
962 field = ulp_rte_parser_fld_copy(field,
963 &ipv4_spec->hdr.packet_id,
965 size = sizeof(ipv4_spec->hdr.fragment_offset);
966 field = ulp_rte_parser_fld_copy(field,
967 &ipv4_spec->hdr.fragment_offset,
969 size = sizeof(ipv4_spec->hdr.time_to_live);
970 field = ulp_rte_parser_fld_copy(field,
971 &ipv4_spec->hdr.time_to_live,
973 size = sizeof(ipv4_spec->hdr.next_proto_id);
974 field = ulp_rte_parser_fld_copy(field,
975 &ipv4_spec->hdr.next_proto_id,
977 proto = ipv4_spec->hdr.next_proto_id;
978 size = sizeof(ipv4_spec->hdr.hdr_checksum);
979 field = ulp_rte_parser_fld_copy(field,
980 &ipv4_spec->hdr.hdr_checksum,
982 size = sizeof(ipv4_spec->hdr.src_addr);
983 field = ulp_rte_parser_fld_copy(field,
984 &ipv4_spec->hdr.src_addr,
986 size = sizeof(ipv4_spec->hdr.dst_addr);
987 field = ulp_rte_parser_fld_copy(field,
988 &ipv4_spec->hdr.dst_addr,
992 ulp_rte_prsr_mask_copy(params, &idx,
993 &ipv4_mask->hdr.version_ihl,
994 sizeof(ipv4_mask->hdr.version_ihl));
996 * The tos field is ignored since OVS is setting it as wild card
997 * match and it is not supported. This is a work around and
998 * shall be addressed in the future.
1000 ulp_rte_prsr_mask_ignore(params, &idx,
1001 &ipv4_mask->hdr.type_of_service,
1002 sizeof(ipv4_mask->hdr.type_of_service)
1005 ulp_rte_prsr_mask_copy(params, &idx,
1006 &ipv4_mask->hdr.total_length,
1007 sizeof(ipv4_mask->hdr.total_length));
1008 ulp_rte_prsr_mask_copy(params, &idx,
1009 &ipv4_mask->hdr.packet_id,
1010 sizeof(ipv4_mask->hdr.packet_id));
1011 ulp_rte_prsr_mask_copy(params, &idx,
1012 &ipv4_mask->hdr.fragment_offset,
1013 sizeof(ipv4_mask->hdr.fragment_offset));
1014 ulp_rte_prsr_mask_copy(params, &idx,
1015 &ipv4_mask->hdr.time_to_live,
1016 sizeof(ipv4_mask->hdr.time_to_live));
1017 ulp_rte_prsr_mask_copy(params, &idx,
1018 &ipv4_mask->hdr.next_proto_id,
1019 sizeof(ipv4_mask->hdr.next_proto_id));
1020 ulp_rte_prsr_mask_copy(params, &idx,
1021 &ipv4_mask->hdr.hdr_checksum,
1022 sizeof(ipv4_mask->hdr.hdr_checksum));
1023 ulp_rte_prsr_mask_copy(params, &idx,
1024 &ipv4_mask->hdr.src_addr,
1025 sizeof(ipv4_mask->hdr.src_addr));
1026 ulp_rte_prsr_mask_copy(params, &idx,
1027 &ipv4_mask->hdr.dst_addr,
1028 sizeof(ipv4_mask->hdr.dst_addr));
1030 /* Add the number of ipv4 header elements */
1031 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
1033 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1034 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1035 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1036 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1037 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1040 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1041 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1044 /* Some of the PMD applications may set the protocol field
1045 * in the IPv4 spec but don't set the mask. So, consider
1046 * the mask in the proto value calculation.
1049 proto &= ipv4_mask->hdr.next_proto_id;
1051 /* Update the field protocol hdr bitmap */
1052 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1053 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1054 return BNXT_TF_RC_SUCCESS;
1057 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * Copies the IPv6 spec/mask of an rte_flow item into the parser's
 * hdr_field array, records outer-vs-inner IPv6 in the header bitmap,
 * and bumps the computed L3 header count.
 *
 * item   - rte_flow item carrying struct rte_flow_item_ipv6 spec/mask
 * params - parser state updated in place
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L3
 * header is encountered.
 *
 * NOTE(review): the token "¶ms" below is an encoding artifact of
 * "&params" — confirm against the upstream source before compiling.
 * Several continuation lines of this function are missing from this
 * extraction (gaps in the embedded numbering).
 */
1059 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1060 struct ulp_rte_parser_params *params)
1062 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1063 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1064 struct ulp_rte_hdr_field *field;
1065 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1066 uint32_t idx = params->field_idx;
1068 uint32_t vtcf, vtcf_mask;
1070 uint32_t inner_flag = 0;
1073 /* validate there are no 3rd L3 header */
1074 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1076 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1077 return BNXT_TF_RC_ERROR;
1080 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1081 BNXT_ULP_HDR_BIT_O_ETH) &&
1082 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1083 BNXT_ULP_HDR_BIT_I_ETH)) {
1084 /* Since F2 flow does not include eth item, when parser detects
1085 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1086 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
1087 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1088 * This will allow the parser post processor to update the
1089 * t_dmac in hdr_field[o_eth.dmac]
1091 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1092 BNXT_ULP_PROTO_HDR_VLAN_NUM);
1093 params->field_idx = idx;
1097 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* Spec copy: version, TC and flow label are unpacked from vtc_flow
 * via the BNXT_ULP_GET_IPV6_* accessor macros before being copied.
 */
1101 size = sizeof(ipv6_spec->hdr.vtc_flow);
1103 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1104 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1108 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1109 field = ulp_rte_parser_fld_copy(field,
1113 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1114 field = ulp_rte_parser_fld_copy(field,
1118 size = sizeof(ipv6_spec->hdr.payload_len);
1119 field = ulp_rte_parser_fld_copy(field,
1120 &ipv6_spec->hdr.payload_len,
1122 size = sizeof(ipv6_spec->hdr.proto);
1123 field = ulp_rte_parser_fld_copy(field,
1124 &ipv6_spec->hdr.proto,
1126 proto = ipv6_spec->hdr.proto;
1127 size = sizeof(ipv6_spec->hdr.hop_limits);
1128 field = ulp_rte_parser_fld_copy(field,
1129 &ipv6_spec->hdr.hop_limits,
1131 size = sizeof(ipv6_spec->hdr.src_addr);
1132 field = ulp_rte_parser_fld_copy(field,
1133 &ipv6_spec->hdr.src_addr,
1135 size = sizeof(ipv6_spec->hdr.dst_addr);
1136 field = ulp_rte_parser_fld_copy(field,
1137 &ipv6_spec->hdr.dst_addr,
/* Mask copy: only the version bits of vtc_flow are honored. */
1141 size = sizeof(ipv6_mask->hdr.vtc_flow);
1143 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1144 ulp_rte_prsr_mask_copy(params, &idx,
1148 * The TC and flow label field are ignored since OVS is setting
1149 * it for match and it is not supported.
1150 * This is a work around and
1151 * shall be addressed in the future.
1153 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1154 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1156 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1157 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1159 ulp_rte_prsr_mask_copy(params, &idx,
1160 &ipv6_mask->hdr.payload_len,
1161 sizeof(ipv6_mask->hdr.payload_len));
1162 ulp_rte_prsr_mask_copy(params, &idx,
1163 &ipv6_mask->hdr.proto,
1164 sizeof(ipv6_mask->hdr.proto));
1165 ulp_rte_prsr_mask_copy(params, &idx,
1166 &ipv6_mask->hdr.hop_limits,
1167 sizeof(ipv6_mask->hdr.hop_limits));
1168 ulp_rte_prsr_mask_copy(params, &idx,
1169 &ipv6_mask->hdr.src_addr,
1170 sizeof(ipv6_mask->hdr.src_addr));
1171 ulp_rte_prsr_mask_copy(params, &idx,
1172 &ipv6_mask->hdr.dst_addr,
1173 sizeof(ipv6_mask->hdr.dst_addr));
1175 /* add number of ipv6 header elements */
1176 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1178 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
/* An outer L3 already present means this IPv6 is the inner header. */
1179 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1180 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1181 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1182 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1185 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1186 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1189 /* Some of the PMD applications may set the protocol field
1190 * in the IPv6 spec but don't set the mask. So, consider
1191 * the mask in proto value calculation.
1194 proto &= ipv6_mask->hdr.proto;
1196 /* Update the field protocol hdr bitmap */
1197 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1198 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1200 return BNXT_TF_RC_SUCCESS;
1203 /* Function to handle the update of proto header based on field values */
/*
 * Flags a VXLAN tunnel when the L4 destination port matches the
 * well-known VXLAN port (4789, compared in big-endian), then sets
 * the L3_TUN computed field if either a VXLAN or a GRE tunnel bit
 * is already present in the header bitmap.
 * dst_port is expected in network byte order (callers pass the raw
 * spec value or 0 when no port is relevant).
 */
1205 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1208 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1209 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1210 BNXT_ULP_HDR_BIT_T_VXLAN);
1212 if (ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1213 BNXT_ULP_HDR_BIT_T_VXLAN) ||
1214 ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1215 BNXT_ULP_HDR_BIT_T_GRE))
1216 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1220 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies the UDP spec/mask into hdr_field, records outer/inner UDP in
 * the header bitmap, publishes src/dst ports (host order) into the
 * computed fields, and finally checks the dst port for VXLAN.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR on a third L4 header.
 * NOTE(review): "¶ms" below is an encoding artifact of "&params";
 * several continuation lines are missing from this extraction.
 */
1222 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1223 struct ulp_rte_parser_params *params)
1225 const struct rte_flow_item_udp *udp_spec = item->spec;
1226 const struct rte_flow_item_udp *udp_mask = item->mask;
1227 struct ulp_rte_hdr_field *field;
1228 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1229 uint32_t idx = params->field_idx;
1231 uint16_t dport = 0, sport = 0;
1234 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1236 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1237 return BNXT_TF_RC_ERROR;
1241 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1245 size = sizeof(udp_spec->hdr.src_port);
1246 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1247 &udp_spec->hdr.src_port,
1249 sport = udp_spec->hdr.src_port;
1250 size = sizeof(udp_spec->hdr.dst_port);
1251 field = ulp_rte_parser_fld_copy(field,
1252 &udp_spec->hdr.dst_port,
1254 dport = udp_spec->hdr.dst_port;
1255 size = sizeof(udp_spec->hdr.dgram_len);
1256 field = ulp_rte_parser_fld_copy(field,
1257 &udp_spec->hdr.dgram_len,
1259 size = sizeof(udp_spec->hdr.dgram_cksum);
1260 field = ulp_rte_parser_fld_copy(field,
1261 &udp_spec->hdr.dgram_cksum,
1265 ulp_rte_prsr_mask_copy(params, &idx,
1266 &udp_mask->hdr.src_port,
1267 sizeof(udp_mask->hdr.src_port));
1268 ulp_rte_prsr_mask_copy(params, &idx,
1269 &udp_mask->hdr.dst_port,
1270 sizeof(udp_mask->hdr.dst_port));
1271 ulp_rte_prsr_mask_copy(params, &idx,
1272 &udp_mask->hdr.dgram_len,
1273 sizeof(udp_mask->hdr.dgram_len));
1274 ulp_rte_prsr_mask_copy(params, &idx,
1275 &udp_mask->hdr.dgram_cksum,
1276 sizeof(udp_mask->hdr.dgram_cksum));
1279 /* Add number of UDP header elements */
1280 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1282 /* Set the udp header bitmap and computed l4 header bitmaps */
/* If an outer L4 is already present this UDP is the inner header. */
1283 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1284 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1285 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1286 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1287 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1288 (uint32_t)rte_be_to_cpu_16(sport));
1289 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1290 (uint32_t)rte_be_to_cpu_16(dport));
1291 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1293 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
/* Port "FB" (field-bitmap) computed fields are only set when the
 * corresponding mask is non-zero, i.e. the port is actually matched.
 */
1295 if (udp_mask && udp_mask->hdr.src_port)
1296 ULP_COMP_FLD_IDX_WR(params,
1297 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1299 if (udp_mask && udp_mask->hdr.dst_port)
1300 ULP_COMP_FLD_IDX_WR(params,
1301 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1304 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1305 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1306 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1307 (uint32_t)rte_be_to_cpu_16(sport));
1308 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1309 (uint32_t)rte_be_to_cpu_16(dport));
1310 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1312 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1314 if (udp_mask && udp_mask->hdr.src_port)
1315 ULP_COMP_FLD_IDX_WR(params,
1316 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1318 if (udp_mask && udp_mask->hdr.dst_port)
1319 ULP_COMP_FLD_IDX_WR(params,
1320 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1323 /* Update the field protocol hdr bitmap */
1324 ulp_rte_l4_proto_type_update(params, dport);
1326 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1327 return BNXT_TF_RC_SUCCESS;
1330 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies the TCP spec/mask into hdr_field, records outer/inner TCP in
 * the header bitmap and publishes src/dst ports (host order) into the
 * computed fields. Mirrors the UDP handler's structure.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR on a third L4 header.
 * NOTE(review): "¶ms" below is an encoding artifact of "&params";
 * several continuation lines are missing from this extraction.
 */
1332 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1333 struct ulp_rte_parser_params *params)
1335 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1336 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1337 struct ulp_rte_hdr_field *field;
1338 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1339 uint32_t idx = params->field_idx;
1340 uint16_t dport = 0, sport = 0;
1344 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1346 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1347 return BNXT_TF_RC_ERROR;
1351 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1355 sport = tcp_spec->hdr.src_port;
1356 size = sizeof(tcp_spec->hdr.src_port);
1357 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1358 &tcp_spec->hdr.src_port,
1360 dport = tcp_spec->hdr.dst_port;
1361 size = sizeof(tcp_spec->hdr.dst_port);
1362 field = ulp_rte_parser_fld_copy(field,
1363 &tcp_spec->hdr.dst_port,
1365 size = sizeof(tcp_spec->hdr.sent_seq);
1366 field = ulp_rte_parser_fld_copy(field,
1367 &tcp_spec->hdr.sent_seq,
1369 size = sizeof(tcp_spec->hdr.recv_ack);
1370 field = ulp_rte_parser_fld_copy(field,
1371 &tcp_spec->hdr.recv_ack,
1373 size = sizeof(tcp_spec->hdr.data_off);
1374 field = ulp_rte_parser_fld_copy(field,
1375 &tcp_spec->hdr.data_off,
1377 size = sizeof(tcp_spec->hdr.tcp_flags);
1378 field = ulp_rte_parser_fld_copy(field,
1379 &tcp_spec->hdr.tcp_flags,
1381 size = sizeof(tcp_spec->hdr.rx_win);
1382 field = ulp_rte_parser_fld_copy(field,
1383 &tcp_spec->hdr.rx_win,
1385 size = sizeof(tcp_spec->hdr.cksum);
1386 field = ulp_rte_parser_fld_copy(field,
1387 &tcp_spec->hdr.cksum,
1389 size = sizeof(tcp_spec->hdr.tcp_urp);
1390 field = ulp_rte_parser_fld_copy(field,
1391 &tcp_spec->hdr.tcp_urp,
/* When the spec is absent, idx still advances so the mask copy below
 * (and subsequent items) land at the correct hdr_field slots.
 */
1394 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1398 ulp_rte_prsr_mask_copy(params, &idx,
1399 &tcp_mask->hdr.src_port,
1400 sizeof(tcp_mask->hdr.src_port));
1401 ulp_rte_prsr_mask_copy(params, &idx,
1402 &tcp_mask->hdr.dst_port,
1403 sizeof(tcp_mask->hdr.dst_port));
1404 ulp_rte_prsr_mask_copy(params, &idx,
1405 &tcp_mask->hdr.sent_seq,
1406 sizeof(tcp_mask->hdr.sent_seq));
1407 ulp_rte_prsr_mask_copy(params, &idx,
1408 &tcp_mask->hdr.recv_ack,
1409 sizeof(tcp_mask->hdr.recv_ack));
1410 ulp_rte_prsr_mask_copy(params, &idx,
1411 &tcp_mask->hdr.data_off,
1412 sizeof(tcp_mask->hdr.data_off));
1413 ulp_rte_prsr_mask_copy(params, &idx,
1414 &tcp_mask->hdr.tcp_flags,
1415 sizeof(tcp_mask->hdr.tcp_flags));
1416 ulp_rte_prsr_mask_copy(params, &idx,
1417 &tcp_mask->hdr.rx_win,
1418 sizeof(tcp_mask->hdr.rx_win));
1419 ulp_rte_prsr_mask_copy(params, &idx,
1420 &tcp_mask->hdr.cksum,
1421 sizeof(tcp_mask->hdr.cksum));
1422 ulp_rte_prsr_mask_copy(params, &idx,
1423 &tcp_mask->hdr.tcp_urp,
1424 sizeof(tcp_mask->hdr.tcp_urp));
1426 /* add number of TCP header elements */
1427 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1429 /* Set the udp header bitmap and computed l4 header bitmaps */
/* If an outer L4 is already present this TCP is the inner header. */
1430 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1431 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1432 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1433 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1434 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1435 (uint32_t)rte_be_to_cpu_16(sport));
1436 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1437 (uint32_t)rte_be_to_cpu_16(dport));
1438 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1440 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1442 if (tcp_mask && tcp_mask->hdr.src_port)
1443 ULP_COMP_FLD_IDX_WR(params,
1444 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1446 if (tcp_mask && tcp_mask->hdr.dst_port)
1447 ULP_COMP_FLD_IDX_WR(params,
1448 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1451 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1452 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1453 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1454 (uint32_t)rte_be_to_cpu_16(sport));
1455 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1456 (uint32_t)rte_be_to_cpu_16(dport));
1457 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1459 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1461 if (tcp_mask && tcp_mask->hdr.src_port)
1462 ULP_COMP_FLD_IDX_WR(params,
1463 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1465 if (tcp_mask && tcp_mask->hdr.dst_port)
1466 ULP_COMP_FLD_IDX_WR(params,
1467 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1470 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1471 return BNXT_TF_RC_SUCCESS;
1474 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies the VXLAN spec/mask (flags, rsvd0, vni, rsvd1) into
 * hdr_field, sets the T_VXLAN tunnel bit, and refreshes the L3_TUN
 * computed field via ulp_rte_l4_proto_type_update(0).
 * NOTE(review): "¶ms" below is an encoding artifact of "&params";
 * some call-argument lines are missing from this extraction.
 */
1476 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1477 struct ulp_rte_parser_params *params)
1479 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1480 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1481 struct ulp_rte_hdr_field *field;
1482 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1483 uint32_t idx = params->field_idx;
1487 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1491 size = sizeof(vxlan_spec->flags);
1492 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1495 size = sizeof(vxlan_spec->rsvd0);
1496 field = ulp_rte_parser_fld_copy(field,
1499 size = sizeof(vxlan_spec->vni);
1500 field = ulp_rte_parser_fld_copy(field,
1503 size = sizeof(vxlan_spec->rsvd1);
1504 field = ulp_rte_parser_fld_copy(field,
1509 ulp_rte_prsr_mask_copy(params, &idx,
1511 sizeof(vxlan_mask->flags));
1512 ulp_rte_prsr_mask_copy(params, &idx,
1514 sizeof(vxlan_mask->rsvd0));
1515 ulp_rte_prsr_mask_copy(params, &idx,
1517 sizeof(vxlan_mask->vni));
1518 ulp_rte_prsr_mask_copy(params, &idx,
1520 sizeof(vxlan_mask->rsvd1));
1522 /* Add number of vxlan header elements */
1523 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1525 /* Update the hdr_bitmap with vxlan */
1526 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
/* Port 0: no L4 port check needed, only propagate the tunnel flag. */
1527 ulp_rte_l4_proto_type_update(params, 0);
1528 return BNXT_TF_RC_SUCCESS;
1531 /* Function to handle the parsing of RTE Flow item GRE Header. */
/*
 * Copies the GRE spec/mask (c_rsvd0_ver, protocol) into hdr_field,
 * sets the T_GRE tunnel bit, and refreshes the L3_TUN computed field.
 * NOTE(review): "¶ms" below is an encoding artifact of "&params".
 */
1533 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1534 struct ulp_rte_parser_params *params)
1536 const struct rte_flow_item_gre *gre_spec = item->spec;
1537 const struct rte_flow_item_gre *gre_mask = item->mask;
1538 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1539 uint32_t idx = params->field_idx;
1541 struct ulp_rte_hdr_field *field;
1544 size = sizeof(gre_spec->c_rsvd0_ver);
1545 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1546 &gre_spec->c_rsvd0_ver,
1548 size = sizeof(gre_spec->protocol);
1549 field = ulp_rte_parser_fld_copy(field,
1550 &gre_spec->protocol,
1554 ulp_rte_prsr_mask_copy(params, &idx,
1555 &gre_mask->c_rsvd0_ver,
1556 sizeof(gre_mask->c_rsvd0_ver));
1557 ulp_rte_prsr_mask_copy(params, &idx,
1558 &gre_mask->protocol,
1559 sizeof(gre_mask->protocol));
1561 /* Add number of GRE header elements */
1562 params->field_idx += BNXT_ULP_PROTO_HDR_GRE_NUM;
1564 /* Update the hdr_bitmap with GRE */
1565 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
/* Port 0: no L4 port check needed, only propagate the tunnel flag. */
1566 ulp_rte_l4_proto_type_update(params, 0);
1567 return BNXT_TF_RC_SUCCESS;
1570 /* Function to handle the parsing of RTE Flow item ANY. */
/* No-op: the ANY item imposes no match constraints, so nothing is
 * copied into the parser state; always succeeds.
 */
1572 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1573 struct ulp_rte_parser_params *params __rte_unused)
1575 return BNXT_TF_RC_SUCCESS;
1578 /* Function to handle the parsing of RTE Flow item ICMP Header. */
/*
 * Copies the ICMP spec/mask (type, code, cksum, ident, seq_nb) into
 * hdr_field and sets inner vs outer ICMP based on whether an L3
 * tunnel was already detected (BNXT_ULP_CF_IDX_L3_TUN).
 * NOTE(review): "¶ms" below is an encoding artifact of "&params".
 */
1580 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1581 struct ulp_rte_parser_params *params)
1583 const struct rte_flow_item_icmp *icmp_spec = item->spec;
1584 const struct rte_flow_item_icmp *icmp_mask = item->mask;
1585 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1586 uint32_t idx = params->field_idx;
1588 struct ulp_rte_hdr_field *field;
1591 size = sizeof(icmp_spec->hdr.icmp_type);
1592 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1593 &icmp_spec->hdr.icmp_type,
1595 size = sizeof(icmp_spec->hdr.icmp_code);
1596 field = ulp_rte_parser_fld_copy(field,
1597 &icmp_spec->hdr.icmp_code,
1599 size = sizeof(icmp_spec->hdr.icmp_cksum);
1600 field = ulp_rte_parser_fld_copy(field,
1601 &icmp_spec->hdr.icmp_cksum,
1603 size = sizeof(icmp_spec->hdr.icmp_ident);
1604 field = ulp_rte_parser_fld_copy(field,
1605 &icmp_spec->hdr.icmp_ident,
1607 size = sizeof(icmp_spec->hdr.icmp_seq_nb);
1608 field = ulp_rte_parser_fld_copy(field,
1609 &icmp_spec->hdr.icmp_seq_nb,
1613 ulp_rte_prsr_mask_copy(params, &idx,
1614 &icmp_mask->hdr.icmp_type,
1615 sizeof(icmp_mask->hdr.icmp_type));
1616 ulp_rte_prsr_mask_copy(params, &idx,
1617 &icmp_mask->hdr.icmp_code,
1618 sizeof(icmp_mask->hdr.icmp_code));
1619 ulp_rte_prsr_mask_copy(params, &idx,
1620 &icmp_mask->hdr.icmp_cksum,
1621 sizeof(icmp_mask->hdr.icmp_cksum));
1622 ulp_rte_prsr_mask_copy(params, &idx,
1623 &icmp_mask->hdr.icmp_ident,
1624 sizeof(icmp_mask->hdr.icmp_ident));
1625 ulp_rte_prsr_mask_copy(params, &idx,
1626 &icmp_mask->hdr.icmp_seq_nb,
1627 sizeof(icmp_mask->hdr.icmp_seq_nb));
/* NOTE(review): comment says GRE but counts ICMP elements. */
1629 /* Add number of GRE header elements */
1630 params->field_idx += BNXT_ULP_PROTO_HDR_ICMP_NUM;
1632 /* Update the hdr_bitmap with ICMP */
1633 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1634 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1636 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1637 return BNXT_TF_RC_SUCCESS;
1640 /* Function to handle the parsing of RTE Flow item void Header */
/* No-op: VOID items carry no match information; always succeeds. */
1642 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1643 struct ulp_rte_parser_params *params __rte_unused)
1645 return BNXT_TF_RC_SUCCESS;
1648 /* Function to handle the parsing of RTE Flow action void Header. */
/* No-op: VOID actions have no effect on the parser state. */
1650 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1651 struct ulp_rte_parser_params *params __rte_unused)
1653 return BNXT_TF_RC_SUCCESS;
1656 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the mark id (converted to big-endian) into the MARK action
 * property and sets the MARK action bit; errors out when the action
 * conf is missing.  NOTE(review): "¶m" tokens below look like an
 * encoding artifact of "&param" — confirm upstream.
 */
1658 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1659 struct ulp_rte_parser_params *param)
1661 const struct rte_flow_action_mark *mark;
1662 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1665 mark = action_item->conf;
1667 mark_id = tfp_cpu_to_be_32(mark->id);
1668 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1669 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
/* NOTE(review): stale copy/paste comment — this sets the MARK bit. */
1671 /* Update the hdr_bitmap with vxlan */
1672 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1673 return BNXT_TF_RC_SUCCESS;
1675 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1676 return BNXT_TF_RC_ERROR;
1679 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Sets the RSS action bit when the action conf is present; the RSS
 * configuration itself is not copied here.
 */
1681 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1682 struct ulp_rte_parser_params *param)
1684 const struct rte_flow_action_rss *rss = action_item->conf;
/* NOTE(review): stale copy/paste comment — this sets the RSS bit. */
1687 /* Update the hdr_bitmap with vxlan */
1688 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1689 return BNXT_TF_RC_SUCCESS;
1691 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1692 return BNXT_TF_RC_ERROR;
1695 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walks the item list attached to a VXLAN_ENCAP action
 * (ETH -> [VLAN [VLAN]] -> IPv4|IPv6 -> UDP -> VXLAN) and serializes
 * each header into the encap action-property buffers, then sets the
 * VXLAN_ENCAP action bit.  Any missing/mis-ordered item is an error.
 *
 * NOTE(review): "¶ms"/"&ap" style entity-mangling appears below and
 * several continuation lines are missing from this extraction —
 * reconcile against the upstream file before building.
 */
1697 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1698 struct ulp_rte_parser_params *params)
1700 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1701 const struct rte_flow_item *item;
1702 const struct rte_flow_item_eth *eth_spec;
1703 const struct rte_flow_item_ipv4 *ipv4_spec;
1704 const struct rte_flow_item_ipv6 *ipv6_spec;
1705 struct rte_flow_item_vxlan vxlan_spec;
1706 uint32_t vlan_num = 0, vlan_size = 0;
1707 uint32_t ip_size = 0, ip_type = 0;
1708 uint32_t vxlan_size = 0;
1710 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1711 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1713 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1714 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1716 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1717 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1718 const uint8_t *tmp_buff;
1720 vxlan_encap = action_item->conf;
1722 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1723 return BNXT_TF_RC_ERROR;
1726 item = vxlan_encap->definition;
1728 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1729 return BNXT_TF_RC_ERROR;
1732 if (!ulp_rte_item_skip_void(&item, 0))
1733 return BNXT_TF_RC_ERROR;
1735 /* must have ethernet header */
1736 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1737 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1738 return BNXT_TF_RC_ERROR;
1740 eth_spec = item->spec;
1741 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1742 ulp_encap_buffer_copy(buff,
1743 eth_spec->dst.addr_bytes,
1744 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1745 ULP_BUFFER_ALIGN_8_BYTE);
1747 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1748 ulp_encap_buffer_copy(buff,
1749 eth_spec->src.addr_bytes,
1750 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1751 ULP_BUFFER_ALIGN_8_BYTE);
1753 /* Goto the next item */
1754 if (!ulp_rte_item_skip_void(&item, 1))
1755 return BNXT_TF_RC_ERROR;
1757 /* May have vlan header */
1758 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1760 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1761 ulp_encap_buffer_copy(buff,
1763 sizeof(struct rte_flow_item_vlan),
1764 ULP_BUFFER_ALIGN_8_BYTE);
1766 if (!ulp_rte_item_skip_void(&item, 1))
1767 return BNXT_TF_RC_ERROR;
1770 /* may have two vlan headers */
1771 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
/* Second VLAN tag is appended right after the first in the
 * VTAG encap buffer.
 */
1773 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1774 sizeof(struct rte_flow_item_vlan)],
1776 sizeof(struct rte_flow_item_vlan));
1777 if (!ulp_rte_item_skip_void(&item, 1))
1778 return BNXT_TF_RC_ERROR;
1780 /* Update the vlan count and size of more than one */
1782 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1783 vlan_num = tfp_cpu_to_be_32(vlan_num);
1784 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1787 vlan_size = tfp_cpu_to_be_32(vlan_size);
1788 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1793 /* L3 must be IPv4, IPv6 */
1794 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1795 ipv4_spec = item->spec;
1796 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1798 /* copy the ipv4 details */
/* Empty ver/hlen/TOS in the spec means use the canned default
 * IPv4 header bytes; otherwise copy the user-provided fields.
 */
1799 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1800 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1801 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1802 ulp_encap_buffer_copy(buff,
1804 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1805 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1806 ULP_BUFFER_ALIGN_8_BYTE);
1808 /* Total length being ignored in the ip hdr. */
1809 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1810 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1811 ulp_encap_buffer_copy(buff,
1813 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1814 ULP_BUFFER_ALIGN_8_BYTE);
1815 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1816 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1817 ulp_encap_buffer_copy(buff,
1818 &ipv4_spec->hdr.version_ihl,
1819 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1820 ULP_BUFFER_ALIGN_8_BYTE);
1823 /* Update the dst ip address in ip encap buffer */
1824 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1825 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1826 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1827 ulp_encap_buffer_copy(buff,
1828 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1829 sizeof(ipv4_spec->hdr.dst_addr),
1830 ULP_BUFFER_ALIGN_8_BYTE);
1832 /* Update the src ip address */
1833 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1834 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1835 sizeof(ipv4_spec->hdr.src_addr)];
1836 ulp_encap_buffer_copy(buff,
1837 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1838 sizeof(ipv4_spec->hdr.src_addr),
1839 ULP_BUFFER_ALIGN_8_BYTE);
1841 /* Update the ip size details */
1842 ip_size = tfp_cpu_to_be_32(ip_size);
1843 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1844 &ip_size, sizeof(uint32_t));
1846 /* update the ip type */
1847 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1848 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1849 &ip_type, sizeof(uint32_t));
1851 /* update the computed field to notify it is ipv4 header */
1852 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1855 if (!ulp_rte_item_skip_void(&item, 1))
1856 return BNXT_TF_RC_ERROR;
1857 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1858 ipv6_spec = item->spec;
1859 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1861 /* copy the ipv6 details */
/* Empty vtc_flow means use the canned default IPv6 header;
 * otherwise copy proto/ttl and vtc_flow from the spec.
 */
1862 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1863 if (ulp_buffer_is_empty(tmp_buff,
1864 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1865 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1866 ulp_encap_buffer_copy(buff,
1868 sizeof(def_ipv6_hdr),
1869 ULP_BUFFER_ALIGN_8_BYTE);
1871 /* The payload length being ignored in the ip hdr. */
1872 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1873 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1874 ulp_encap_buffer_copy(buff,
1876 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1877 ULP_BUFFER_ALIGN_8_BYTE);
1878 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1879 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1880 BNXT_ULP_ENCAP_IPV6_DO];
1881 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1882 ulp_encap_buffer_copy(buff,
1884 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1885 ULP_BUFFER_ALIGN_8_BYTE);
1887 /* Update the dst ip address in ip encap buffer */
1888 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1889 sizeof(def_ipv6_hdr)];
1890 ulp_encap_buffer_copy(buff,
1891 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1892 sizeof(ipv6_spec->hdr.dst_addr),
1893 ULP_BUFFER_ALIGN_8_BYTE);
1895 /* Update the src ip address */
1896 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1897 ulp_encap_buffer_copy(buff,
1898 (const uint8_t *)ipv6_spec->hdr.src_addr,
1899 sizeof(ipv6_spec->hdr.src_addr),
1900 ULP_BUFFER_ALIGN_16_BYTE);
1902 /* Update the ip size details */
1903 ip_size = tfp_cpu_to_be_32(ip_size);
1904 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1905 &ip_size, sizeof(uint32_t));
1907 /* update the ip type */
1908 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1909 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1910 &ip_type, sizeof(uint32_t));
1912 /* update the computed field to notify it is ipv6 header */
1913 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1916 if (!ulp_rte_item_skip_void(&item, 1))
1917 return BNXT_TF_RC_ERROR;
1919 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1920 return BNXT_TF_RC_ERROR;
1924 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1925 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1926 return BNXT_TF_RC_ERROR;
1928 /* copy the udp details */
1929 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1930 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1931 ULP_BUFFER_ALIGN_8_BYTE);
1933 if (!ulp_rte_item_skip_void(&item, 1))
1934 return BNXT_TF_RC_ERROR;
1937 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1938 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1939 return BNXT_TF_RC_ERROR;
1941 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1942 /* copy the vxlan details */
1943 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* 0x08 = "VNI valid" flag per the VXLAN header definition. */
1944 vxlan_spec.flags = 0x08;
1945 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
/* For IPv6 the VXLAN header is written in two half-size chunks —
 * presumably an alignment requirement of the encap record; confirm.
 */
1946 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1947 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1948 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1950 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1951 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1952 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1953 (const uint8_t *)&vxlan_spec.vni,
1954 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1956 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1957 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1958 &vxlan_size, sizeof(uint32_t));
1960 /* update the hdr_bitmap with vxlan */
1961 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
1962 return BNXT_TF_RC_SUCCESS;
1965 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
/* NOTE(review): header comment above says "encap" — this handles
 * VXLAN_DECAP: sets the decap action bit plus the tunnel-decap and
 * L3-tunnel computed fields.
 */
1967 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1969 struct ulp_rte_parser_params *params)
1971 /* update the hdr_bitmap with vxlan */
1972 ULP_BITMAP_SET(params->act_bitmap.bits,
1973 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
1974 /* Update computational field with tunnel decap info */
1975 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1976 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1977 return BNXT_TF_RC_SUCCESS;
1980 /* Function to handle the parsing of RTE Flow action drop Header. */
/* Sets the DROP action bit; no action properties are needed. */
1982 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1983 struct ulp_rte_parser_params *params)
1985 /* Update the hdr_bitmap with drop */
1986 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
1987 return BNXT_TF_RC_SUCCESS;
1990 /* Function to handle the parsing of RTE Flow action count. */
/*
 * Rejects shared counters, copies the counter configuration into the
 * COUNT action property, and sets the COUNT action bit.
 * Returns BNXT_TF_RC_PARSE_ERR for shared counters, success otherwise.
 */
1992 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1993 struct ulp_rte_parser_params *params)
1995 const struct rte_flow_action_count *act_count;
1996 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1998 act_count = action_item->conf;
2000 if (act_count->shared) {
2002 "Parse Error:Shared count not supported\n");
2003 return BNXT_TF_RC_PARSE_ERR;
2005 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2007 BNXT_ULP_ACT_PROP_SZ_COUNT);
2010 /* Update the hdr_bitmap with count */
2011 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2012 return BNXT_TF_RC_SUCCESS;
2015 /* Function to handle the parsing of action ports. */
/*
 * Resolves the destination for a port action from the port database:
 * egress flows get the vport, ingress flows get the default vnic
 * (VF-rep ports use the VF function's vnic, others the driver
 * function's).  The resolved id is stored big-endian in the VPORT or
 * VNIC action property and ACT_PORT_IS_SET is flagged.
 * Returns BNXT_TF_RC_ERROR when the port-db lookup fails.
 */
2017 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2020 enum bnxt_ulp_direction_type dir;
2023 struct ulp_rte_act_prop *act = ¶m->act_prop;
2024 enum bnxt_ulp_intf_type port_type;
2027 /* Get the direction */
2028 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
2029 if (dir == BNXT_ULP_DIR_EGRESS) {
2030 /* For egress direction, fill vport */
2031 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2032 return BNXT_TF_RC_ERROR;
2035 pid = rte_cpu_to_be_32(pid);
2036 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2037 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2039 /* For ingress direction, fill vnic */
2040 port_type = ULP_COMP_FLD_IDX_RD(param,
2041 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2042 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2043 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2045 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2047 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2049 return BNXT_TF_RC_ERROR;
2052 pid = rte_cpu_to_be_32(pid);
2053 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2054 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2057 /* Update the action port set bit */
2058 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2059 return BNXT_TF_RC_SUCCESS;
2062 /* Function to handle the parsing of RTE Flow action PF. */
/*
 * Redirects to the PF of the current (incoming) device: looks up the
 * ulp ifindex for the incoming port, verifies it is a PF, records the
 * interface type, and delegates to ulp_rte_parser_act_port_set().
 */
2064 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2065 struct ulp_rte_parser_params *params)
2069 enum bnxt_ulp_intf_type intf_type;
2071 /* Get the port id of the current device */
2072 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2074 /* Get the port db ifindex */
2075 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2077 BNXT_TF_DBG(ERR, "Invalid port id\n");
2078 return BNXT_TF_RC_ERROR;
2081 /* Check the port is PF port */
2082 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2083 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2084 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2085 return BNXT_TF_RC_ERROR;
2087 /* Update the action properties */
2088 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2089 return ulp_rte_parser_act_port_set(params, ifindex);
2092 /* Function to handle the parsing of RTE Flow action VF. */
/*
 * Redirects to a VF: validates the action conf, rejects the
 * "original" flag, translates the logical VF id to an absolute one
 * (first_vf_id + id), verifies the target is a (trusted) VF, then
 * delegates to ulp_rte_parser_act_port_set().
 */
2094 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2095 struct ulp_rte_parser_params *params)
2097 const struct rte_flow_action_vf *vf_action;
2098 enum bnxt_ulp_intf_type intf_type;
2102 vf_action = action_item->conf;
2104 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2105 return BNXT_TF_RC_PARSE_ERR;
2108 if (vf_action->original) {
2109 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2110 return BNXT_TF_RC_PARSE_ERR;
2113 bp = bnxt_get_bp(params->port_id);
2115 BNXT_TF_DBG(ERR, "Invalid bp\n");
2116 return BNXT_TF_RC_ERROR;
2119 /* vf_action->id is a logical number which in this case is an
2120 * offset from the first VF. So, to get the absolute VF id, the
2121 * offset must be added to the absolute first vf id of that port.
2123 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2124 bp->first_vf_id + vf_action->id,
2126 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2127 return BNXT_TF_RC_ERROR;
2129 /* Check the port is VF port */
2130 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2131 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2132 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2133 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2134 return BNXT_TF_RC_ERROR;
2137 /* Update the action properties */
2138 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2139 return ulp_rte_parser_act_port_set(params, ifindex);
2142 /* Function to handle the parsing of RTE Flow action port_id. */
2144 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
2145 struct ulp_rte_parser_params *param)
2147 const struct rte_flow_action_port_id *port_id = act_item->conf;
2149 enum bnxt_ulp_intf_type intf_type;
2153 "ParseErr: Invalid Argument\n");
2154 return BNXT_TF_RC_PARSE_ERR;
2156 if (port_id->original) {
2158 "ParseErr:Portid Original not supported\n");
2159 return BNXT_TF_RC_PARSE_ERR;
2162 /* Get the port db ifindex */
2163 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2165 BNXT_TF_DBG(ERR, "Invalid port id\n");
2166 return BNXT_TF_RC_ERROR;
2169 /* Get the intf type */
2170 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2172 BNXT_TF_DBG(ERR, "Invalid port type\n");
2173 return BNXT_TF_RC_ERROR;
2176 /* Set the action port */
2177 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2178 return ulp_rte_parser_act_port_set(param, ifindex);
2181 /* Function to handle the parsing of RTE Flow action phy_port. */
2183 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2184 struct ulp_rte_parser_params *prm)
2186 const struct rte_flow_action_phy_port *phy_port;
2190 enum bnxt_ulp_direction_type dir;
2192 phy_port = action_item->conf;
2195 "ParseErr: Invalid Argument\n");
2196 return BNXT_TF_RC_PARSE_ERR;
2199 if (phy_port->original) {
2201 "Parse Err:Port Original not supported\n");
2202 return BNXT_TF_RC_PARSE_ERR;
2204 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2205 if (dir != BNXT_ULP_DIR_EGRESS) {
2207 "Parse Err:Phy ports are valid only for egress\n");
2208 return BNXT_TF_RC_PARSE_ERR;
2210 /* Get the physical port details from port db */
2211 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2214 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2219 pid = rte_cpu_to_be_32(pid);
2220 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2221 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2223 /* Update the action port set bit */
2224 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2225 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2226 BNXT_ULP_INTF_TYPE_PHY_PORT);
2227 return BNXT_TF_RC_SUCCESS;
2230 /* Function to handle the parsing of RTE Flow action pop vlan. */
2232 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2233 struct ulp_rte_parser_params *params)
2235 /* Update the act_bitmap with pop */
2236 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2237 return BNXT_TF_RC_SUCCESS;
2240 /* Function to handle the parsing of RTE Flow action push vlan. */
2242 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2243 struct ulp_rte_parser_params *params)
2245 const struct rte_flow_action_of_push_vlan *push_vlan;
2247 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2249 push_vlan = action_item->conf;
2251 ethertype = push_vlan->ethertype;
2252 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2254 "Parse Err: Ethertype not supported\n");
2255 return BNXT_TF_RC_PARSE_ERR;
2257 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2258 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2259 /* Update the hdr_bitmap with push vlan */
2260 ULP_BITMAP_SET(params->act_bitmap.bits,
2261 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2262 return BNXT_TF_RC_SUCCESS;
2264 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2265 return BNXT_TF_RC_ERROR;
2268 /* Function to handle the parsing of RTE Flow action set vlan id. */
2270 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2271 struct ulp_rte_parser_params *params)
2273 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2275 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2277 vlan_vid = action_item->conf;
2278 if (vlan_vid && vlan_vid->vlan_vid) {
2279 vid = vlan_vid->vlan_vid;
2280 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2281 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2282 /* Update the hdr_bitmap with vlan vid */
2283 ULP_BITMAP_SET(params->act_bitmap.bits,
2284 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2285 return BNXT_TF_RC_SUCCESS;
2287 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2288 return BNXT_TF_RC_ERROR;
2291 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2293 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2294 struct ulp_rte_parser_params *params)
2296 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2298 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2300 vlan_pcp = action_item->conf;
2302 pcp = vlan_pcp->vlan_pcp;
2303 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2304 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2305 /* Update the hdr_bitmap with vlan vid */
2306 ULP_BITMAP_SET(params->act_bitmap.bits,
2307 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2308 return BNXT_TF_RC_SUCCESS;
2310 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2311 return BNXT_TF_RC_ERROR;
2314 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2316 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2317 struct ulp_rte_parser_params *params)
2319 const struct rte_flow_action_set_ipv4 *set_ipv4;
2320 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2322 set_ipv4 = action_item->conf;
2324 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2325 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2326 /* Update the hdr_bitmap with set ipv4 src */
2327 ULP_BITMAP_SET(params->act_bitmap.bits,
2328 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2329 return BNXT_TF_RC_SUCCESS;
2331 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2332 return BNXT_TF_RC_ERROR;
2335 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2337 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2338 struct ulp_rte_parser_params *params)
2340 const struct rte_flow_action_set_ipv4 *set_ipv4;
2341 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2343 set_ipv4 = action_item->conf;
2345 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2346 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2347 /* Update the hdr_bitmap with set ipv4 dst */
2348 ULP_BITMAP_SET(params->act_bitmap.bits,
2349 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2350 return BNXT_TF_RC_SUCCESS;
2352 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2353 return BNXT_TF_RC_ERROR;
2356 /* Function to handle the parsing of RTE Flow action set tp src.*/
2358 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2359 struct ulp_rte_parser_params *params)
2361 const struct rte_flow_action_set_tp *set_tp;
2362 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2364 set_tp = action_item->conf;
2366 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2367 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2368 /* Update the hdr_bitmap with set tp src */
2369 ULP_BITMAP_SET(params->act_bitmap.bits,
2370 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2371 return BNXT_TF_RC_SUCCESS;
2374 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2375 return BNXT_TF_RC_ERROR;
2378 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2380 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2381 struct ulp_rte_parser_params *params)
2383 const struct rte_flow_action_set_tp *set_tp;
2384 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2386 set_tp = action_item->conf;
2388 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2389 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2390 /* Update the hdr_bitmap with set tp dst */
2391 ULP_BITMAP_SET(params->act_bitmap.bits,
2392 BNXT_ULP_ACT_BIT_SET_TP_DST);
2393 return BNXT_TF_RC_SUCCESS;
2396 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2397 return BNXT_TF_RC_ERROR;
2400 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2402 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2403 struct ulp_rte_parser_params *params)
2405 /* Update the act_bitmap with dec ttl */
2406 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2407 return BNXT_TF_RC_SUCCESS;
2410 /* Function to handle the parsing of RTE Flow action JUMP */
2412 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2413 struct ulp_rte_parser_params *params)
2415 /* Update the act_bitmap with dec ttl */
2416 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2417 return BNXT_TF_RC_SUCCESS;
2421 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2422 struct ulp_rte_parser_params *params)
2424 const struct rte_flow_action_sample *sample;
2427 sample = action_item->conf;
2429 /* if SAMPLE bit is set it means this sample action is nested within the
2430 * actions of another sample action; this is not allowed
2432 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2433 BNXT_ULP_ACT_BIT_SAMPLE))
2434 return BNXT_TF_RC_ERROR;
2436 /* a sample action is only allowed as a shared action */
2437 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2438 BNXT_ULP_ACT_BIT_SHARED))
2439 return BNXT_TF_RC_ERROR;
2441 /* only a ratio of 1 i.e. 100% is supported */
2442 if (sample->ratio != 1)
2443 return BNXT_TF_RC_ERROR;
2445 if (!sample->actions)
2446 return BNXT_TF_RC_ERROR;
2448 /* parse the nested actions for a sample action */
2449 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2450 if (ret == BNXT_TF_RC_SUCCESS)
2451 /* Update the act_bitmap with sample */
2452 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SAMPLE);