1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Utility function to skip the void items. */
/*
 * Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries so the
 * caller lands on the next meaningful pattern item.
 * NOTE(review): extraction dropped source lines 16, 18-22 and 24+ (return
 * type, braces, loop body). 'increment' presumably steps the cursor each
 * iteration — verify against the complete source.
 */
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
/* Loop while the current item exists and is a VOID placeholder. */
23 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] in the parser's field bitmap when its mask is
 * non-zero; additionally flags a wildcard match when the mask is not
 * all-ones. Resets the bit when the mask is zero.
 * NOTE(review): line-number gaps (31, 33-34, 36, 40, 44, 46+) — the 'idx'
 * parameter line, braces and the else keyword were dropped by extraction.
 * NOTE(review): '¶ms' below is mojibake for '&params' — repair when
 * restoring the file.
 */
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
35 struct ulp_rte_hdr_field *field;
37 field = ¶ms->hdr_field[idx];
/* Non-zero mask: this field participates in the match. */
38 if (ulp_bitmap_notzero(field->mask, field->size)) {
39 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial mask (not all ones) means a wildcard-match entry is needed. */
41 if (!ulp_bitmap_is_ones(field->mask, field->size))
42 ULP_BITMAP_SET(params->fld_bitmap.bits,
43 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
/* Zero mask: field is not matched — clear its bit (else-branch). */
45 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
49 /* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec (field->size bytes) and — per the
 * callers, which chain the return value — presumably returns the next
 * hdr_field slot. NOTE(review): extraction dropped lines 52-55 and 57+
 * (remaining parameters, size assignment, return, braces); confirm
 * against the complete source.
 */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
56 memcpy(field->spec, buffer, field->size);
61 /* Utility function to copy field masks items */
/*
 * Copies 'size' bytes of 'buffer' into the mask of hdr_field[*idx] and
 * refreshes the field bitmap for that index. NOTE(review): lines 62,
 * 64-67, 69, 72+ missing (return type, remaining params, *idx increment,
 * braces) — '¶ms' is mojibake for '&params'.
 */
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
68 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
70 memcpy(field->mask, buffer, size);
/* Re-evaluate match/wildcard bits now that the mask changed. */
71 ulp_rte_parser_field_bitmap_update(params, *idx);
76 * Function to handle the parsing of RTE Flows and placing
77 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow pattern array until RTE_FLOW_ITEM_TYPE_END,
 * dispatching each item to its registered handler from ulp_hdr_info[].
 * Returns BNXT_TF_RC_PARSE_ERR for unsupported item types,
 * BNXT_TF_RC_ERROR on a handler failure, otherwise the result of the
 * implicit SVIF match-port processing.
 * NOTE(review): numbering gaps (78-79, 82, 85, 87, 91, 97, 99, 107-111)
 * — return type, braces, the BNXT_TF_DBG macro line and the item
 * increment were dropped by extraction.
 */
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81 struct ulp_rte_parser_params *params)
83 const struct rte_flow_item *item = pattern;
84 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading SVIF slots before protocol fields are filled. */
86 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
88 /* Set the computed flags for no vlan tags before parsing */
89 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
90 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
92 /* Parse all the items in the pattern */
93 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
94 /* get the header information from the flow_hdr_info table */
95 hdr_info = &ulp_hdr_info[item->type];
96 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
98 "Truflow parser does not support type %d\n",
100 return BNXT_TF_RC_PARSE_ERR;
101 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
102 /* call the registered callback handler */
103 if (hdr_info->proto_hdr_func) {
104 if (hdr_info->proto_hdr_func(item, params) !=
105 BNXT_TF_RC_SUCCESS) {
106 return BNXT_TF_RC_ERROR;
112 /* update the implied SVIF */
113 return ulp_rte_parser_implicit_match_port_process(params);
117 * Function to handle the parsing of RTE Flows and placing
118 * the RTE flow actions into the ulp structures.
/*
 * Action-side counterpart of bnxt_ulp_rte_parser_hdr_parse(): walks the
 * rte_flow action array until RTE_FLOW_ACTION_TYPE_END, dispatching each
 * action to its handler from ulp_act_info[]. Unsupported or failing
 * actions yield BNXT_TF_RC_ERROR; on success the implicit action port is
 * applied and BNXT_TF_RC_SUCCESS returned.
 * NOTE(review): numbering gaps (119-120, 123, 126, 133, 135, 142,
 * 145-149) — return type, braces, debug-macro lines and the
 * action_item increment were dropped by extraction.
 */
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122 struct ulp_rte_parser_params *params)
124 const struct rte_flow_action *action_item = actions;
125 struct bnxt_ulp_rte_act_info *hdr_info;
127 /* Parse all the items in the pattern */
128 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
129 /* get the header information from the flow_hdr_info table */
130 hdr_info = &ulp_act_info[action_item->type];
131 if (hdr_info->act_type ==
132 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
134 "Truflow parser does not support act %u\n",
136 return BNXT_TF_RC_ERROR;
137 } else if (hdr_info->act_type ==
138 BNXT_ULP_ACT_TYPE_SUPPORTED) {
139 /* call the registered callback handler */
140 if (hdr_info->proto_act_func) {
141 if (hdr_info->proto_act_func(action_item,
143 BNXT_TF_RC_SUCCESS) {
144 return BNXT_TF_RC_ERROR;
150 /* update the implied port details */
151 ulp_rte_parser_implicit_act_port_process(params);
152 return BNXT_TF_RC_SUCCESS;
156 * Function to handle the post processing of the parsing details
/*
 * After both pattern and action parsing: reads the computed direction and
 * port types, stamps the egress-direction bit into both the header and
 * action bitmaps for egress flows, and sets the VF-to-VF computed field
 * when a VF representor matches to a VF representor.
 * NOTE(review): gaps at 157-158, 160, 164, 173, 180-181, 186, 188+ —
 * return type, braces and any ingress branch were dropped by extraction.
 */
159 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
161 enum bnxt_ulp_direction_type dir;
162 enum bnxt_ulp_intf_type match_port_type, act_port_type;
163 uint32_t act_port_set;
165 /* Get the computed details */
166 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
167 match_port_type = ULP_COMP_FLD_IDX_RD(params,
168 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
169 act_port_type = ULP_COMP_FLD_IDX_RD(params,
170 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
171 act_port_set = ULP_COMP_FLD_IDX_RD(params,
172 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
174 /* set the flow direction in the proto and action header */
175 if (dir == BNXT_ULP_DIR_EGRESS) {
176 ULP_BITMAP_SET(params->hdr_bitmap.bits,
177 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
178 ULP_BITMAP_SET(params->act_bitmap.bits,
179 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
182 /* calculate the VF to VF flag */
183 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
184 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
185 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
187 /* TBD: Handle the flow rejection scenarios */
192 * Function to compute the flow direction based on the match port details
/*
 * Computes and stores BNXT_ULP_CF_IDX_DIRECTION: an ingress-attr flow
 * that matches a VF representor is treated as egress (traffic leaves the
 * host toward the VF); otherwise the direction follows the flow
 * attribute (ingress attr -> ingress, else egress).
 * NOTE(review): gaps at 193-194, 196, 198, 202, 208, 213, 216+ — return
 * type, braces, the 'return'/'else' around the VF-rep special case were
 * dropped; the control flow inferred above needs confirming.
 */
195 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
197 enum bnxt_ulp_intf_type match_port_type;
199 /* Get the match port type */
200 match_port_type = ULP_COMP_FLD_IDX_RD(params,
201 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
203 /* If ingress flow and matchport is vf rep then dir is egress*/
204 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
205 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
206 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
207 BNXT_ULP_DIR_EGRESS);
209 /* Assign the input direction */
210 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
211 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
212 BNXT_ULP_DIR_INGRESS);
/* else-branch (keyword dropped by extraction): default to egress. */
214 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
215 BNXT_ULP_DIR_EGRESS);
219 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolves and records the source VIF (SVIF) for the flow's match port:
 * rejects a second SVIF, looks up the port type via the port DB, computes
 * direction, picks the SVIF kind (phy port for ingress; VF-func or
 * drv-func for egress depending on port type), then writes the
 * big-endian SVIF spec/mask into the dedicated hdr_field slot and caches
 * it in the SVIF_FLAG computed field.
 * NOTE(review): gaps at 220, 222-225, 230, 233, 236-237, 243-244, 247,
 * 250, 255, 258, 260, 262, 271+ — return type, 'ifindex'/'mask'/'svif'
 * declarations, braces and debug-macro lines were dropped by extraction.
 * '¶ms' is mojibake for '&params'.
 */
221 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
226 enum bnxt_ulp_direction_type dir;
227 struct ulp_rte_hdr_field *hdr_field;
228 enum bnxt_ulp_svif_type svif_type;
229 enum bnxt_ulp_intf_type port_type;
/* Only one source of SVIF is allowed per flow. */
231 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
232 BNXT_ULP_INVALID_SVIF_VAL) {
234 "SVIF already set,multiple source not support'd\n");
235 return BNXT_TF_RC_ERROR;
238 /* Get port type details */
239 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
240 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
241 BNXT_TF_DBG(ERR, "Invalid port type\n");
242 return BNXT_TF_RC_ERROR;
245 /* Update the match port type */
246 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
248 /* compute the direction */
249 bnxt_ulp_rte_parser_direction_compute(params);
251 /* Get the computed direction */
252 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
253 if (dir == BNXT_ULP_DIR_INGRESS) {
254 svif_type = BNXT_ULP_PHY_PORT_SVIF;
/* Egress branch (else dropped): choose VF-func vs drv-func SVIF. */
256 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
257 svif_type = BNXT_ULP_VF_FUNC_SVIF;
259 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
261 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* SVIF is stored big-endian in the match field. */
263 svif = rte_cpu_to_be_16(svif);
264 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
265 memcpy(hdr_field->spec, &svif, sizeof(svif));
266 memcpy(hdr_field->mask, &mask, sizeof(mask));
267 hdr_field->size = sizeof(svif);
/* Cache the CPU-order SVIF so later items detect a duplicate set. */
268 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
269 rte_be_to_cpu_16(svif));
270 return BNXT_TF_RC_SUCCESS;
273 /* Function to handle the parsing of the RTE port id */
/*
 * If no pattern item set the SVIF, derives it implicitly from the
 * incoming interface: converts the DPDK port id to a ulp ifindex and
 * sets the SVIF with a full 0xFFFF mask.
 * NOTE(review): gaps at 274, 276, 279, 281, 285, 288, 290-291, 293-295,
 * 298-299 — return type, 'ifindex' declaration, braces and the final
 * 'return rc' were dropped by extraction.
 */
275 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
277 uint16_t port_id = 0;
278 uint16_t svif_mask = 0xFFFF;
280 int32_t rc = BNXT_TF_RC_ERROR;
/* SVIF already established by an explicit item — nothing to do. */
282 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
283 BNXT_ULP_INVALID_SVIF_VAL)
284 return BNXT_TF_RC_SUCCESS;
286 /* SVIF not set. So get the port id */
287 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
289 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
292 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
296 /* Update the SVIF details */
297 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
301 /* Function to handle the implicit action port id */
/*
 * If no explicit port action was parsed, synthesizes a port_id action
 * from the incoming interface and feeds it through the port-id action
 * handler, then clears ACT_PORT_IS_SET so the port counts as implicit.
 * NOTE(review): gaps at 302, 304, 307, 312, 315, 318, 322+ — return
 * type, braces and the closing of the early-return block were dropped.
 */
303 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
305 struct rte_flow_action action_item = {0};
306 struct rte_flow_action_port_id port_id = {0};
308 /* Read the action port set bit */
309 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
310 /* Already set, so just exit */
311 return BNXT_TF_RC_SUCCESS;
313 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
314 action_item.conf = &port_id;
316 /* Update the action port based on incoming port */
317 ulp_rte_port_id_act_handler(&action_item, params);
319 /* Reset the action port set bit */
320 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
321 return BNXT_TF_RC_SUCCESS;
324 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF pattern-item handler: the item carries no data, so the match port
 * is the incoming DPDK port; convert it to a ulp ifindex and set the
 * SVIF with a full mask.
 * NOTE(review): gaps at 325, 328, 331-332, 335, 338-339, 342-343, 346 —
 * return type, 'ifindex' declaration and braces were dropped.
 */
326 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
327 struct ulp_rte_parser_params *params)
329 uint16_t port_id = 0;
330 uint16_t svif_mask = 0xFFFF;
333 /* Get the implicit port id */
334 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
336 /* perform the conversion from dpdk port to bnxt ifindex */
337 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
340 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
341 return BNXT_TF_RC_ERROR;
344 /* Update the SVIF details */
345 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
348 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF pattern-item handler: validates the VF spec/mask, converts the VF
 * function id to a ulp ifindex via the port DB, and sets the SVIF.
 * NOTE(review): gaps at 349, 352, 355-356, 358, 360, 362-364, 366-369,
 * 372-373, 375-376, 379 — return type, 'ifindex'/'mask' declarations,
 * the NULL-checks around the debug messages and braces were dropped.
 */
350 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
351 struct ulp_rte_parser_params *params)
353 const struct rte_flow_item_vf *vf_spec = item->spec;
354 const struct rte_flow_item_vf *vf_mask = item->mask;
357 int32_t rc = BNXT_TF_RC_PARSE_ERR;
359 /* Get VF rte_flow_item for Port details */
361 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
365 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
370 /* perform the conversion from VF Func id to bnxt ifindex */
371 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
374 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
377 /* Update the SVIF details */
378 return ulp_rte_parser_svif_set(params, ifindex, mask);
381 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * PORT_ID pattern-item handler: validates spec/mask, takes the mask's id
 * as the SVIF mask, converts the DPDK port id to a ulp ifindex, and sets
 * the SVIF. NOTE(review): gaps at 382, 385, 388, 390-392, 394-396,
 * 398-399, 401, 404-405, 407-408, 411 — return type, 'ifindex'/'mask'
 * declarations, the NULL-checks and braces were dropped.
 */
383 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
384 struct ulp_rte_parser_params *params)
386 const struct rte_flow_item_port_id *port_spec = item->spec;
387 const struct rte_flow_item_port_id *port_mask = item->mask;
389 int32_t rc = BNXT_TF_RC_PARSE_ERR;
393 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
397 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
400 mask = port_mask->id;
402 /* perform the conversion from dpdk port to bnxt ifindex */
403 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
406 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
409 /* Update the SVIF details */
410 return ulp_rte_parser_svif_set(params, ifindex, mask);
413 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * PHY_PORT pattern-item handler: validates spec/mask, forces the match
 * port type to PHY_PORT, computes direction (phy ports are only valid
 * for ingress), looks up the physical port's SVIF from the port DB, and
 * writes the big-endian SVIF spec/mask into the SVIF hdr_field slot.
 * NOTE(review): gaps at 414, 417, 420, 422, 425, 427, 429-431, 433-434,
 * 436, 440, 443, 447, 450-451, 454-455, 458-459, 469+ — return type,
 * 'svif'/'mask' declarations, NULL-checks, the rc!=0 check and braces
 * were dropped. '¶ms' is mojibake for '&params'.
 */
415 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
416 struct ulp_rte_parser_params *params)
418 const struct rte_flow_item_phy_port *port_spec = item->spec;
419 const struct rte_flow_item_phy_port *port_mask = item->mask;
421 int32_t rc = BNXT_TF_RC_ERROR;
423 enum bnxt_ulp_direction_type dir;
424 struct ulp_rte_hdr_field *hdr_field;
426 /* Copy the rte_flow_item for phy port into hdr_field */
428 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
432 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
435 mask = port_mask->index;
437 /* Update the match port type */
438 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
439 BNXT_ULP_INTF_TYPE_PHY_PORT);
441 /* Compute the Hw direction */
442 bnxt_ulp_rte_parser_direction_compute(params);
444 /* Direction validation */
445 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
446 if (dir == BNXT_ULP_DIR_EGRESS) {
448 "Parse Err:Phy ports are valid only for ingress\n");
449 return BNXT_TF_RC_PARSE_ERR;
452 /* Get the physical port details from port db */
453 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
456 BNXT_TF_DBG(ERR, "Failed to get port details\n");
457 return BNXT_TF_RC_PARSE_ERR;
460 /* Update the SVIF details */
461 svif = rte_cpu_to_be_16(svif);
462 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
463 memcpy(hdr_field->spec, &svif, sizeof(svif));
464 memcpy(hdr_field->mask, &mask, sizeof(mask));
465 hdr_field->size = sizeof(svif);
/* Cache the CPU-order SVIF to flag that the SVIF has been set. */
466 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
467 rte_be_to_cpu_16(svif));
468 return BNXT_TF_RC_SUCCESS;
471 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * ETH pattern-item handler: copies dst MAC, src MAC and ethertype
 * spec/mask into consecutive hdr_field slots, reserves the VLAN field
 * slots that may follow, and sets O_ETH or I_ETH in the header bitmap —
 * inner if an outer ETH was already seen, outer otherwise.
 * NOTE(review): gaps at 472, 475, 481-483, 485-487, 491, 495, 497,
 * 499-500, 507, 512, 516, 518, 521, 524, 526 — return type, 'size'
 * declaration, NULL-checks around spec/mask copies, braces and the
 * else keyword were dropped. '¶ms' is mojibake for '&params'.
 */
473 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
474 struct ulp_rte_parser_params *params)
476 const struct rte_flow_item_eth *eth_spec = item->spec;
477 const struct rte_flow_item_eth *eth_mask = item->mask;
478 struct ulp_rte_hdr_field *field;
479 uint32_t idx = params->field_idx;
480 uint64_t set_flag = 0;
484 * Copy the rte_flow_item for eth into hdr_field using ethernet
488 size = sizeof(eth_spec->dst.addr_bytes);
489 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
490 eth_spec->dst.addr_bytes,
492 size = sizeof(eth_spec->src.addr_bytes);
493 field = ulp_rte_parser_fld_copy(field,
494 eth_spec->src.addr_bytes,
496 field = ulp_rte_parser_fld_copy(field,
498 sizeof(eth_spec->type));
501 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
502 sizeof(eth_mask->dst.addr_bytes));
503 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
504 sizeof(eth_mask->src.addr_bytes));
505 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
506 sizeof(eth_mask->type));
508 /* Add number of vlan header elements */
509 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
510 params->vlan_idx = params->field_idx;
511 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
513 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
514 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
515 BNXT_ULP_HDR_BIT_O_ETH);
/* Outer ETH already present -> this one is the inner ETH. */
517 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
519 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
520 BNXT_ULP_HDR_BIT_I_ETH);
522 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
523 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
525 return BNXT_TF_RC_SUCCESS;
528 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * VLAN pattern-item handler: splits the TCI into priority (top 3 bits)
 * and tag, copies priority/tag/inner_type spec and mask into the VLAN
 * hdr_field slots starting at params->vlan_idx, then classifies the tag
 * as OO (first outer), OI (second outer), IO (first inner) or II
 * (second inner) based on which ETH headers have been seen and the
 * current outer/inner vtag counts, updating the computed vtag fields
 * and header bitmap accordingly. Errors if no ETH header preceded it.
 * NOTE(review): many numbering gaps (529, 532, 541-546, 549, 551,
 * 553-554, 556-557, 561-563, 566, 568, 571, 573, 575, 578, 584,
 * 589-591, 593, 602, 604, 610-613, 615, 624, 626, 631, 634, 636) —
 * return type, NULL-checks, field advances between the mask memcpys,
 * the vtag_num increments, and braces were dropped by extraction.
 * '¶ms' is mojibake for '&params'.
 */
530 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
531 struct ulp_rte_parser_params *params)
533 const struct rte_flow_item_vlan *vlan_spec = item->spec;
534 const struct rte_flow_item_vlan *vlan_mask = item->mask;
535 struct ulp_rte_hdr_field *field;
536 struct ulp_rte_hdr_bitmap *hdr_bit;
537 uint32_t idx = params->vlan_idx;
538 uint16_t vlan_tag, priority;
539 uint32_t outer_vtag_num;
540 uint32_t inner_vtag_num;
543 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* TCI: PCP in the top 3 bits, VID below (see IEEE 802.1Q). */
547 vlan_tag = ntohs(vlan_spec->tci);
548 priority = htons(vlan_tag >> 13);
550 vlan_tag = htons(vlan_tag);
552 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
555 field = ulp_rte_parser_fld_copy(field,
558 field = ulp_rte_parser_fld_copy(field,
559 &vlan_spec->inner_type,
560 sizeof(vlan_spec->inner_type));
/* Same PCP/VID split for the mask. */
564 vlan_tag = ntohs(vlan_mask->tci);
565 priority = htons(vlan_tag >> 13);
567 vlan_tag = htons(vlan_tag);
569 field = ¶ms->hdr_field[idx];
570 memcpy(field->mask, &priority, field->size);
572 memcpy(field->mask, &vlan_tag, field->size);
574 memcpy(field->mask, &vlan_mask->inner_type, field->size);
576 /* Set the vlan index to new incremented value */
577 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
579 /* Get the outer tag and inner tag counts */
580 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
581 BNXT_ULP_CF_IDX_O_VTAG_NUM);
582 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
583 BNXT_ULP_CF_IDX_I_VTAG_NUM);
585 /* Update the hdr_bitmap of the vlans */
586 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first VLAN after outer ETH only -> outer-outer (OO). */
587 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
588 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
590 /* Update the vlan tag num */
592 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
594 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0)
595 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
596 ULP_BITMAP_SET(params->hdr_bitmap.bits,
597 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Case 2: second VLAN on the outer ETH -> outer-inner (OI). */
598 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
599 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
600 outer_vtag_num == 1) {
601 /* update the vlan tag num */
603 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
605 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
607 ULP_BITMAP_SET(params->hdr_bitmap.bits,
608 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Case 3: first VLAN after the inner ETH -> inner-outer (IO). */
609 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
610 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
612 /* update the vlan tag num */
614 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
616 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
617 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
618 ULP_BITMAP_SET(params->hdr_bitmap.bits,
619 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Case 4: second VLAN on the inner ETH -> inner-inner (II). */
620 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
621 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
622 inner_vtag_num == 1) {
623 /* update the vlan tag num */
625 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
627 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
628 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
629 ULP_BITMAP_SET(params->hdr_bitmap.bits,
630 BNXT_ULP_HDR_BIT_II_VLAN);
/* else-branch: VLAN with no preceding ETH header is a parse error. */
632 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
633 return BNXT_TF_RC_ERROR;
635 return BNXT_TF_RC_SUCCESS;
638 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * IPV4 pattern-item handler: rejects a third L3 header, copies every
 * IPv4 header field's spec and mask into consecutive hdr_field slots
 * (TOS mask deliberately skipped unless ULP_DONT_IGNORE_TOS — OVS sets
 * it as an unsupported wildcard), advances field_idx, then marks the
 * header as inner IPv4 if an outer L3 already exists, else outer IPv4,
 * incrementing the corresponding computed L3 counter.
 * NOTE(review): numbering gaps (639, 642, 648, 650, 652, 655-657,
 * 659-661, 665, 669, 673, 677, 681, 685, 689, 693, 697, 701-703,
 * 711-712, 716-719, 744, 747, 750, 754, 756, 758, 760, 762) — return
 * type, 'size' declaration, NULL-checks, #else/#endif of the TOS
 * conditional, the l3 counter increments and braces were dropped.
 * '¶ms' is mojibake for '&params'.
 */
640 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
641 struct ulp_rte_parser_params *params)
643 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
644 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
645 struct ulp_rte_hdr_field *field;
646 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
647 uint32_t idx = params->field_idx;
649 uint32_t inner_l3, outer_l3;
651 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
653 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
654 return BNXT_TF_RC_ERROR;
658 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
662 size = sizeof(ipv4_spec->hdr.version_ihl);
663 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
664 &ipv4_spec->hdr.version_ihl,
666 size = sizeof(ipv4_spec->hdr.type_of_service);
667 field = ulp_rte_parser_fld_copy(field,
668 &ipv4_spec->hdr.type_of_service,
670 size = sizeof(ipv4_spec->hdr.total_length);
671 field = ulp_rte_parser_fld_copy(field,
672 &ipv4_spec->hdr.total_length,
674 size = sizeof(ipv4_spec->hdr.packet_id);
675 field = ulp_rte_parser_fld_copy(field,
676 &ipv4_spec->hdr.packet_id,
678 size = sizeof(ipv4_spec->hdr.fragment_offset);
679 field = ulp_rte_parser_fld_copy(field,
680 &ipv4_spec->hdr.fragment_offset,
682 size = sizeof(ipv4_spec->hdr.time_to_live);
683 field = ulp_rte_parser_fld_copy(field,
684 &ipv4_spec->hdr.time_to_live,
686 size = sizeof(ipv4_spec->hdr.next_proto_id);
687 field = ulp_rte_parser_fld_copy(field,
688 &ipv4_spec->hdr.next_proto_id,
690 size = sizeof(ipv4_spec->hdr.hdr_checksum);
691 field = ulp_rte_parser_fld_copy(field,
692 &ipv4_spec->hdr.hdr_checksum,
694 size = sizeof(ipv4_spec->hdr.src_addr);
695 field = ulp_rte_parser_fld_copy(field,
696 &ipv4_spec->hdr.src_addr,
698 size = sizeof(ipv4_spec->hdr.dst_addr);
699 field = ulp_rte_parser_fld_copy(field,
700 &ipv4_spec->hdr.dst_addr,
704 ulp_rte_prsr_mask_copy(params, &idx,
705 &ipv4_mask->hdr.version_ihl,
706 sizeof(ipv4_mask->hdr.version_ihl));
707 #ifdef ULP_DONT_IGNORE_TOS
708 ulp_rte_prsr_mask_copy(params, &idx,
709 &ipv4_mask->hdr.type_of_service,
710 sizeof(ipv4_mask->hdr.type_of_service));
713 * The tos field is ignored since OVS is setting it as wild card
714 * match and it is not supported. This is a work around and
715 * shall be addressed in the future.
720 ulp_rte_prsr_mask_copy(params, &idx,
721 &ipv4_mask->hdr.total_length,
722 sizeof(ipv4_mask->hdr.total_length));
723 ulp_rte_prsr_mask_copy(params, &idx,
724 &ipv4_mask->hdr.packet_id,
725 sizeof(ipv4_mask->hdr.packet_id));
726 ulp_rte_prsr_mask_copy(params, &idx,
727 &ipv4_mask->hdr.fragment_offset,
728 sizeof(ipv4_mask->hdr.fragment_offset));
729 ulp_rte_prsr_mask_copy(params, &idx,
730 &ipv4_mask->hdr.time_to_live,
731 sizeof(ipv4_mask->hdr.time_to_live));
732 ulp_rte_prsr_mask_copy(params, &idx,
733 &ipv4_mask->hdr.next_proto_id,
734 sizeof(ipv4_mask->hdr.next_proto_id));
735 ulp_rte_prsr_mask_copy(params, &idx,
736 &ipv4_mask->hdr.hdr_checksum,
737 sizeof(ipv4_mask->hdr.hdr_checksum));
738 ulp_rte_prsr_mask_copy(params, &idx,
739 &ipv4_mask->hdr.src_addr,
740 sizeof(ipv4_mask->hdr.src_addr));
741 ulp_rte_prsr_mask_copy(params, &idx,
742 &ipv4_mask->hdr.dst_addr,
743 sizeof(ipv4_mask->hdr.dst_addr));
745 /* Add the number of ipv4 header elements */
746 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
748 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
749 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
/* An outer L3 (v4 or v6) already seen -> this is the inner IPv4. */
751 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
752 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
753 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
755 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
757 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
759 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
761 return BNXT_TF_RC_SUCCESS;
764 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPV6 pattern-item handler: rejects a third L3 header, decomposes
 * vtc_flow into version / traffic-class / flow-label via the
 * BNXT_ULP_GET_IPV6_* accessors and copies those plus payload_len,
 * proto, hop_limits, src/dst addresses (spec and mask) into hdr_field
 * slots, then marks inner vs outer IPv6 exactly like the IPv4 handler.
 * NOTE(review): numbering gaps (765, 768, 774, 777, 779, 782-784,
 * 786-788, 790, 793-795, 798-800, 803-805, 809, 813, 817, 821,
 * 825-827, 829, 832-834, 837-840, 843-845, 861, 864, 867, 872, 875,
 * 877) — return type, 'size' declaration, NULL-checks, the vtcf mask
 * destination/size arguments and braces were dropped by extraction.
 * '¶ms' is mojibake for '&params'.
 */
766 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
767 struct ulp_rte_parser_params *params)
769 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
770 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
771 struct ulp_rte_hdr_field *field;
772 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
773 uint32_t idx = params->field_idx;
775 uint32_t inner_l3, outer_l3;
776 uint32_t vtcf, vtcf_mask;
778 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
780 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
781 return BNXT_TF_RC_ERROR;
785 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
789 size = sizeof(ipv6_spec->hdr.vtc_flow);
/* vtc_flow is split into three match fields: version, TC, flow label. */
791 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
792 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
796 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
797 field = ulp_rte_parser_fld_copy(field,
801 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
802 field = ulp_rte_parser_fld_copy(field,
806 size = sizeof(ipv6_spec->hdr.payload_len);
807 field = ulp_rte_parser_fld_copy(field,
808 &ipv6_spec->hdr.payload_len,
810 size = sizeof(ipv6_spec->hdr.proto);
811 field = ulp_rte_parser_fld_copy(field,
812 &ipv6_spec->hdr.proto,
814 size = sizeof(ipv6_spec->hdr.hop_limits);
815 field = ulp_rte_parser_fld_copy(field,
816 &ipv6_spec->hdr.hop_limits,
818 size = sizeof(ipv6_spec->hdr.src_addr);
819 field = ulp_rte_parser_fld_copy(field,
820 &ipv6_spec->hdr.src_addr,
822 size = sizeof(ipv6_spec->hdr.dst_addr);
823 field = ulp_rte_parser_fld_copy(field,
824 &ipv6_spec->hdr.dst_addr,
828 size = sizeof(ipv6_mask->hdr.vtc_flow);
830 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
831 ulp_rte_prsr_mask_copy(params, &idx,
835 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
836 ulp_rte_prsr_mask_copy(params, &idx,
841 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
842 ulp_rte_prsr_mask_copy(params, &idx,
846 ulp_rte_prsr_mask_copy(params, &idx,
847 &ipv6_mask->hdr.payload_len,
848 sizeof(ipv6_mask->hdr.payload_len));
849 ulp_rte_prsr_mask_copy(params, &idx,
850 &ipv6_mask->hdr.proto,
851 sizeof(ipv6_mask->hdr.proto));
852 ulp_rte_prsr_mask_copy(params, &idx,
853 &ipv6_mask->hdr.hop_limits,
854 sizeof(ipv6_mask->hdr.hop_limits));
855 ulp_rte_prsr_mask_copy(params, &idx,
856 &ipv6_mask->hdr.src_addr,
857 sizeof(ipv6_mask->hdr.src_addr));
858 ulp_rte_prsr_mask_copy(params, &idx,
859 &ipv6_mask->hdr.dst_addr,
860 sizeof(ipv6_mask->hdr.dst_addr));
862 /* add number of ipv6 header elements */
863 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
865 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
866 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
/* An outer L3 already seen -> this is the inner IPv6. */
868 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
869 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
870 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
871 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
873 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
874 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
876 return BNXT_TF_RC_SUCCESS;
879 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * UDP pattern-item handler: rejects a third L4 header, copies src/dst
 * port, datagram length and checksum spec/mask into hdr_field slots,
 * advances field_idx, then marks inner vs outer UDP depending on
 * whether an outer L4 (UDP or TCP) was already seen.
 * NOTE(review): numbering gaps (880, 883, 889, 891, 893, 896-898,
 * 900-902, 906, 910, 914, 918-920, 933-934, 937, 940, 945, 948,
 * 950) — return type, 'size' declaration, NULL-checks, braces and the
 * outer-l4 'if' opening line were dropped by extraction. '¶ms' is
 * mojibake for '&params'.
 */
881 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
882 struct ulp_rte_parser_params *params)
884 const struct rte_flow_item_udp *udp_spec = item->spec;
885 const struct rte_flow_item_udp *udp_mask = item->mask;
886 struct ulp_rte_hdr_field *field;
887 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
888 uint32_t idx = params->field_idx;
890 uint32_t inner_l4, outer_l4;
892 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
894 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
895 return BNXT_TF_RC_ERROR;
899 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
903 size = sizeof(udp_spec->hdr.src_port);
904 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
905 &udp_spec->hdr.src_port,
907 size = sizeof(udp_spec->hdr.dst_port);
908 field = ulp_rte_parser_fld_copy(field,
909 &udp_spec->hdr.dst_port,
911 size = sizeof(udp_spec->hdr.dgram_len);
912 field = ulp_rte_parser_fld_copy(field,
913 &udp_spec->hdr.dgram_len,
915 size = sizeof(udp_spec->hdr.dgram_cksum);
916 field = ulp_rte_parser_fld_copy(field,
917 &udp_spec->hdr.dgram_cksum,
921 ulp_rte_prsr_mask_copy(params, &idx,
922 &udp_mask->hdr.src_port,
923 sizeof(udp_mask->hdr.src_port));
924 ulp_rte_prsr_mask_copy(params, &idx,
925 &udp_mask->hdr.dst_port,
926 sizeof(udp_mask->hdr.dst_port));
927 ulp_rte_prsr_mask_copy(params, &idx,
928 &udp_mask->hdr.dgram_len,
929 sizeof(udp_mask->hdr.dgram_len));
930 ulp_rte_prsr_mask_copy(params, &idx,
931 &udp_mask->hdr.dgram_cksum,
932 sizeof(udp_mask->hdr.dgram_cksum));
935 /* Add number of UDP header elements */
936 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
938 /* Set the udp header bitmap and computed l4 header bitmaps */
939 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
/* An outer L4 already seen -> this is the inner UDP. */
941 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
942 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
943 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
944 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
946 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
947 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
949 return BNXT_TF_RC_SUCCESS;
952 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * TCP pattern-item handler: rejects a third L4 header, copies all TCP
 * header fields (ports, seq/ack, data_off, flags, window, checksum,
 * urgent pointer) spec/mask into hdr_field slots, advances field_idx,
 * then marks inner vs outer TCP like the UDP handler. Note the local
 * 'idx += BNXT_ULP_PROTO_HDR_TCP_NUM' (line 1013) appears in a
 * spec-absent path — extraction dropped its enclosing condition.
 * NOTE(review): numbering gaps (953, 956, 962, 964, 966, 969-971,
 * 973-975, 979, 983, 987, 991, 995, 999, 1003, 1007, 1011-1012,
 * 1014-1016, 1044, 1047, 1050, 1055, 1058, 1060) — return type, 'size'
 * declaration, NULL-checks, braces and the outer-l4 'if' opening line
 * were dropped. '¶ms' is mojibake for '&params'.
 */
954 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
955 struct ulp_rte_parser_params *params)
957 const struct rte_flow_item_tcp *tcp_spec = item->spec;
958 const struct rte_flow_item_tcp *tcp_mask = item->mask;
959 struct ulp_rte_hdr_field *field;
960 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
961 uint32_t idx = params->field_idx;
963 uint32_t inner_l4, outer_l4;
965 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
967 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
968 return BNXT_TF_RC_ERROR;
972 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
976 size = sizeof(tcp_spec->hdr.src_port);
977 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
978 &tcp_spec->hdr.src_port,
980 size = sizeof(tcp_spec->hdr.dst_port);
981 field = ulp_rte_parser_fld_copy(field,
982 &tcp_spec->hdr.dst_port,
984 size = sizeof(tcp_spec->hdr.sent_seq);
985 field = ulp_rte_parser_fld_copy(field,
986 &tcp_spec->hdr.sent_seq,
988 size = sizeof(tcp_spec->hdr.recv_ack);
989 field = ulp_rte_parser_fld_copy(field,
990 &tcp_spec->hdr.recv_ack,
992 size = sizeof(tcp_spec->hdr.data_off);
993 field = ulp_rte_parser_fld_copy(field,
994 &tcp_spec->hdr.data_off,
996 size = sizeof(tcp_spec->hdr.tcp_flags);
997 field = ulp_rte_parser_fld_copy(field,
998 &tcp_spec->hdr.tcp_flags,
1000 size = sizeof(tcp_spec->hdr.rx_win);
1001 field = ulp_rte_parser_fld_copy(field,
1002 &tcp_spec->hdr.rx_win,
1004 size = sizeof(tcp_spec->hdr.cksum);
1005 field = ulp_rte_parser_fld_copy(field,
1006 &tcp_spec->hdr.cksum,
1008 size = sizeof(tcp_spec->hdr.tcp_urp);
1009 field = ulp_rte_parser_fld_copy(field,
1010 &tcp_spec->hdr.tcp_urp,
/* Skip the spec slots wholesale (branch condition dropped above). */
1013 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1017 ulp_rte_prsr_mask_copy(params, &idx,
1018 &tcp_mask->hdr.src_port,
1019 sizeof(tcp_mask->hdr.src_port));
1020 ulp_rte_prsr_mask_copy(params, &idx,
1021 &tcp_mask->hdr.dst_port,
1022 sizeof(tcp_mask->hdr.dst_port));
1023 ulp_rte_prsr_mask_copy(params, &idx,
1024 &tcp_mask->hdr.sent_seq,
1025 sizeof(tcp_mask->hdr.sent_seq));
1026 ulp_rte_prsr_mask_copy(params, &idx,
1027 &tcp_mask->hdr.recv_ack,
1028 sizeof(tcp_mask->hdr.recv_ack));
1029 ulp_rte_prsr_mask_copy(params, &idx,
1030 &tcp_mask->hdr.data_off,
1031 sizeof(tcp_mask->hdr.data_off));
1032 ulp_rte_prsr_mask_copy(params, &idx,
1033 &tcp_mask->hdr.tcp_flags,
1034 sizeof(tcp_mask->hdr.tcp_flags));
1035 ulp_rte_prsr_mask_copy(params, &idx,
1036 &tcp_mask->hdr.rx_win,
1037 sizeof(tcp_mask->hdr.rx_win));
1038 ulp_rte_prsr_mask_copy(params, &idx,
1039 &tcp_mask->hdr.cksum,
1040 sizeof(tcp_mask->hdr.cksum));
1041 ulp_rte_prsr_mask_copy(params, &idx,
1042 &tcp_mask->hdr.tcp_urp,
1043 sizeof(tcp_mask->hdr.tcp_urp));
1045 /* add number of TCP header elements */
1046 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1048 /* Set the udp header bitmap and computed l4 header bitmaps */
1049 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
/* An outer L4 already seen -> this is the inner TCP. */
1051 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1052 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1053 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1054 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1056 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1057 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1059 return BNXT_TF_RC_SUCCESS;
1062 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * VXLAN pattern-item handler: copies flags, rsvd0, vni and rsvd1
 * spec/mask into hdr_field slots, advances field_idx, and sets the
 * tunnel VXLAN bit in the header bitmap.
 * NOTE(review): numbering gaps (1063, 1066, 1072-1074, 1076-1078,
 * 1081-1082, 1085-1086, 1089-1090, 1093-1096, 1098, 1101, 1104, 1107,
 * 1109, 1112, 1116) — return type, 'size' declaration, NULL-checks,
 * the source-pointer arguments of several copies and braces were
 * dropped by extraction. '¶ms' is mojibake for '&params'.
 */
1064 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1065 struct ulp_rte_parser_params *params)
1067 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1068 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1069 struct ulp_rte_hdr_field *field;
1070 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1071 uint32_t idx = params->field_idx;
1075 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1079 size = sizeof(vxlan_spec->flags);
1080 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1083 size = sizeof(vxlan_spec->rsvd0);
1084 field = ulp_rte_parser_fld_copy(field,
1087 size = sizeof(vxlan_spec->vni);
1088 field = ulp_rte_parser_fld_copy(field,
1091 size = sizeof(vxlan_spec->rsvd1);
1092 field = ulp_rte_parser_fld_copy(field,
1097 ulp_rte_prsr_mask_copy(params, &idx,
1099 sizeof(vxlan_mask->flags));
1100 ulp_rte_prsr_mask_copy(params, &idx,
1102 sizeof(vxlan_mask->rsvd0));
1103 ulp_rte_prsr_mask_copy(params, &idx,
1105 sizeof(vxlan_mask->vni));
1106 ulp_rte_prsr_mask_copy(params, &idx,
1108 sizeof(vxlan_mask->rsvd1));
1110 /* Add number of vxlan header elements */
1111 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1113 /* Update the hdr_bitmap with vxlan */
1114 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1115 return BNXT_TF_RC_SUCCESS;
1118 /* Function to handle the parsing of RTE Flow item void Header */
1120 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1121 struct ulp_rte_parser_params *params __rte_unused)
1123 return BNXT_TF_RC_SUCCESS;
1126 /* Function to handle the parsing of RTE Flow action void Header. */
1128 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1129 struct ulp_rte_parser_params *params __rte_unused)
1131 return BNXT_TF_RC_SUCCESS;
1134 /* Function to handle the parsing of RTE Flow action Mark Header. */
1136 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1137 struct ulp_rte_parser_params *param)
1139 const struct rte_flow_action_mark *mark;
1140 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1143 mark = action_item->conf;
1145 mark_id = tfp_cpu_to_be_32(mark->id);
1146 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1147 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1149 /* Update the hdr_bitmap with vxlan */
1150 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1151 return BNXT_TF_RC_SUCCESS;
1153 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1154 return BNXT_TF_RC_ERROR;
1157 /* Function to handle the parsing of RTE Flow action RSS Header. */
1159 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1160 struct ulp_rte_parser_params *param)
1162 const struct rte_flow_action_rss *rss = action_item->conf;
1165 /* Update the hdr_bitmap with vxlan */
1166 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1167 return BNXT_TF_RC_SUCCESS;
1169 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1170 return BNXT_TF_RC_ERROR;
1173 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1175 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1176 struct ulp_rte_parser_params *params)
1178 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1179 const struct rte_flow_item *item;
1180 const struct rte_flow_item_eth *eth_spec;
1181 const struct rte_flow_item_ipv4 *ipv4_spec;
1182 const struct rte_flow_item_ipv6 *ipv6_spec;
1183 struct rte_flow_item_vxlan vxlan_spec;
1184 uint32_t vlan_num = 0, vlan_size = 0;
1185 uint32_t ip_size = 0, ip_type = 0;
1186 uint32_t vxlan_size = 0;
1188 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1189 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1191 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1192 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1194 vxlan_encap = action_item->conf;
1196 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1197 return BNXT_TF_RC_ERROR;
1200 item = vxlan_encap->definition;
1202 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1203 return BNXT_TF_RC_ERROR;
1206 if (!ulp_rte_item_skip_void(&item, 0))
1207 return BNXT_TF_RC_ERROR;
1209 /* must have ethernet header */
1210 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1211 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1212 return BNXT_TF_RC_ERROR;
1214 eth_spec = item->spec;
1215 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1216 ulp_encap_buffer_copy(buff,
1217 eth_spec->dst.addr_bytes,
1218 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1220 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1221 ulp_encap_buffer_copy(buff,
1222 eth_spec->src.addr_bytes,
1223 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1225 /* Goto the next item */
1226 if (!ulp_rte_item_skip_void(&item, 1))
1227 return BNXT_TF_RC_ERROR;
1229 /* May have vlan header */
1230 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1232 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1233 ulp_encap_buffer_copy(buff,
1235 sizeof(struct rte_flow_item_vlan));
1237 if (!ulp_rte_item_skip_void(&item, 1))
1238 return BNXT_TF_RC_ERROR;
1241 /* may have two vlan headers */
1242 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1244 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1245 sizeof(struct rte_flow_item_vlan)],
1247 sizeof(struct rte_flow_item_vlan));
1248 if (!ulp_rte_item_skip_void(&item, 1))
1249 return BNXT_TF_RC_ERROR;
1251 /* Update the vlan count and size of more than one */
1253 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1254 vlan_num = tfp_cpu_to_be_32(vlan_num);
1255 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1258 vlan_size = tfp_cpu_to_be_32(vlan_size);
1259 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1264 /* L3 must be IPv4, IPv6 */
1265 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1266 ipv4_spec = item->spec;
1267 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1269 /* copy the ipv4 details */
1270 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1271 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1272 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1273 ulp_encap_buffer_copy(buff,
1275 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1276 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1278 const uint8_t *tmp_buff;
1280 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1281 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1282 ulp_encap_buffer_copy(buff,
1284 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1285 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1286 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1287 ulp_encap_buffer_copy(buff,
1288 &ipv4_spec->hdr.version_ihl,
1289 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1291 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1292 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1293 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1294 ulp_encap_buffer_copy(buff,
1295 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1296 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1298 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1299 ulp_encap_buffer_copy(buff,
1300 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1301 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1303 /* Update the ip size details */
1304 ip_size = tfp_cpu_to_be_32(ip_size);
1305 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1306 &ip_size, sizeof(uint32_t));
1308 /* update the ip type */
1309 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1310 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1311 &ip_type, sizeof(uint32_t));
1313 /* update the computed field to notify it is ipv4 header */
1314 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1317 if (!ulp_rte_item_skip_void(&item, 1))
1318 return BNXT_TF_RC_ERROR;
1319 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1320 ipv6_spec = item->spec;
1321 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1323 /* copy the ipv4 details */
1324 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1325 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1327 /* Update the ip size details */
1328 ip_size = tfp_cpu_to_be_32(ip_size);
1329 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1330 &ip_size, sizeof(uint32_t));
1332 /* update the ip type */
1333 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1334 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1335 &ip_type, sizeof(uint32_t));
1337 /* update the computed field to notify it is ipv6 header */
1338 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1341 if (!ulp_rte_item_skip_void(&item, 1))
1342 return BNXT_TF_RC_ERROR;
1344 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1345 return BNXT_TF_RC_ERROR;
1349 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1350 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1351 return BNXT_TF_RC_ERROR;
1353 /* copy the udp details */
1354 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1355 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1357 if (!ulp_rte_item_skip_void(&item, 1))
1358 return BNXT_TF_RC_ERROR;
1361 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1362 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1363 return BNXT_TF_RC_ERROR;
1365 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1366 /* copy the vxlan details */
1367 memcpy(&vxlan_spec, item->spec, vxlan_size);
1368 vxlan_spec.flags = 0x08;
1369 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1370 (const uint8_t *)&vxlan_spec,
1372 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1373 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1374 &vxlan_size, sizeof(uint32_t));
1376 /* update the hdr_bitmap with vxlan */
1377 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1378 return BNXT_TF_RC_SUCCESS;
1381 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1383 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1385 struct ulp_rte_parser_params *params)
1387 /* update the hdr_bitmap with vxlan */
1388 ULP_BITMAP_SET(params->act_bitmap.bits,
1389 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1390 return BNXT_TF_RC_SUCCESS;
1393 /* Function to handle the parsing of RTE Flow action drop Header. */
1395 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1396 struct ulp_rte_parser_params *params)
1398 /* Update the hdr_bitmap with drop */
1399 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1400 return BNXT_TF_RC_SUCCESS;
1403 /* Function to handle the parsing of RTE Flow action count. */
1405 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1406 struct ulp_rte_parser_params *params)
1409 const struct rte_flow_action_count *act_count;
1410 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1412 act_count = action_item->conf;
1414 if (act_count->shared) {
1416 "Parse Error:Shared count not supported\n");
1417 return BNXT_TF_RC_PARSE_ERR;
1419 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1421 BNXT_ULP_ACT_PROP_SZ_COUNT);
1424 /* Update the hdr_bitmap with count */
1425 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1426 return BNXT_TF_RC_SUCCESS;
1429 /* Function to handle the parsing of action ports. */
1431 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1434 enum bnxt_ulp_direction_type dir;
1437 struct ulp_rte_act_prop *act = ¶m->act_prop;
1438 enum bnxt_ulp_intf_type port_type;
1441 /* Get the direction */
1442 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1443 if (dir == BNXT_ULP_DIR_EGRESS) {
1444 /* For egress direction, fill vport */
1445 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1446 return BNXT_TF_RC_ERROR;
1449 pid = rte_cpu_to_be_32(pid);
1450 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1451 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1453 /* For ingress direction, fill vnic */
1454 port_type = ULP_COMP_FLD_IDX_RD(param,
1455 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1456 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1457 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1459 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1461 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1463 return BNXT_TF_RC_ERROR;
1466 pid = rte_cpu_to_be_32(pid);
1467 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1468 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1471 /* Update the action port set bit */
1472 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1473 return BNXT_TF_RC_SUCCESS;
1476 /* Function to handle the parsing of RTE Flow action PF. */
1478 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1479 struct ulp_rte_parser_params *params)
1483 enum bnxt_ulp_intf_type intf_type;
1485 /* Get the port id of the current device */
1486 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1488 /* Get the port db ifindex */
1489 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1491 BNXT_TF_DBG(ERR, "Invalid port id\n");
1492 return BNXT_TF_RC_ERROR;
1495 /* Check the port is PF port */
1496 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1497 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1498 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1499 return BNXT_TF_RC_ERROR;
1501 /* Update the action properties */
1502 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1503 return ulp_rte_parser_act_port_set(params, ifindex);
1506 /* Function to handle the parsing of RTE Flow action VF. */
1508 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1509 struct ulp_rte_parser_params *params)
1511 const struct rte_flow_action_vf *vf_action;
1513 enum bnxt_ulp_intf_type intf_type;
1515 vf_action = action_item->conf;
1517 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1518 return BNXT_TF_RC_PARSE_ERR;
1521 if (vf_action->original) {
1522 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1523 return BNXT_TF_RC_PARSE_ERR;
1526 /* Check the port is VF port */
1527 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1529 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1530 return BNXT_TF_RC_ERROR;
1532 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1533 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1534 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1535 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1536 return BNXT_TF_RC_ERROR;
1539 /* Update the action properties */
1540 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1541 return ulp_rte_parser_act_port_set(params, ifindex);
1544 /* Function to handle the parsing of RTE Flow action port_id. */
1546 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1547 struct ulp_rte_parser_params *param)
1549 const struct rte_flow_action_port_id *port_id = act_item->conf;
1551 enum bnxt_ulp_intf_type intf_type;
1555 "ParseErr: Invalid Argument\n");
1556 return BNXT_TF_RC_PARSE_ERR;
1558 if (port_id->original) {
1560 "ParseErr:Portid Original not supported\n");
1561 return BNXT_TF_RC_PARSE_ERR;
1564 /* Get the port db ifindex */
1565 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1567 BNXT_TF_DBG(ERR, "Invalid port id\n");
1568 return BNXT_TF_RC_ERROR;
1571 /* Get the intf type */
1572 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1574 BNXT_TF_DBG(ERR, "Invalid port type\n");
1575 return BNXT_TF_RC_ERROR;
1578 /* Set the action port */
1579 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1580 return ulp_rte_parser_act_port_set(param, ifindex);
1583 /* Function to handle the parsing of RTE Flow action phy_port. */
1585 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1586 struct ulp_rte_parser_params *prm)
1588 const struct rte_flow_action_phy_port *phy_port;
1592 enum bnxt_ulp_direction_type dir;
1594 phy_port = action_item->conf;
1597 "ParseErr: Invalid Argument\n");
1598 return BNXT_TF_RC_PARSE_ERR;
1601 if (phy_port->original) {
1603 "Parse Err:Port Original not supported\n");
1604 return BNXT_TF_RC_PARSE_ERR;
1606 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1607 if (dir != BNXT_ULP_DIR_EGRESS) {
1609 "Parse Err:Phy ports are valid only for egress\n");
1610 return BNXT_TF_RC_PARSE_ERR;
1612 /* Get the physical port details from port db */
1613 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1616 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1621 pid = rte_cpu_to_be_32(pid);
1622 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1623 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1625 /* Update the action port set bit */
1626 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1627 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1628 BNXT_ULP_INTF_TYPE_PHY_PORT);
1629 return BNXT_TF_RC_SUCCESS;
1632 /* Function to handle the parsing of RTE Flow action pop vlan. */
1634 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1635 struct ulp_rte_parser_params *params)
1637 /* Update the act_bitmap with pop */
1638 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1639 return BNXT_TF_RC_SUCCESS;
1642 /* Function to handle the parsing of RTE Flow action push vlan. */
1644 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1645 struct ulp_rte_parser_params *params)
1647 const struct rte_flow_action_of_push_vlan *push_vlan;
1649 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1651 push_vlan = action_item->conf;
1653 ethertype = push_vlan->ethertype;
1654 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1656 "Parse Err: Ethertype not supported\n");
1657 return BNXT_TF_RC_PARSE_ERR;
1659 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1660 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1661 /* Update the hdr_bitmap with push vlan */
1662 ULP_BITMAP_SET(params->act_bitmap.bits,
1663 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1664 return BNXT_TF_RC_SUCCESS;
1666 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1667 return BNXT_TF_RC_ERROR;
1670 /* Function to handle the parsing of RTE Flow action set vlan id. */
1672 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1673 struct ulp_rte_parser_params *params)
1675 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1677 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1679 vlan_vid = action_item->conf;
1680 if (vlan_vid && vlan_vid->vlan_vid) {
1681 vid = vlan_vid->vlan_vid;
1682 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1683 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1684 /* Update the hdr_bitmap with vlan vid */
1685 ULP_BITMAP_SET(params->act_bitmap.bits,
1686 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1687 return BNXT_TF_RC_SUCCESS;
1689 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1690 return BNXT_TF_RC_ERROR;
1693 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1695 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1696 struct ulp_rte_parser_params *params)
1698 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1700 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1702 vlan_pcp = action_item->conf;
1704 pcp = vlan_pcp->vlan_pcp;
1705 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1706 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1707 /* Update the hdr_bitmap with vlan vid */
1708 ULP_BITMAP_SET(params->act_bitmap.bits,
1709 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1710 return BNXT_TF_RC_SUCCESS;
1712 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1713 return BNXT_TF_RC_ERROR;
1716 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
1718 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
1719 struct ulp_rte_parser_params *params)
1721 const struct rte_flow_action_set_ipv4 *set_ipv4;
1722 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1724 set_ipv4 = action_item->conf;
1726 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
1727 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
1728 /* Update the hdr_bitmap with set ipv4 src */
1729 ULP_BITMAP_SET(params->act_bitmap.bits,
1730 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
1731 return BNXT_TF_RC_SUCCESS;
1733 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
1734 return BNXT_TF_RC_ERROR;
1737 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
1739 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
1740 struct ulp_rte_parser_params *params)
1742 const struct rte_flow_action_set_ipv4 *set_ipv4;
1743 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1745 set_ipv4 = action_item->conf;
1747 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
1748 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
1749 /* Update the hdr_bitmap with set ipv4 dst */
1750 ULP_BITMAP_SET(params->act_bitmap.bits,
1751 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
1752 return BNXT_TF_RC_SUCCESS;
1754 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
1755 return BNXT_TF_RC_ERROR;
1758 /* Function to handle the parsing of RTE Flow action set tp src.*/
1760 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
1761 struct ulp_rte_parser_params *params)
1763 const struct rte_flow_action_set_tp *set_tp;
1764 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1766 set_tp = action_item->conf;
1768 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
1769 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
1770 /* Update the hdr_bitmap with set tp src */
1771 ULP_BITMAP_SET(params->act_bitmap.bits,
1772 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
1773 return BNXT_TF_RC_SUCCESS;
1776 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1777 return BNXT_TF_RC_ERROR;
1780 /* Function to handle the parsing of RTE Flow action set tp dst.*/
1782 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
1783 struct ulp_rte_parser_params *params)
1785 const struct rte_flow_action_set_tp *set_tp;
1786 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1788 set_tp = action_item->conf;
1790 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
1791 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
1792 /* Update the hdr_bitmap with set tp dst */
1793 ULP_BITMAP_SET(params->act_bitmap.bits,
1794 BNXT_ULP_ACTION_BIT_SET_TP_DST);
1795 return BNXT_TF_RC_SUCCESS;
1798 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1799 return BNXT_TF_RC_ERROR;