1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Utility function to skip the void items. */
/*
 * Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries,
 * presumably stepping by 'increment' items each iteration.
 * NOTE(review): the return-type line and the loop body are missing from
 * this extract — confirm the full definition against upstream before
 * relying on the exact stepping semantics.
 */
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
23 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] in params->fld_bitmap when its mask is non-zero;
 * if the mask is not all-ones it additionally sets the wildcard-match
 * bit (BNXT_ULP_MATCH_TYPE_BITMASK_WM). An all-zero mask resets the bit.
 * NOTE(review): "¶ms" below is mojibake for "&params" (HTML entity
 * corruption) — restore when re-syncing with upstream.
 */
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
35 struct ulp_rte_hdr_field *field;
37 field = ¶ms->hdr_field[idx];
38 if (ulp_bitmap_notzero(field->mask, field->size)) {
/* Non-zero mask: this field participates in the match. */
39 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial mask implies a wildcard-style match. */
41 if (!ulp_bitmap_is_ones(field->mask, field->size))
42 ULP_BITMAP_SET(params->fld_bitmap.bits,
43 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
/* else branch (lines missing here): clear the field's bit. */
45 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
49 /* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec and (per the missing lines) presumably
 * advances and returns the next hdr_field pointer so callers can chain
 * copies — TODO confirm against upstream; parameter list and return
 * statement are not visible in this extract.
 */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
56 memcpy(field->spec, buffer, field->size);
61 /* Utility function to copy field masks items */
/*
 * Copies 'size' bytes of 'buffer' into hdr_field[*idx].mask and refreshes
 * the field bitmap for that index. Presumably *idx is incremented by the
 * missing trailing lines — confirm upstream.
 * NOTE(review): "¶ms" is mojibake for "&params".
 */
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
68 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
70 memcpy(field->mask, buffer, size);
71 ulp_rte_parser_field_bitmap_update(params, *idx);
76 * Function to handle the parsing of RTE Flows and placing
77 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow pattern array until RTE_FLOW_ITEM_TYPE_END,
 * dispatching each item to its registered proto_hdr_func from the
 * ulp_hdr_info table. Returns BNXT_TF_RC_PARSE_ERR for unsupported item
 * types, BNXT_TF_RC_ERROR if a handler fails, otherwise falls through to
 * the implicit SVIF match processing.
 * NOTE(review): loop-advance and error-log argument lines are missing
 * from this extract.
 */
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81 struct ulp_rte_parser_params *params)
83 const struct rte_flow_item *item = pattern;
84 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading SVIF header-field slots before item parsing. */
86 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
88 /* Set the computed flags for no vlan tags before parsing */
89 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
90 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
92 /* Parse all the items in the pattern */
93 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
94 /* get the header information from the flow_hdr_info table */
95 hdr_info = &ulp_hdr_info[item->type];
96 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
98 "Truflow parser does not support type %d\n",
100 return BNXT_TF_RC_PARSE_ERR;
101 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
102 /* call the registered callback handler */
103 if (hdr_info->proto_hdr_func) {
104 if (hdr_info->proto_hdr_func(item, params) !=
105 BNXT_TF_RC_SUCCESS) {
106 return BNXT_TF_RC_ERROR;
112 /* update the implied SVIF */
113 return ulp_rte_parser_implicit_match_port_process(params);
117 * Function to handle the parsing of RTE Flows and placing
118 * the RTE flow actions into the ulp structures.
/*
 * Mirrors the header parser for actions: walks the actions array until
 * RTE_FLOW_ACTION_TYPE_END, dispatching each to its proto_act_func from
 * ulp_act_info. Unsupported actions and handler failures both return
 * BNXT_TF_RC_ERROR; on success the implicit action port is processed.
 */
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122 struct ulp_rte_parser_params *params)
124 const struct rte_flow_action *action_item = actions;
125 struct bnxt_ulp_rte_act_info *hdr_info;
127 /* Parse all the items in the pattern */
128 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
129 /* get the header information from the flow_hdr_info table */
130 hdr_info = &ulp_act_info[action_item->type];
131 if (hdr_info->act_type ==
132 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
134 "Truflow parser does not support act %u\n",
136 return BNXT_TF_RC_ERROR;
137 } else if (hdr_info->act_type ==
138 BNXT_ULP_ACT_TYPE_SUPPORTED) {
139 /* call the registered callback handler */
140 if (hdr_info->proto_act_func) {
141 if (hdr_info->proto_act_func(action_item,
143 BNXT_TF_RC_SUCCESS) {
144 return BNXT_TF_RC_ERROR;
150 /* update the implied port details */
151 ulp_rte_parser_implicit_act_port_process(params);
152 return BNXT_TF_RC_SUCCESS;
156 * Function to handle the post processing of the parsing details
/*
 * After both parsers have run: reads the computed direction and port
 * types, stamps the egress direction bit into both the header and action
 * bitmaps when the flow is egress, and sets the VF-to-VF computed flag
 * when both match and action ports are VF representors.
 */
159 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
161 enum bnxt_ulp_direction_type dir;
162 enum bnxt_ulp_intf_type match_port_type, act_port_type;
163 uint32_t act_port_set;
165 /* Get the computed details */
166 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
167 match_port_type = ULP_COMP_FLD_IDX_RD(params,
168 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
169 act_port_type = ULP_COMP_FLD_IDX_RD(params,
170 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
171 act_port_set = ULP_COMP_FLD_IDX_RD(params,
172 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
174 /* set the flow direction in the proto and action header */
175 if (dir == BNXT_ULP_DIR_EGRESS) {
176 ULP_BITMAP_SET(params->hdr_bitmap.bits,
177 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
178 ULP_BITMAP_SET(params->act_bitmap.bits,
179 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
182 /* calculate the VF to VF flag */
183 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
184 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
185 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
187 /* TBD: Handle the flow rejection scenarios */
192 * Function to compute the flow direction based on the match port details
/*
 * Writes BNXT_ULP_CF_IDX_DIRECTION: an ingress-attributed flow whose
 * match port is a VF representor is treated as egress (traffic is seen
 * from the VF rep's perspective); otherwise the direction follows the
 * flow's dir_attr. The else-branches' control lines are missing from
 * this extract — the final write presumably sits in an else path.
 */
195 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
197 enum bnxt_ulp_intf_type match_port_type;
199 /* Get the match port type */
200 match_port_type = ULP_COMP_FLD_IDX_RD(params,
201 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
203 /* If ingress flow and matchport is vf rep then dir is egress*/
204 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
205 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
206 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
207 BNXT_ULP_DIR_EGRESS);
209 /* Assign the input direction */
210 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
211 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
212 BNXT_ULP_DIR_INGRESS);
214 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
215 BNXT_ULP_DIR_EGRESS);
219 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolves and records the source virtual interface (SVIF) for the flow:
 * rejects a second SVIF, looks up the port type for 'ifindex' in the
 * port DB, computes the flow direction, picks the SVIF type (phy port
 * for ingress; VF-func vs drv-func for egress depending on VF rep), then
 * writes the big-endian SVIF spec/mask into the SVIF header field and
 * latches it in the SVIF_FLAG computed field.
 * NOTE(review): 'svif'/'mask' declarations and several closing lines are
 * missing from this extract; "¶ms" is mojibake for "&params".
 */
221 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
226 enum bnxt_ulp_direction_type dir;
227 struct ulp_rte_hdr_field *hdr_field;
228 enum bnxt_ulp_svif_type svif_type;
229 enum bnxt_ulp_intf_type port_type;
/* Only one source (SVIF) may be specified per flow. */
231 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
232 BNXT_ULP_INVALID_SVIF_VAL) {
234 "SVIF already set,multiple source not support'd\n");
235 return BNXT_TF_RC_ERROR;
238 /* Get port type details */
239 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
240 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
241 BNXT_TF_DBG(ERR, "Invalid port type\n");
242 return BNXT_TF_RC_ERROR;
245 /* Update the match port type */
246 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
248 /* compute the direction */
249 bnxt_ulp_rte_parser_direction_compute(params);
251 /* Get the computed direction */
252 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
253 if (dir == BNXT_ULP_DIR_INGRESS) {
254 svif_type = BNXT_ULP_PHY_PORT_SVIF;
/* Egress: VF reps use the VF function SVIF, others the driver function. */
256 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
257 svif_type = BNXT_ULP_VF_FUNC_SVIF;
259 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
261 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* Hardware expects the SVIF in network byte order. */
263 svif = rte_cpu_to_be_16(svif);
264 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
265 memcpy(hdr_field->spec, &svif, sizeof(svif));
266 memcpy(hdr_field->mask, &mask, sizeof(mask));
267 hdr_field->size = sizeof(svif);
/* Record the SVIF (host order) so later items can detect a duplicate. */
268 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
269 rte_be_to_cpu_16(svif));
270 return BNXT_TF_RC_SUCCESS;
273 /* Function to handle the parsing of the RTE port id */
/*
 * If no explicit SVIF item was seen in the pattern, derives one from the
 * incoming interface: maps the dpdk port id to a ulp ifindex and sets
 * the SVIF with a full (0xFFFF) mask. Returns early with success when
 * the SVIF is already set.
 * NOTE(review): 'ifindex' declaration and the function tail are missing
 * from this extract.
 */
275 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
277 uint16_t port_id = 0;
278 uint16_t svif_mask = 0xFFFF;
280 int32_t rc = BNXT_TF_RC_ERROR;
282 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
283 BNXT_ULP_INVALID_SVIF_VAL)
284 return BNXT_TF_RC_SUCCESS;
286 /* SVIF not set. So get the port id */
287 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
/* perform the conversion from dpdk port to bnxt ifindex */
289 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
292 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
296 /* Update the SVIF details */
297 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
301 /* Function to handle the implicit action port id */
/*
 * If no explicit port-id action was supplied, synthesizes one from the
 * incoming interface and runs it through the regular port-id action
 * handler, then clears ACT_PORT_IS_SET so the port counts as implicit.
 */
303 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
305 struct rte_flow_action action_item = {0};
306 struct rte_flow_action_port_id port_id = {0};
308 /* Read the action port set bit */
309 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
310 /* Already set, so just exit */
311 return BNXT_TF_RC_SUCCESS;
/* Build a synthetic port-id action pointing at the incoming port. */
313 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
314 action_item.conf = &port_id;
316 /* Update the action port based on incoming port */
317 ulp_rte_port_id_act_handler(&action_item, params);
319 /* Reset the action port set bit */
320 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
321 return BNXT_TF_RC_SUCCESS;
324 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item handler: the PF item carries no spec, so the port is taken
 * from the incoming interface, converted to a ulp ifindex, and the SVIF
 * is set with a full mask.
 * NOTE(review): 'ifindex' declaration is missing from this extract.
 */
326 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
327 struct ulp_rte_parser_params *params)
329 uint16_t port_id = 0;
330 uint16_t svif_mask = 0xFFFF;
333 /* Get the implicit port id */
334 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
336 /* perform the conversion from dpdk port to bnxt ifindex */
337 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
340 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
341 return BNXT_TF_RC_ERROR;
344 /* Update the SVIF details */
345 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
348 /* Function to handle the parsing of RTE Flow item VF Header. */
350 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
351 struct ulp_rte_parser_params *params)
353 const struct rte_flow_item_vf *vf_spec = item->spec;
354 const struct rte_flow_item_vf *vf_mask = item->mask;
357 int32_t rc = BNXT_TF_RC_PARSE_ERR;
359 /* Get VF rte_flow_item for Port details */
361 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
365 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
370 /* perform the conversion from VF Func id to bnxt ifindex */
371 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
374 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
377 /* Update the SVIF details */
378 return ulp_rte_parser_svif_set(params, ifindex, mask);
381 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * Port-id item handler: validates spec/mask, converts the dpdk port id
 * to a ulp ifindex, and sets the SVIF using the item's mask.
 * NOTE(review): the validation conditions and local declarations are
 * missing from this extract.
 */
383 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
384 struct ulp_rte_parser_params *params)
386 const struct rte_flow_item_port_id *port_spec = item->spec;
387 const struct rte_flow_item_port_id *port_mask = item->mask;
389 int32_t rc = BNXT_TF_RC_PARSE_ERR;
393 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
397 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
400 mask = port_mask->id;
402 /* perform the conversion from dpdk port to bnxt ifindex */
403 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
406 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
409 /* Update the SVIF details */
410 return ulp_rte_parser_svif_set(params, ifindex, mask);
413 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * Physical-port item handler: validates spec/mask, forces the match port
 * type to PHY_PORT, computes the direction and rejects egress (phy-port
 * matches are ingress-only here), fetches the port's SVIF from the port
 * DB, and writes the big-endian SVIF into the SVIF header field.
 * NOTE(review): 'svif'/'mask' declarations, validation conditions, and
 * the rc check line are missing; "¶ms" is mojibake for "&params".
 */
415 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
416 struct ulp_rte_parser_params *params)
418 const struct rte_flow_item_phy_port *port_spec = item->spec;
419 const struct rte_flow_item_phy_port *port_mask = item->mask;
421 int32_t rc = BNXT_TF_RC_ERROR;
423 enum bnxt_ulp_direction_type dir;
424 struct ulp_rte_hdr_field *hdr_field;
426 /* Copy the rte_flow_item for phy port into hdr_field */
428 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
432 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
435 mask = port_mask->index;
437 /* Update the match port type */
438 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
439 BNXT_ULP_INTF_TYPE_PHY_PORT);
441 /* Compute the Hw direction */
442 bnxt_ulp_rte_parser_direction_compute(params);
444 /* Direction validation */
445 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
446 if (dir == BNXT_ULP_DIR_EGRESS) {
448 "Parse Err:Phy ports are valid only for ingress\n");
449 return BNXT_TF_RC_PARSE_ERR;
452 /* Get the physical port details from port db */
453 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
456 BNXT_TF_DBG(ERR, "Failed to get port details\n");
457 return BNXT_TF_RC_PARSE_ERR;
460 /* Update the SVIF details */
461 svif = rte_cpu_to_be_16(svif);
462 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
463 memcpy(hdr_field->spec, &svif, sizeof(svif));
464 memcpy(hdr_field->mask, &mask, sizeof(mask));
465 hdr_field->size = sizeof(svif);
/* Latch the SVIF (host order) so duplicate sources are rejected later. */
466 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
467 rte_be_to_cpu_16(svif));
468 return BNXT_TF_RC_SUCCESS;
471 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Ethernet item handler: copies dst MAC, src MAC and ethertype from the
 * spec into consecutive hdr_field slots (and their masks likewise),
 * advances field_idx past the ethernet fields, reserves the vlan field
 * slots, and sets O_ETH or I_ETH in the header bitmap depending on
 * whether an outer ethernet header was already seen.
 * NOTE(review): spec/mask NULL-guard lines and the set_flag else branch
 * are missing from this extract; "¶ms" is mojibake for "&params".
 */
473 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
474 struct ulp_rte_parser_params *params)
476 const struct rte_flow_item_eth *eth_spec = item->spec;
477 const struct rte_flow_item_eth *eth_mask = item->mask;
478 struct ulp_rte_hdr_field *field;
479 uint32_t idx = params->field_idx;
480 uint64_t set_flag = 0;
484 * Copy the rte_flow_item for eth into hdr_field using ethernet
488 size = sizeof(eth_spec->dst.addr_bytes);
489 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
490 eth_spec->dst.addr_bytes,
492 size = sizeof(eth_spec->src.addr_bytes);
493 field = ulp_rte_parser_fld_copy(field,
494 eth_spec->src.addr_bytes,
496 field = ulp_rte_parser_fld_copy(field,
498 sizeof(eth_spec->type));
501 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
502 sizeof(eth_mask->dst.addr_bytes));
503 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
504 sizeof(eth_mask->src.addr_bytes));
505 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
506 sizeof(eth_mask->type));
508 /* Add number of vlan header elements */
509 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
/* Reserve slots for possible vlan tags that may follow this header. */
510 params->vlan_idx = params->field_idx;
511 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
513 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
514 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
515 BNXT_ULP_HDR_BIT_O_ETH);
517 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
519 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
520 BNXT_ULP_HDR_BIT_I_ETH);
522 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
523 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
525 return BNXT_TF_RC_SUCCESS;
528 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * VLAN item handler: splits the TCI into priority (top 3 bits) and tag,
 * copies priority/tag/inner_type spec and mask into the reserved vlan
 * field slots, then classifies the tag as outer-outer, outer-inner,
 * inner-outer or inner-inner based on which ethernet headers have been
 * seen and the current vlan tag counts, updating the computed vtag
 * fields and the header bitmap accordingly. Errors if a vlan appears
 * without a preceding ethernet header.
 * NOTE(review): spec/mask NULL guards, the vlan_tag bit-twiddling lines
 * and several count/condition lines are missing from this extract;
 * "¶ms" is mojibake for "&params".
 */
530 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
531 struct ulp_rte_parser_params *params)
533 const struct rte_flow_item_vlan *vlan_spec = item->spec;
534 const struct rte_flow_item_vlan *vlan_mask = item->mask;
535 struct ulp_rte_hdr_field *field;
536 struct ulp_rte_hdr_bitmap *hdr_bit;
537 uint32_t idx = params->vlan_idx;
538 uint16_t vlan_tag, priority;
539 uint32_t outer_vtag_num;
540 uint32_t inner_vtag_num;
543 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* Split the spec TCI: PCP in the top 3 bits, tag in the rest. */
547 vlan_tag = ntohs(vlan_spec->tci);
548 priority = htons(vlan_tag >> 13);
550 vlan_tag = htons(vlan_tag);
552 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
555 field = ulp_rte_parser_fld_copy(field,
558 field = ulp_rte_parser_fld_copy(field,
559 &vlan_spec->inner_type,
560 sizeof(vlan_spec->inner_type));
/* Same TCI split for the mask. */
564 vlan_tag = ntohs(vlan_mask->tci);
565 priority = htons(vlan_tag >> 13);
567 vlan_tag = htons(vlan_tag);
569 field = ¶ms->hdr_field[idx];
570 memcpy(field->mask, &priority, field->size);
572 memcpy(field->mask, &vlan_tag, field->size);
574 memcpy(field->mask, &vlan_mask->inner_type, field->size);
576 /* Set the vlan index to new incremented value */
577 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
579 /* Get the outer tag and inner tag counts */
580 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
581 BNXT_ULP_CF_IDX_O_VTAG_NUM);
582 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
583 BNXT_ULP_CF_IDX_I_VTAG_NUM);
585 /* Update the hdr_bitmap of the vlans */
586 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first vlan after the outer ethernet header (OO vlan). */
587 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
588 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
590 /* Update the vlan tag num */
592 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
594 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
595 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
596 ULP_BITMAP_SET(params->hdr_bitmap.bits,
597 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Case 2: second vlan on the outer ethernet header (OI vlan). */
598 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
599 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
600 outer_vtag_num == 1) {
601 /* update the vlan tag num */
603 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
605 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
607 ULP_BITMAP_SET(params->hdr_bitmap.bits,
608 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Case 3: first vlan after the inner ethernet header (IO vlan). */
609 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
610 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
612 /* update the vlan tag num */
614 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
616 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
617 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
618 ULP_BITMAP_SET(params->hdr_bitmap.bits,
619 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Case 4: second vlan on the inner ethernet header (II vlan). */
620 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
621 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
622 inner_vtag_num == 1) {
623 /* update the vlan tag num */
625 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
627 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
628 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
629 ULP_BITMAP_SET(params->hdr_bitmap.bits,
630 BNXT_ULP_HDR_BIT_II_VLAN);
632 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
633 return BNXT_TF_RC_ERROR;
635 return BNXT_TF_RC_SUCCESS;
638 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * IPv4 item handler: rejects a third L3 header, copies every rte_ipv4_hdr
 * field (version_ihl through dst_addr) from the spec into consecutive
 * hdr_field slots and likewise for the mask, advances field_idx, and
 * marks the header as inner or outer IPv4 depending on whether an outer
 * L3 header was already present.
 * NOTE(review): spec/mask NULL guards, 'size' declaration and the
 * inner/outer condition lines are missing from this extract; "¶ms"
 * is mojibake for "&params".
 */
640 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
641 struct ulp_rte_parser_params *params)
643 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
644 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
645 struct ulp_rte_hdr_field *field;
646 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
647 uint32_t idx = params->field_idx;
649 uint32_t inner_l3, outer_l3;
/* At most two stacked L3 headers (outer + inner) are supported. */
651 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
653 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
654 return BNXT_TF_RC_ERROR;
658 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
662 size = sizeof(ipv4_spec->hdr.version_ihl);
663 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
664 &ipv4_spec->hdr.version_ihl,
666 size = sizeof(ipv4_spec->hdr.type_of_service);
667 field = ulp_rte_parser_fld_copy(field,
668 &ipv4_spec->hdr.type_of_service,
670 size = sizeof(ipv4_spec->hdr.total_length);
671 field = ulp_rte_parser_fld_copy(field,
672 &ipv4_spec->hdr.total_length,
674 size = sizeof(ipv4_spec->hdr.packet_id);
675 field = ulp_rte_parser_fld_copy(field,
676 &ipv4_spec->hdr.packet_id,
678 size = sizeof(ipv4_spec->hdr.fragment_offset);
679 field = ulp_rte_parser_fld_copy(field,
680 &ipv4_spec->hdr.fragment_offset,
682 size = sizeof(ipv4_spec->hdr.time_to_live);
683 field = ulp_rte_parser_fld_copy(field,
684 &ipv4_spec->hdr.time_to_live,
686 size = sizeof(ipv4_spec->hdr.next_proto_id);
687 field = ulp_rte_parser_fld_copy(field,
688 &ipv4_spec->hdr.next_proto_id,
690 size = sizeof(ipv4_spec->hdr.hdr_checksum);
691 field = ulp_rte_parser_fld_copy(field,
692 &ipv4_spec->hdr.hdr_checksum,
694 size = sizeof(ipv4_spec->hdr.src_addr);
695 field = ulp_rte_parser_fld_copy(field,
696 &ipv4_spec->hdr.src_addr,
698 size = sizeof(ipv4_spec->hdr.dst_addr);
699 field = ulp_rte_parser_fld_copy(field,
700 &ipv4_spec->hdr.dst_addr,
/* Mask copies mirror the spec copies, one per IPv4 header field. */
704 ulp_rte_prsr_mask_copy(params, &idx,
705 &ipv4_mask->hdr.version_ihl,
706 sizeof(ipv4_mask->hdr.version_ihl));
707 ulp_rte_prsr_mask_copy(params, &idx,
708 &ipv4_mask->hdr.type_of_service,
709 sizeof(ipv4_mask->hdr.type_of_service));
710 ulp_rte_prsr_mask_copy(params, &idx,
711 &ipv4_mask->hdr.total_length,
712 sizeof(ipv4_mask->hdr.total_length));
713 ulp_rte_prsr_mask_copy(params, &idx,
714 &ipv4_mask->hdr.packet_id,
715 sizeof(ipv4_mask->hdr.packet_id));
716 ulp_rte_prsr_mask_copy(params, &idx,
717 &ipv4_mask->hdr.fragment_offset,
718 sizeof(ipv4_mask->hdr.fragment_offset));
719 ulp_rte_prsr_mask_copy(params, &idx,
720 &ipv4_mask->hdr.time_to_live,
721 sizeof(ipv4_mask->hdr.time_to_live));
722 ulp_rte_prsr_mask_copy(params, &idx,
723 &ipv4_mask->hdr.next_proto_id,
724 sizeof(ipv4_mask->hdr.next_proto_id));
725 ulp_rte_prsr_mask_copy(params, &idx,
726 &ipv4_mask->hdr.hdr_checksum,
727 sizeof(ipv4_mask->hdr.hdr_checksum));
728 ulp_rte_prsr_mask_copy(params, &idx,
729 &ipv4_mask->hdr.src_addr,
730 sizeof(ipv4_mask->hdr.src_addr));
731 ulp_rte_prsr_mask_copy(params, &idx,
732 &ipv4_mask->hdr.dst_addr,
733 sizeof(ipv4_mask->hdr.dst_addr));
735 /* Add the number of ipv4 header elements */
736 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
738 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
739 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
741 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
742 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
/* An outer L3 already exists: this header is the inner IPv4. */
743 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
745 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
747 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
749 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
751 return BNXT_TF_RC_SUCCESS;
754 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPv6 item handler: rejects a third L3 header, decomposes vtc_flow into
 * version / traffic-class / flow-label sub-fields (spec and mask), copies
 * payload_len, proto, hop_limits, src/dst addresses, advances field_idx,
 * and marks the header as inner or outer IPv6 based on existing outer L3.
 * NOTE(review): spec/mask NULL guards, 'size' declaration and the vtcf
 * copy-size arguments are missing from this extract; "¶ms" is
 * mojibake for "&params".
 */
756 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
757 struct ulp_rte_parser_params *params)
759 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
760 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
761 struct ulp_rte_hdr_field *field;
762 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
763 uint32_t idx = params->field_idx;
765 uint32_t inner_l3, outer_l3;
766 uint32_t vtcf, vtcf_mask;
/* At most two stacked L3 headers (outer + inner) are supported. */
768 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
770 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
771 return BNXT_TF_RC_ERROR;
775 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* vtc_flow is split into version, traffic class and flow label. */
779 size = sizeof(ipv6_spec->hdr.vtc_flow);
781 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
782 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
786 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
787 field = ulp_rte_parser_fld_copy(field,
791 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
792 field = ulp_rte_parser_fld_copy(field,
796 size = sizeof(ipv6_spec->hdr.payload_len);
797 field = ulp_rte_parser_fld_copy(field,
798 &ipv6_spec->hdr.payload_len,
800 size = sizeof(ipv6_spec->hdr.proto);
801 field = ulp_rte_parser_fld_copy(field,
802 &ipv6_spec->hdr.proto,
804 size = sizeof(ipv6_spec->hdr.hop_limits);
805 field = ulp_rte_parser_fld_copy(field,
806 &ipv6_spec->hdr.hop_limits,
808 size = sizeof(ipv6_spec->hdr.src_addr);
809 field = ulp_rte_parser_fld_copy(field,
810 &ipv6_spec->hdr.src_addr,
812 size = sizeof(ipv6_spec->hdr.dst_addr);
813 field = ulp_rte_parser_fld_copy(field,
814 &ipv6_spec->hdr.dst_addr,
/* Mask side: same vtc_flow decomposition, then the scalar fields. */
818 size = sizeof(ipv6_mask->hdr.vtc_flow);
820 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
821 ulp_rte_prsr_mask_copy(params, &idx,
825 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
826 ulp_rte_prsr_mask_copy(params, &idx,
831 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
832 ulp_rte_prsr_mask_copy(params, &idx,
836 ulp_rte_prsr_mask_copy(params, &idx,
837 &ipv6_mask->hdr.payload_len,
838 sizeof(ipv6_mask->hdr.payload_len));
839 ulp_rte_prsr_mask_copy(params, &idx,
840 &ipv6_mask->hdr.proto,
841 sizeof(ipv6_mask->hdr.proto));
842 ulp_rte_prsr_mask_copy(params, &idx,
843 &ipv6_mask->hdr.hop_limits,
844 sizeof(ipv6_mask->hdr.hop_limits));
845 ulp_rte_prsr_mask_copy(params, &idx,
846 &ipv6_mask->hdr.src_addr,
847 sizeof(ipv6_mask->hdr.src_addr));
848 ulp_rte_prsr_mask_copy(params, &idx,
849 &ipv6_mask->hdr.dst_addr,
850 sizeof(ipv6_mask->hdr.dst_addr));
852 /* add number of ipv6 header elements */
853 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
855 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
856 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
858 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
859 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
/* An outer L3 already exists: this header is the inner IPv6. */
860 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
861 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
863 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
864 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
866 return BNXT_TF_RC_SUCCESS;
869 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * UDP item handler: rejects a third L4 header, copies src/dst port,
 * datagram length and checksum (spec and mask) into hdr_field slots,
 * advances field_idx, and marks the header as inner or outer UDP based
 * on whether an outer L4 header already exists.
 * NOTE(review): spec/mask NULL guards, 'size' declaration and the
 * inner/outer condition line are missing from this extract; "¶ms"
 * is mojibake for "&params".
 */
871 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
872 struct ulp_rte_parser_params *params)
874 const struct rte_flow_item_udp *udp_spec = item->spec;
875 const struct rte_flow_item_udp *udp_mask = item->mask;
876 struct ulp_rte_hdr_field *field;
877 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
878 uint32_t idx = params->field_idx;
880 uint32_t inner_l4, outer_l4;
/* At most two stacked L4 headers (outer + inner) are supported. */
882 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
884 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
885 return BNXT_TF_RC_ERROR;
889 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
893 size = sizeof(udp_spec->hdr.src_port);
894 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
895 &udp_spec->hdr.src_port,
897 size = sizeof(udp_spec->hdr.dst_port);
898 field = ulp_rte_parser_fld_copy(field,
899 &udp_spec->hdr.dst_port,
901 size = sizeof(udp_spec->hdr.dgram_len);
902 field = ulp_rte_parser_fld_copy(field,
903 &udp_spec->hdr.dgram_len,
905 size = sizeof(udp_spec->hdr.dgram_cksum);
906 field = ulp_rte_parser_fld_copy(field,
907 &udp_spec->hdr.dgram_cksum,
/* Mask copies mirror the spec copies, one per UDP header field. */
911 ulp_rte_prsr_mask_copy(params, &idx,
912 &udp_mask->hdr.src_port,
913 sizeof(udp_mask->hdr.src_port));
914 ulp_rte_prsr_mask_copy(params, &idx,
915 &udp_mask->hdr.dst_port,
916 sizeof(udp_mask->hdr.dst_port));
917 ulp_rte_prsr_mask_copy(params, &idx,
918 &udp_mask->hdr.dgram_len,
919 sizeof(udp_mask->hdr.dgram_len));
920 ulp_rte_prsr_mask_copy(params, &idx,
921 &udp_mask->hdr.dgram_cksum,
922 sizeof(udp_mask->hdr.dgram_cksum));
925 /* Add number of UDP header elements */
926 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
928 /* Set the udp header bitmap and computed l4 header bitmaps */
929 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
931 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
932 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
/* An outer L4 already exists: this header is the inner UDP. */
933 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
934 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
936 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
937 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
939 return BNXT_TF_RC_SUCCESS;
942 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * TCP item handler: rejects a third L4 header, copies the nine TCP
 * header fields (src/dst port, seq, ack, data_off, flags, rx_win,
 * cksum, urgent pointer) for spec and mask, advances field_idx, and
 * marks the header as inner or outer TCP based on an existing outer L4.
 * NOTE(review): spec/mask NULL guards and 'size' declaration are missing;
 * the bare "idx += BNXT_ULP_PROTO_HDR_TCP_NUM" presumably belongs to a
 * missing no-spec else branch — confirm upstream. "¶ms" is mojibake
 * for "&params".
 */
944 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
945 struct ulp_rte_parser_params *params)
947 const struct rte_flow_item_tcp *tcp_spec = item->spec;
948 const struct rte_flow_item_tcp *tcp_mask = item->mask;
949 struct ulp_rte_hdr_field *field;
950 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
951 uint32_t idx = params->field_idx;
953 uint32_t inner_l4, outer_l4;
/* At most two stacked L4 headers (outer + inner) are supported. */
955 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
957 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
958 return BNXT_TF_RC_ERROR;
962 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
966 size = sizeof(tcp_spec->hdr.src_port);
967 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
968 &tcp_spec->hdr.src_port,
970 size = sizeof(tcp_spec->hdr.dst_port);
971 field = ulp_rte_parser_fld_copy(field,
972 &tcp_spec->hdr.dst_port,
974 size = sizeof(tcp_spec->hdr.sent_seq);
975 field = ulp_rte_parser_fld_copy(field,
976 &tcp_spec->hdr.sent_seq,
978 size = sizeof(tcp_spec->hdr.recv_ack);
979 field = ulp_rte_parser_fld_copy(field,
980 &tcp_spec->hdr.recv_ack,
982 size = sizeof(tcp_spec->hdr.data_off);
983 field = ulp_rte_parser_fld_copy(field,
984 &tcp_spec->hdr.data_off,
986 size = sizeof(tcp_spec->hdr.tcp_flags);
987 field = ulp_rte_parser_fld_copy(field,
988 &tcp_spec->hdr.tcp_flags,
990 size = sizeof(tcp_spec->hdr.rx_win);
991 field = ulp_rte_parser_fld_copy(field,
992 &tcp_spec->hdr.rx_win,
994 size = sizeof(tcp_spec->hdr.cksum);
995 field = ulp_rte_parser_fld_copy(field,
996 &tcp_spec->hdr.cksum,
998 size = sizeof(tcp_spec->hdr.tcp_urp);
999 field = ulp_rte_parser_fld_copy(field,
1000 &tcp_spec->hdr.tcp_urp,
1003 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
/* Mask copies mirror the spec copies, one per TCP header field. */
1007 ulp_rte_prsr_mask_copy(params, &idx,
1008 &tcp_mask->hdr.src_port,
1009 sizeof(tcp_mask->hdr.src_port));
1010 ulp_rte_prsr_mask_copy(params, &idx,
1011 &tcp_mask->hdr.dst_port,
1012 sizeof(tcp_mask->hdr.dst_port));
1013 ulp_rte_prsr_mask_copy(params, &idx,
1014 &tcp_mask->hdr.sent_seq,
1015 sizeof(tcp_mask->hdr.sent_seq));
1016 ulp_rte_prsr_mask_copy(params, &idx,
1017 &tcp_mask->hdr.recv_ack,
1018 sizeof(tcp_mask->hdr.recv_ack));
1019 ulp_rte_prsr_mask_copy(params, &idx,
1020 &tcp_mask->hdr.data_off,
1021 sizeof(tcp_mask->hdr.data_off));
1022 ulp_rte_prsr_mask_copy(params, &idx,
1023 &tcp_mask->hdr.tcp_flags,
1024 sizeof(tcp_mask->hdr.tcp_flags));
1025 ulp_rte_prsr_mask_copy(params, &idx,
1026 &tcp_mask->hdr.rx_win,
1027 sizeof(tcp_mask->hdr.rx_win));
1028 ulp_rte_prsr_mask_copy(params, &idx,
1029 &tcp_mask->hdr.cksum,
1030 sizeof(tcp_mask->hdr.cksum));
1031 ulp_rte_prsr_mask_copy(params, &idx,
1032 &tcp_mask->hdr.tcp_urp,
1033 sizeof(tcp_mask->hdr.tcp_urp));
1035 /* add number of TCP header elements */
1036 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1038 /* Set the udp header bitmap and computed l4 header bitmaps */
1039 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
1041 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1042 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
/* An outer L4 already exists: this header is the inner TCP. */
1043 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1044 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1046 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1047 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1049 return BNXT_TF_RC_SUCCESS;
1052 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * VXLAN item handler: copies flags, rsvd0, vni and rsvd1 (spec and mask)
 * into hdr_field slots, advances field_idx, and sets the tunnel-VXLAN
 * bit in the header bitmap.
 * NOTE(review): spec/mask NULL guards, 'size' declaration and the
 * buffer arguments of several copies are missing from this extract;
 * "¶ms" is mojibake for "&params".
 */
1054 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1055 struct ulp_rte_parser_params *params)
1057 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1058 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1059 struct ulp_rte_hdr_field *field;
1060 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1061 uint32_t idx = params->field_idx;
1065 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1069 size = sizeof(vxlan_spec->flags);
1070 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1073 size = sizeof(vxlan_spec->rsvd0);
1074 field = ulp_rte_parser_fld_copy(field,
1077 size = sizeof(vxlan_spec->vni);
1078 field = ulp_rte_parser_fld_copy(field,
1081 size = sizeof(vxlan_spec->rsvd1);
1082 field = ulp_rte_parser_fld_copy(field,
/* Mask copies mirror the spec copies, one per VXLAN header field. */
1087 ulp_rte_prsr_mask_copy(params, &idx,
1089 sizeof(vxlan_mask->flags));
1090 ulp_rte_prsr_mask_copy(params, &idx,
1092 sizeof(vxlan_mask->rsvd0));
1093 ulp_rte_prsr_mask_copy(params, &idx,
1095 sizeof(vxlan_mask->vni));
1096 ulp_rte_prsr_mask_copy(params, &idx,
1098 sizeof(vxlan_mask->rsvd1));
1100 /* Add number of vxlan header elements */
1101 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1103 /* Update the hdr_bitmap with vxlan */
1104 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1105 return BNXT_TF_RC_SUCCESS;
1108 /* Function to handle the parsing of RTE Flow item void Header */
1110 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1111 struct ulp_rte_parser_params *params __rte_unused)
1113 return BNXT_TF_RC_SUCCESS;
1116 /* Function to handle the parsing of RTE Flow action void Header. */
1118 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1119 struct ulp_rte_parser_params *params __rte_unused)
1121 return BNXT_TF_RC_SUCCESS;
1124 /* Function to handle the parsing of RTE Flow action Mark Header. */
1126 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1127 struct ulp_rte_parser_params *param)
1129 const struct rte_flow_action_mark *mark;
1130 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1133 mark = action_item->conf;
1135 mark_id = tfp_cpu_to_be_32(mark->id);
1136 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1137 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1139 /* Update the hdr_bitmap with vxlan */
1140 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1141 return BNXT_TF_RC_SUCCESS;
1143 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1144 return BNXT_TF_RC_ERROR;
1147 /* Function to handle the parsing of RTE Flow action RSS Header. */
1149 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1150 struct ulp_rte_parser_params *param)
1152 const struct rte_flow_action_rss *rss = action_item->conf;
1155 /* Update the hdr_bitmap with vxlan */
1156 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1157 return BNXT_TF_RC_SUCCESS;
1159 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1160 return BNXT_TF_RC_ERROR;
1163 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1165 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1166 struct ulp_rte_parser_params *params)
1168 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1169 const struct rte_flow_item *item;
1170 const struct rte_flow_item_eth *eth_spec;
1171 const struct rte_flow_item_ipv4 *ipv4_spec;
1172 const struct rte_flow_item_ipv6 *ipv6_spec;
1173 struct rte_flow_item_vxlan vxlan_spec;
1174 uint32_t vlan_num = 0, vlan_size = 0;
1175 uint32_t ip_size = 0, ip_type = 0;
1176 uint32_t vxlan_size = 0;
1178 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1179 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1181 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1182 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1184 vxlan_encap = action_item->conf;
1186 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1187 return BNXT_TF_RC_ERROR;
1190 item = vxlan_encap->definition;
1192 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1193 return BNXT_TF_RC_ERROR;
1196 if (!ulp_rte_item_skip_void(&item, 0))
1197 return BNXT_TF_RC_ERROR;
1199 /* must have ethernet header */
1200 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1201 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1202 return BNXT_TF_RC_ERROR;
1204 eth_spec = item->spec;
1205 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1206 ulp_encap_buffer_copy(buff,
1207 eth_spec->dst.addr_bytes,
1208 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1210 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1211 ulp_encap_buffer_copy(buff,
1212 eth_spec->src.addr_bytes,
1213 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1215 /* Goto the next item */
1216 if (!ulp_rte_item_skip_void(&item, 1))
1217 return BNXT_TF_RC_ERROR;
1219 /* May have vlan header */
1220 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1222 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1223 ulp_encap_buffer_copy(buff,
1225 sizeof(struct rte_flow_item_vlan));
1227 if (!ulp_rte_item_skip_void(&item, 1))
1228 return BNXT_TF_RC_ERROR;
1231 /* may have two vlan headers */
1232 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1234 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1235 sizeof(struct rte_flow_item_vlan)],
1237 sizeof(struct rte_flow_item_vlan));
1238 if (!ulp_rte_item_skip_void(&item, 1))
1239 return BNXT_TF_RC_ERROR;
1241 /* Update the vlan count and size of more than one */
1243 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1244 vlan_num = tfp_cpu_to_be_32(vlan_num);
1245 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1248 vlan_size = tfp_cpu_to_be_32(vlan_size);
1249 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1254 /* L3 must be IPv4, IPv6 */
1255 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1256 ipv4_spec = item->spec;
1257 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1259 /* copy the ipv4 details */
1260 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1261 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1262 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1263 ulp_encap_buffer_copy(buff,
1265 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1266 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1268 const uint8_t *tmp_buff;
1270 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1271 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1272 ulp_encap_buffer_copy(buff,
1274 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1275 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1276 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1277 ulp_encap_buffer_copy(buff,
1278 &ipv4_spec->hdr.version_ihl,
1279 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1281 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1282 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1283 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1284 ulp_encap_buffer_copy(buff,
1285 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1286 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1288 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1289 ulp_encap_buffer_copy(buff,
1290 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1291 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1293 /* Update the ip size details */
1294 ip_size = tfp_cpu_to_be_32(ip_size);
1295 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1296 &ip_size, sizeof(uint32_t));
1298 /* update the ip type */
1299 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1300 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1301 &ip_type, sizeof(uint32_t));
1303 /* update the computed field to notify it is ipv4 header */
1304 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1307 if (!ulp_rte_item_skip_void(&item, 1))
1308 return BNXT_TF_RC_ERROR;
1309 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1310 ipv6_spec = item->spec;
1311 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1313 /* copy the ipv4 details */
1314 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1315 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1317 /* Update the ip size details */
1318 ip_size = tfp_cpu_to_be_32(ip_size);
1319 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1320 &ip_size, sizeof(uint32_t));
1322 /* update the ip type */
1323 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1324 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1325 &ip_type, sizeof(uint32_t));
1327 /* update the computed field to notify it is ipv6 header */
1328 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1331 if (!ulp_rte_item_skip_void(&item, 1))
1332 return BNXT_TF_RC_ERROR;
1334 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1335 return BNXT_TF_RC_ERROR;
1339 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1340 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1341 return BNXT_TF_RC_ERROR;
1343 /* copy the udp details */
1344 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1345 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1347 if (!ulp_rte_item_skip_void(&item, 1))
1348 return BNXT_TF_RC_ERROR;
1351 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1352 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1353 return BNXT_TF_RC_ERROR;
1355 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1356 /* copy the vxlan details */
1357 memcpy(&vxlan_spec, item->spec, vxlan_size);
1358 vxlan_spec.flags = 0x08;
1359 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1360 (const uint8_t *)&vxlan_spec,
1362 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1363 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1364 &vxlan_size, sizeof(uint32_t));
1366 /* update the hdr_bitmap with vxlan */
1367 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1368 return BNXT_TF_RC_SUCCESS;
1371 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1373 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1375 struct ulp_rte_parser_params *params)
1377 /* update the hdr_bitmap with vxlan */
1378 ULP_BITMAP_SET(params->act_bitmap.bits,
1379 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1380 return BNXT_TF_RC_SUCCESS;
1383 /* Function to handle the parsing of RTE Flow action drop Header. */
1385 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1386 struct ulp_rte_parser_params *params)
1388 /* Update the hdr_bitmap with drop */
1389 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1390 return BNXT_TF_RC_SUCCESS;
1393 /* Function to handle the parsing of RTE Flow action count. */
1395 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1396 struct ulp_rte_parser_params *params)
1399 const struct rte_flow_action_count *act_count;
1400 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1402 act_count = action_item->conf;
1404 if (act_count->shared) {
1406 "Parse Error:Shared count not supported\n");
1407 return BNXT_TF_RC_PARSE_ERR;
1409 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1411 BNXT_ULP_ACT_PROP_SZ_COUNT);
1414 /* Update the hdr_bitmap with count */
1415 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1416 return BNXT_TF_RC_SUCCESS;
1419 /* Function to handle the parsing of action ports. */
1421 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1424 enum bnxt_ulp_direction_type dir;
1427 struct ulp_rte_act_prop *act = ¶m->act_prop;
1429 /* Get the direction */
1430 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1431 if (dir == BNXT_ULP_DIR_EGRESS) {
1432 /* For egress direction, fill vport */
1433 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1434 return BNXT_TF_RC_ERROR;
1437 pid = rte_cpu_to_be_32(pid);
1438 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1439 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1441 /* For ingress direction, fill vnic */
1442 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1443 BNXT_ULP_DRV_FUNC_VNIC,
1445 return BNXT_TF_RC_ERROR;
1448 pid = rte_cpu_to_be_32(pid);
1449 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1450 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1453 /* Update the action port set bit */
1454 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1455 return BNXT_TF_RC_SUCCESS;
1458 /* Function to handle the parsing of RTE Flow action PF. */
1460 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1461 struct ulp_rte_parser_params *params)
1465 enum bnxt_ulp_intf_type intf_type;
1467 /* Get the port id of the current device */
1468 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1470 /* Get the port db ifindex */
1471 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1473 BNXT_TF_DBG(ERR, "Invalid port id\n");
1474 return BNXT_TF_RC_ERROR;
1477 /* Check the port is PF port */
1478 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1479 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1480 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1481 return BNXT_TF_RC_ERROR;
1483 /* Update the action properties */
1484 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1485 return ulp_rte_parser_act_port_set(params, ifindex);
1488 /* Function to handle the parsing of RTE Flow action VF. */
1490 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1491 struct ulp_rte_parser_params *params)
1493 const struct rte_flow_action_vf *vf_action;
1495 enum bnxt_ulp_intf_type intf_type;
1497 vf_action = action_item->conf;
1499 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1500 return BNXT_TF_RC_PARSE_ERR;
1503 if (vf_action->original) {
1504 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1505 return BNXT_TF_RC_PARSE_ERR;
1508 /* Check the port is VF port */
1509 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1511 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1512 return BNXT_TF_RC_ERROR;
1514 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1515 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1516 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1517 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1518 return BNXT_TF_RC_ERROR;
1521 /* Update the action properties */
1522 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1523 return ulp_rte_parser_act_port_set(params, ifindex);
1526 /* Function to handle the parsing of RTE Flow action port_id. */
1528 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1529 struct ulp_rte_parser_params *param)
1531 const struct rte_flow_action_port_id *port_id = act_item->conf;
1533 enum bnxt_ulp_intf_type intf_type;
1537 "ParseErr: Invalid Argument\n");
1538 return BNXT_TF_RC_PARSE_ERR;
1540 if (port_id->original) {
1542 "ParseErr:Portid Original not supported\n");
1543 return BNXT_TF_RC_PARSE_ERR;
1546 /* Get the port db ifindex */
1547 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1549 BNXT_TF_DBG(ERR, "Invalid port id\n");
1550 return BNXT_TF_RC_ERROR;
1553 /* Get the intf type */
1554 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1556 BNXT_TF_DBG(ERR, "Invalid port type\n");
1557 return BNXT_TF_RC_ERROR;
1560 /* Set the action port */
1561 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1562 return ulp_rte_parser_act_port_set(param, ifindex);
1565 /* Function to handle the parsing of RTE Flow action phy_port. */
1567 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1568 struct ulp_rte_parser_params *prm)
1570 const struct rte_flow_action_phy_port *phy_port;
1574 enum bnxt_ulp_direction_type dir;
1576 phy_port = action_item->conf;
1579 "ParseErr: Invalid Argument\n");
1580 return BNXT_TF_RC_PARSE_ERR;
1583 if (phy_port->original) {
1585 "Parse Err:Port Original not supported\n");
1586 return BNXT_TF_RC_PARSE_ERR;
1588 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1589 if (dir != BNXT_ULP_DIR_EGRESS) {
1591 "Parse Err:Phy ports are valid only for egress\n");
1592 return BNXT_TF_RC_PARSE_ERR;
1594 /* Get the physical port details from port db */
1595 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1598 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1603 pid = rte_cpu_to_be_32(pid);
1604 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1605 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1607 /* Update the action port set bit */
1608 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1609 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1610 BNXT_ULP_INTF_TYPE_PHY_PORT);
1611 return BNXT_TF_RC_SUCCESS;
1614 /* Function to handle the parsing of RTE Flow action pop vlan. */
1616 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1617 struct ulp_rte_parser_params *params)
1619 /* Update the act_bitmap with pop */
1620 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1621 return BNXT_TF_RC_SUCCESS;
1624 /* Function to handle the parsing of RTE Flow action push vlan. */
1626 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1627 struct ulp_rte_parser_params *params)
1629 const struct rte_flow_action_of_push_vlan *push_vlan;
1631 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1633 push_vlan = action_item->conf;
1635 ethertype = push_vlan->ethertype;
1636 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1638 "Parse Err: Ethertype not supported\n");
1639 return BNXT_TF_RC_PARSE_ERR;
1641 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1642 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1643 /* Update the hdr_bitmap with push vlan */
1644 ULP_BITMAP_SET(params->act_bitmap.bits,
1645 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1646 return BNXT_TF_RC_SUCCESS;
1648 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1649 return BNXT_TF_RC_ERROR;
1652 /* Function to handle the parsing of RTE Flow action set vlan id. */
1654 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1655 struct ulp_rte_parser_params *params)
1657 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1659 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1661 vlan_vid = action_item->conf;
1662 if (vlan_vid && vlan_vid->vlan_vid) {
1663 vid = vlan_vid->vlan_vid;
1664 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1665 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1666 /* Update the hdr_bitmap with vlan vid */
1667 ULP_BITMAP_SET(params->act_bitmap.bits,
1668 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1669 return BNXT_TF_RC_SUCCESS;
1671 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1672 return BNXT_TF_RC_ERROR;
1675 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1677 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1678 struct ulp_rte_parser_params *params)
1680 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1682 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1684 vlan_pcp = action_item->conf;
1686 pcp = vlan_pcp->vlan_pcp;
1687 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1688 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1689 /* Update the hdr_bitmap with vlan vid */
1690 ULP_BITMAP_SET(params->act_bitmap.bits,
1691 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1692 return BNXT_TF_RC_SUCCESS;
1694 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1695 return BNXT_TF_RC_ERROR;