1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
20 #include "ulp_template_db_tbl.h"
22 /* Local defines for the parsing functions */
23 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
24 #define ULP_VLAN_PRIORITY_MASK 0x700
25 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
26 #define ULP_UDP_PORT_VXLAN 4789
28 /* Utility function to skip the void items. */
/*
 * Advance *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries,
 * stepping by 'increment' per skipped item.
 * NOTE(review): the return-type line and loop body are missing from
 * this view — confirm against the full source.
 */
30 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
36 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
43 /* Utility function to copy field spec items */
/*
 * Copy 'buffer' into field->spec for field->size bytes.
 * Declared to return struct ulp_rte_hdr_field *; presumably the next
 * field slot — intermediate lines are missing here, TODO confirm.
 */
44 static struct ulp_rte_hdr_field *
45 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
50 memcpy(field->spec, buffer, field->size);
55 /* Utility function to update the field_bitmap */
/*
 * Mark hdr_field[idx] in the parser field bitmaps based on its mask:
 *  - non-zero mask: set fld_bitmap; also set fld_s_bitmap unless the
 *    MATCH_IGNORE action is requested; a partial (not all-ones) mask
 *    additionally flags the flow as a wild-card match.
 *  - zero mask: clear the bit in fld_bitmap.
 * NOTE(review): some lines of this function are missing from this view.
 */
57 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
59 enum bnxt_ulp_prsr_action prsr_act)
61 struct ulp_rte_hdr_field *field;
/* fixed mojibake: "¶ms" was an encoding corruption of "&params" */
63 field = &params->hdr_field[idx];
64 if (ulp_bitmap_notzero(field->mask, field->size)) {
65 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
66 if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
67 ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
/* a mask that is not all-ones means a wild-card (partial) match */
69 if (!ulp_bitmap_is_ones(field->mask, field->size))
70 ULP_COMP_FLD_IDX_WR(params,
71 BNXT_ULP_CF_IDX_WC_MATCH, 1);
73 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
/* Safe member access: yields &x->y when x is non-NULL, else NULL.
 * (The "deference" spelling is historical and used file-wide; renaming
 * would break every caller.)
 */
77 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
78 /* Utility function to copy field spec and masks items */
/*
 * Copy a spec/mask pair into the hdr_field at *idx:
 *  - the mask is copied (and the field bitmaps updated) unless the
 *    MASK_IGNORE action is requested or mask_buff is NULL;
 *  - the spec is copied only when both buffers are present and the
 *    mask is non-zero.
 * NOTE(review): the size-assignment and index-increment lines are
 * missing from this view.
 */
80 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
83 const void *spec_buff,
84 const void *mask_buff,
85 enum bnxt_ulp_prsr_action prsr_act)
/* fixed mojibake: "¶ms" was an encoding corruption of "&params" */
87 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
89 /* update the field size */
92 /* copy the mask specifications only if mask is not null */
93 if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
94 memcpy(field->mask, mask_buff, size);
95 ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
98 /* copy the spec only when both spec and a non-zero mask are given */
99 if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
100 memcpy(field->spec, spec_buff, size);
102 /* Increment the index */
106 /* Utility function to validate and reserve hdr_field index space */
/*
 * Bounds-check a reservation of 'size' hdr_field slots against
 * BNXT_ULP_PROTO_HDR_MAX, return the current index through *idx and
 * advance params->field_idx past the reserved range.
 * NOTE(review): on the error path *idx has not been assigned yet, so
 * the logged value is whatever the caller passed in — confirm intent.
 */
108 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
112 if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
113 BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
116 *idx = params->field_idx;
117 params->field_idx += size;
122 * Function to handle the parsing of RTE Flows and placing
123 * the RTE flow items into the ulp structures.
/*
 * Walk the rte_flow pattern until RTE_FLOW_ITEM_TYPE_END, dispatching
 * each item to its registered proto_hdr_func.  Vendor-specific item
 * types (>= BNXT_RTE_FLOW_ITEM_TYPE_END) are looked up in
 * ulp_vendor_hdr_info; standard items in ulp_hdr_info.  Items past
 * RTE_FLOW_ITEM_TYPE_HIGIG2 or marked NOT_SUPPORTED are rejected.
 * On success, falls through to the implicit SVIF match processing.
 */
126 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
127 struct ulp_rte_parser_params *params)
129 const struct rte_flow_item *item = pattern;
130 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* reserve the leading hdr_field slots used for the SVIF match */
132 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
134 /* Set the computed flags for no vlan tags before parsing */
135 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
136 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
138 /* Parse all the items in the pattern */
139 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
140 if (item->type >= (uint32_t)
141 BNXT_RTE_FLOW_ITEM_TYPE_END) {
143 (uint32_t)BNXT_RTE_FLOW_ITEM_TYPE_LAST)
144 goto hdr_parser_error;
145 /* get the header information */
146 hdr_info = &ulp_vendor_hdr_info[item->type -
147 BNXT_RTE_FLOW_ITEM_TYPE_END];
149 if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
150 goto hdr_parser_error;
151 hdr_info = &ulp_hdr_info[item->type];
153 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
154 goto hdr_parser_error;
155 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
156 /* call the registered callback handler */
157 if (hdr_info->proto_hdr_func) {
158 if (hdr_info->proto_hdr_func(item, params) !=
159 BNXT_TF_RC_SUCCESS) {
160 return BNXT_TF_RC_ERROR;
166 /* update the implied SVIF */
167 return ulp_rte_parser_implicit_match_port_process(params);
170 BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
172 return BNXT_TF_RC_PARSE_ERR;
176 * Function to handle the parsing of RTE Flows and placing
177 * the RTE flow actions into the ulp structures.
/*
 * Walk the rte_flow action list until RTE_FLOW_ACTION_TYPE_END,
 * dispatching each action to its registered proto_act_func.
 * Vendor-specific action types are looked up in ulp_vendor_act_info;
 * standard actions in ulp_act_info.  Actions past
 * RTE_FLOW_ACTION_TYPE_SHARED or marked NOT_SUPPORTED are rejected.
 * On success, finishes with the implicit action-port processing.
 */
180 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
181 struct ulp_rte_parser_params *params)
183 const struct rte_flow_action *action_item = actions;
184 struct bnxt_ulp_rte_act_info *hdr_info;
186 /* Parse all the items in the pattern */
187 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
188 if (action_item->type >=
189 (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_END) {
190 if (action_item->type >=
191 (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_LAST)
192 goto act_parser_error;
193 /* get the header information from bnxt actinfo table */
194 hdr_info = &ulp_vendor_act_info[action_item->type -
195 BNXT_RTE_FLOW_ACTION_TYPE_END];
197 if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
198 goto act_parser_error;
199 /* get the header information from the act info table */
200 hdr_info = &ulp_act_info[action_item->type];
202 if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
203 goto act_parser_error;
204 } else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
205 /* call the registered callback handler */
206 if (hdr_info->proto_act_func) {
207 if (hdr_info->proto_act_func(action_item,
209 BNXT_TF_RC_SUCCESS) {
210 return BNXT_TF_RC_ERROR;
216 /* update the implied port details */
217 ulp_rte_parser_implicit_act_port_process(params);
218 return BNXT_TF_RC_SUCCESS;
221 BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
223 return BNXT_TF_RC_ERROR;
227 * Function to handle the post processing of the computed
228 * fields for the interface.
/*
 * Resolve the incoming dpdk port to a ulp ifindex and populate the
 * computed PARIF fields.  Ingress flows get the physical-port PARIF;
 * a VF-rep match port additionally records MATCH_PORT_IS_VFREP and the
 * VF-function PARIF, otherwise the driver-function PARIF is written
 * (and MATCH_PORT_IS_PF for a PF match port).
 * NOTE(review): several closing braces/early-return lines are missing
 * from this view.
 */
231 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
234 uint16_t port_id, parif;
236 enum bnxt_ulp_direction_type dir;
238 /* get the direction details */
239 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
241 /* read the port id details */
242 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
243 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
246 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
250 if (dir == BNXT_ULP_DIR_INGRESS) {
252 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
253 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
254 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
257 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
260 /* Get the match port type */
261 mtype = ULP_COMP_FLD_IDX_RD(params,
262 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
263 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
264 ULP_COMP_FLD_IDX_WR(params,
265 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
267 /* Set VF func PARIF */
268 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
269 BNXT_ULP_VF_FUNC_PARIF,
272 "ParseErr:ifindex is not valid\n");
275 ULP_COMP_FLD_IDX_WR(params,
276 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
280 /* Set DRV func PARIF */
281 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
282 BNXT_ULP_DRV_FUNC_PARIF,
285 "ParseErr:ifindex is not valid\n");
288 ULP_COMP_FLD_IDX_WR(params,
289 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
292 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
293 ULP_COMP_FLD_IDX_WR(params,
294 BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
/*
 * Post-process a parsed (non-tunnel) flow: stamp the flow direction
 * into the header/action bitmaps, derive the VF-to-VF flag, translate
 * the DEC_TTL action into tunnel vs. plain decrement computed fields,
 * merge the fast-path header bits, record the flow id, and update the
 * computed interface (PARIF) fields.
 */
301 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
303 enum bnxt_ulp_intf_type match_port_type, act_port_type;
304 enum bnxt_ulp_direction_type dir;
305 uint32_t act_port_set;
307 /* Get the computed details */
308 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
309 match_port_type = ULP_COMP_FLD_IDX_RD(params,
310 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
311 act_port_type = ULP_COMP_FLD_IDX_RD(params,
312 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
313 act_port_set = ULP_COMP_FLD_IDX_RD(params,
314 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
316 /* set the flow direction in the proto and action header */
317 if (dir == BNXT_ULP_DIR_EGRESS) {
318 ULP_BITMAP_SET(params->hdr_bitmap.bits,
319 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
320 ULP_BITMAP_SET(params->act_bitmap.bits,
321 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
324 /* calculate the VF to VF flag */
325 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
326 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
327 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
329 /* Update the decrement ttl computational fields */
330 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
331 BNXT_ULP_ACT_BIT_DEC_TTL)) {
333 * Check that vxlan proto is included and vxlan decap
334 * action is not set then decrement tunnel ttl.
335 * Similarly add GRE and NVGRE in future.
337 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
338 BNXT_ULP_HDR_BIT_T_VXLAN) &&
339 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
340 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
341 ULP_COMP_FLD_IDX_WR(params,
342 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
344 ULP_COMP_FLD_IDX_WR(params,
345 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
349 /* Merge the hdr_fp_bit into the proto header bit */
350 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
352 /* Update the comp fld fid */
353 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
355 /* Update the computed interface parameters */
356 bnxt_ulp_comp_fld_intf_update(params);
358 /* TBD: Handle the flow rejection scenarios */
363 * Function to handle the post processing of the parsing details
/*
 * Thin wrapper: currently delegates all post-processing to
 * ulp_post_process_normal_flow().  NOTE(review): the return-type and
 * return-statement lines are missing from this view.
 */
366 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
368 ulp_post_process_normal_flow(params);
372 * Function to compute the flow direction based on the match port details
/*
 * Derive BNXT_ULP_CF_IDX_DIRECTION from the flow attributes and the
 * match port type: an ingress-attributed flow whose match port is a
 * VF representor is treated as egress (traffic leaves toward the VF);
 * otherwise the direction follows the flow attribute directly.
 */
375 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
377 enum bnxt_ulp_intf_type match_port_type;
379 /* Get the match port type */
380 match_port_type = ULP_COMP_FLD_IDX_RD(params,
381 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
383 /* If ingress flow and matchport is vf rep then dir is egress*/
384 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
385 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
386 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
387 BNXT_ULP_DIR_EGRESS);
389 /* Assign the input direction */
390 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
391 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
392 BNXT_ULP_DIR_INGRESS);
394 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
395 BNXT_ULP_DIR_EGRESS);
399 /* Function to set the SVIF match fields from an ifindex. */
/*
 * Program the SVIF match in hdr_field[SVIF_IDX]:
 *  - reject a second SVIF source (only one match port per flow);
 *  - record the match port type and compute the flow direction;
 *  - pick the SVIF type (phy-port for non-VF-rep ingress, VF-func for
 *    a VF-rep unless the item forces egress, otherwise drv-func);
 *  - fetch the SVIF from the port DB, store spec/mask big-endian and
 *    remember the CPU-order value in the SVIF_FLAG computed field.
 * NOTE(review): several lines (parameter list, some braces) are
 * missing from this view.
 */
401 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
404 enum bnxt_ulp_direction_type item_dir)
407 enum bnxt_ulp_direction_type dir;
408 struct ulp_rte_hdr_field *hdr_field;
409 enum bnxt_ulp_svif_type svif_type;
410 enum bnxt_ulp_intf_type port_type;
412 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
413 BNXT_ULP_INVALID_SVIF_VAL) {
415 "SVIF already set,multiple source not support'd\n");
416 return BNXT_TF_RC_ERROR;
419 /* Get port type details */
420 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
421 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
422 BNXT_TF_DBG(ERR, "Invalid port type\n");
423 return BNXT_TF_RC_ERROR;
426 /* Update the match port type */
427 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
429 /* compute the direction */
430 bnxt_ulp_rte_parser_direction_compute(params);
432 /* Get the computed direction; an explicit item direction wins */
433 dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
434 ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
435 if (dir == BNXT_ULP_DIR_INGRESS &&
436 port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
437 svif_type = BNXT_ULP_PHY_PORT_SVIF;
439 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
440 item_dir != BNXT_ULP_DIR_EGRESS)
441 svif_type = BNXT_ULP_VF_FUNC_SVIF;
443 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
445 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
447 svif = rte_cpu_to_be_16(svif);
/* fixed mojibake: "¶ms" was an encoding corruption of "&params" */
448 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
449 memcpy(hdr_field->spec, &svif, sizeof(svif));
450 memcpy(hdr_field->mask, &mask, sizeof(mask));
451 hdr_field->size = sizeof(svif);
452 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
453 rte_be_to_cpu_16(svif));
454 return BNXT_TF_RC_SUCCESS;
457 /* Function to handle the parsing of the RTE port id */
/*
 * If no pattern item supplied an SVIF, derive it implicitly from the
 * incoming interface: translate the dpdk port id to a ulp ifindex and
 * program an exact-match (0xFFFF mask) SVIF via ulp_rte_parser_svif_set.
 * Returns success immediately when the SVIF was already set.
 */
459 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
461 uint16_t port_id = 0;
462 uint16_t svif_mask = 0xFFFF;
464 int32_t rc = BNXT_TF_RC_ERROR;
466 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
467 BNXT_ULP_INVALID_SVIF_VAL)
468 return BNXT_TF_RC_SUCCESS;
470 /* SVIF not set. So get the port id */
471 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
473 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
476 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
480 /* Update the SVIF details */
481 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
482 BNXT_ULP_DIR_INVALID);
486 /* Function to handle the implicit action port id */
/*
 * If no action supplied a destination port, synthesize a PORT_ID
 * action pointing back at the incoming interface and feed it through
 * ulp_rte_port_act_handler, then clear ACT_PORT_IS_SET so the port is
 * still treated as implicit.
 */
488 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
490 struct rte_flow_action action_item = {0};
491 struct rte_flow_action_port_id port_id = {0};
493 /* Read the action port set bit */
494 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
495 /* Already set, so just exit */
496 return BNXT_TF_RC_SUCCESS;
498 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
499 action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
500 action_item.conf = &port_id;
502 /* Update the action port based on incoming port */
503 ulp_rte_port_act_handler(&action_item, params);
505 /* Reset the action port set bit */
506 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
507 return BNXT_TF_RC_SUCCESS;
510 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item carries no spec: use the incoming dpdk port, translate it to
 * a ulp ifindex and set an exact-match SVIF.
 */
512 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
513 struct ulp_rte_parser_params *params)
515 uint16_t port_id = 0;
516 uint16_t svif_mask = 0xFFFF;
519 /* Get the implicit port id */
520 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
522 /* perform the conversion from dpdk port to bnxt ifindex */
523 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
526 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
527 return BNXT_TF_RC_ERROR;
530 /* Update the SVIF details */
531 return ulp_rte_parser_svif_set(params, ifindex, svif_mask,
532 BNXT_ULP_DIR_INVALID);
535 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * Validate the VF item's spec/mask, translate the VF function id to a
 * ulp ifindex, and set the SVIF match from it.
 * NOTE(review): the spec/mask validation conditions are on lines
 * missing from this view.
 */
537 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
538 struct ulp_rte_parser_params *params)
540 const struct rte_flow_item_vf *vf_spec = item->spec;
541 const struct rte_flow_item_vf *vf_mask = item->mask;
544 int32_t rc = BNXT_TF_RC_PARSE_ERR;
546 /* Get VF rte_flow_item for Port details */
548 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
552 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
557 /* perform the conversion from VF Func id to bnxt ifindex */
558 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
561 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
564 /* Update the SVIF details */
565 return ulp_rte_parser_svif_set(params, ifindex, mask,
566 BNXT_ULP_DIR_INVALID);
569 /* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
/*
 * Extract the ethdev id and mask from whichever port item variant was
 * given, pick the item-implied direction (PORT_REPRESENTOR=ingress,
 * REPRESENTED_PORT=egress, PORT_ID=unspecified), translate the dpdk
 * port to a ulp ifindex and set the SVIF match.
 */
571 ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
572 struct ulp_rte_parser_params *params)
574 enum bnxt_ulp_direction_type item_dir;
577 int32_t rc = BNXT_TF_RC_PARSE_ERR;
581 BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
585 BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
589 switch (item->type) {
590 case RTE_FLOW_ITEM_TYPE_PORT_ID: {
591 const struct rte_flow_item_port_id *port_spec = item->spec;
592 const struct rte_flow_item_port_id *port_mask = item->mask;
594 item_dir = BNXT_ULP_DIR_INVALID;
595 ethdev_id = port_spec->id;
596 mask = port_mask->id;
599 case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
600 const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
601 const struct rte_flow_item_ethdev *ethdev_mask = item->mask;
603 item_dir = BNXT_ULP_DIR_INGRESS;
604 ethdev_id = ethdev_spec->port_id;
605 mask = ethdev_mask->port_id;
608 case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
609 const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
610 const struct rte_flow_item_ethdev *ethdev_mask = item->mask;
612 item_dir = BNXT_ULP_DIR_EGRESS;
613 ethdev_id = ethdev_spec->port_id;
614 mask = ethdev_mask->port_id;
618 BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");
622 /* perform the conversion from dpdk port to bnxt ifindex */
623 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
626 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
629 /* Update the SVIF details */
630 return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
633 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * Match on a physical port: validate spec/mask, mark the match port
 * type as PHY_PORT, compute the direction (phy-port matches are only
 * valid for ingress), look up the port's SVIF in the port DB and
 * program the SVIF spec/mask in big-endian byte order.
 * NOTE(review): the validation conditions and some declarations are on
 * lines missing from this view.
 */
635 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
636 struct ulp_rte_parser_params *params)
638 const struct rte_flow_item_phy_port *port_spec = item->spec;
639 const struct rte_flow_item_phy_port *port_mask = item->mask;
641 int32_t rc = BNXT_TF_RC_ERROR;
643 enum bnxt_ulp_direction_type dir;
644 struct ulp_rte_hdr_field *hdr_field;
646 /* Copy the rte_flow_item for phy port into hdr_field */
648 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
652 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
655 mask = port_mask->index;
657 /* Update the match port type */
658 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
659 BNXT_ULP_INTF_TYPE_PHY_PORT);
661 /* Compute the Hw direction */
662 bnxt_ulp_rte_parser_direction_compute(params);
664 /* Direction validation */
665 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
666 if (dir == BNXT_ULP_DIR_EGRESS) {
668 "Parse Err:Phy ports are valid only for ingress\n");
669 return BNXT_TF_RC_PARSE_ERR;
672 /* Get the physical port details from port db */
673 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
676 BNXT_TF_DBG(ERR, "Failed to get port details\n");
677 return BNXT_TF_RC_PARSE_ERR;
680 /* Update the SVIF details */
681 svif = rte_cpu_to_be_16(svif);
/* fixed mojibake: "¶ms" was an encoding corruption of "&params" */
682 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
683 memcpy(hdr_field->spec, &svif, sizeof(svif));
684 memcpy(hdr_field->mask, &mask, sizeof(mask));
685 hdr_field->size = sizeof(svif);
686 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
687 rte_be_to_cpu_16(svif));
688 return BNXT_TF_RC_SUCCESS;
691 /* Function to handle the update of proto header based on field values */
/*
 * Translate an ethertype (big-endian, as carried in the item) into the
 * inner/outer IPv4 or IPv6 fast-path header bit and the matching
 * I_L3/O_L3 computed field; 'in_flag' selects inner vs outer.
 */
693 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
694 uint16_t type, uint32_t in_flag)
696 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
698 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
699 BNXT_ULP_HDR_BIT_I_IPV4);
700 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
702 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
703 BNXT_ULP_HDR_BIT_O_IPV4);
704 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
706 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
708 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
709 BNXT_ULP_HDR_BIT_I_IPV6);
710 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
712 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
713 BNXT_ULP_HDR_BIT_O_IPV6);
714 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
719 /* Internal Function to identify broadcast or multicast packets */
/*
 * Return non-zero (with a debug log) for multicast/broadcast MAC
 * addresses, which this parser does not offload; zero otherwise.
 * NOTE(review): the return statements are on lines missing from this
 * view — confirm the exact return values in the full source.
 */
721 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
723 if (rte_is_multicast_ether_addr(eth_addr) ||
724 rte_is_broadcast_ether_addr(eth_addr)) {
726 "No support for bcast or mcast addr offload\n");
732 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Parse an ETH item: reject broadcast/multicast MACs (not offloadable),
 * reserve hdr_field slots, copy dst MAC, src MAC and ethertype
 * spec/mask (ethertype uses MATCH_IGNORE), then set the inner- or
 * outer-ETH header bit depending on whether an outer L2/L3/L4 header
 * was already seen, and propagate the ethertype into the L3 bits.
 */
734 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
735 struct ulp_rte_parser_params *params)
737 const struct rte_flow_item_eth *eth_spec = item->spec;
738 const struct rte_flow_item_eth *eth_mask = item->mask;
739 uint32_t idx = 0, dmac_idx = 0;
741 uint16_t eth_type = 0;
742 uint32_t inner_flag = 0;
744 /* Perform validations */
746 /* Todo: work around to avoid multicast and broadcast addr */
/* fixed mojibake: "ð_spec" was an encoding corruption of "&eth_spec" */
747 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
748 return BNXT_TF_RC_PARSE_ERR;
750 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
751 return BNXT_TF_RC_PARSE_ERR;
753 eth_type = eth_spec->type;
756 if (ulp_rte_prsr_fld_size_validate(params, &idx,
757 BNXT_ULP_PROTO_HDR_ETH_NUM)) {
758 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
759 return BNXT_TF_RC_ERROR;
762 * Copy the rte_flow_item for eth into hdr_field using ethernet
766 size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
767 ulp_rte_prsr_fld_mask(params, &idx, size,
768 ulp_deference_struct(eth_spec, dst.addr_bytes),
769 ulp_deference_struct(eth_mask, dst.addr_bytes),
770 ULP_PRSR_ACT_DEFAULT);
772 size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
773 ulp_rte_prsr_fld_mask(params, &idx, size,
774 ulp_deference_struct(eth_spec, src.addr_bytes),
775 ulp_deference_struct(eth_mask, src.addr_bytes),
776 ULP_PRSR_ACT_DEFAULT);
778 size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
779 ulp_rte_prsr_fld_mask(params, &idx, size,
780 ulp_deference_struct(eth_spec, type),
781 ulp_deference_struct(eth_mask, type),
782 ULP_PRSR_ACT_MATCH_IGNORE);
784 /* Update the protocol hdr bitmap */
785 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
786 BNXT_ULP_HDR_BIT_O_ETH) ||
787 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
788 BNXT_ULP_HDR_BIT_O_IPV4) ||
789 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
790 BNXT_ULP_HDR_BIT_O_IPV6) ||
791 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
792 BNXT_ULP_HDR_BIT_O_UDP) ||
793 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
794 BNXT_ULP_HDR_BIT_O_TCP)) {
795 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
798 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
799 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
802 /* Update the field protocol hdr bitmap */
803 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
805 return BNXT_TF_RC_SUCCESS;
808 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Parse a VLAN item: split the TCI into priority (top 3 bits) and
 * VLAN id (low 12 bits), widen fully-specified sub-masks to exact
 * 16-bit matches, copy priority (mask ignored — OVS workaround),
 * tag and inner ethertype into hdr_field, then classify the tag as
 * outer-outer / outer-inner / inner-outer / inner-inner based on which
 * ETH headers were already seen and how many tags of each kind were
 * counted, updating the corresponding vtag computed fields.
 */
810 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
811 struct ulp_rte_parser_params *params)
813 const struct rte_flow_item_vlan *vlan_spec = item->spec;
814 const struct rte_flow_item_vlan *vlan_mask = item->mask;
815 struct ulp_rte_hdr_bitmap *hdr_bit;
817 uint16_t vlan_tag = 0, priority = 0;
818 uint16_t vlan_tag_mask = 0, priority_mask = 0;
819 uint32_t outer_vtag_num;
820 uint32_t inner_vtag_num;
821 uint16_t eth_type = 0;
822 uint32_t inner_flag = 0;
/* TCI layout per IEEE 802.1Q: PCP(3) | DEI(1) | VID(12) */
826 vlan_tag = ntohs(vlan_spec->tci);
827 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
828 vlan_tag &= ULP_VLAN_TAG_MASK;
829 vlan_tag = htons(vlan_tag);
830 eth_type = vlan_spec->inner_type;
834 vlan_tag_mask = ntohs(vlan_mask->tci);
835 priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
836 vlan_tag_mask &= 0xfff;
839 * the storage for priority and vlan tag is 2 bytes
840 * The mask of priority which is 3 bits if it is all 1's
841 * then make the rest bits 13 bits as 1's
842 * so that it is matched as exact match.
844 if (priority_mask == ULP_VLAN_PRIORITY_MASK)
845 priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
846 if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
847 vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
848 vlan_tag_mask = htons(vlan_tag_mask);
851 if (ulp_rte_prsr_fld_size_validate(params, &idx,
852 BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
853 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
854 return BNXT_TF_RC_ERROR;
858 * Copy the rte_flow_item for vlan into hdr_field using Vlan
861 size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
863 * The priority field is ignored since OVS is setting it as
864 * wild card match and it is not supported. This is a work
865 * around and shall be addressed in the future.
867 ulp_rte_prsr_fld_mask(params, &idx, size,
869 (vlan_mask) ? &priority_mask : NULL,
870 ULP_PRSR_ACT_MASK_IGNORE);
872 ulp_rte_prsr_fld_mask(params, &idx, size,
874 (vlan_mask) ? &vlan_tag_mask : NULL,
875 ULP_PRSR_ACT_DEFAULT);
877 size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
878 ulp_rte_prsr_fld_mask(params, &idx, size,
879 ulp_deference_struct(vlan_spec, inner_type),
880 ulp_deference_struct(vlan_mask, inner_type),
881 ULP_PRSR_ACT_MATCH_IGNORE);
883 /* Get the outer tag and inner tag counts */
884 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
885 BNXT_ULP_CF_IDX_O_VTAG_NUM);
886 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
887 BNXT_ULP_CF_IDX_I_VTAG_NUM);
889 /* Update the hdr_bitmap of the vlans */
/* fixed mojibake: "¶ms" was an encoding corruption of "&params" */
890 hdr_bit = &params->hdr_bitmap;
891 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
892 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
894 /* Update the vlan tag num */
896 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
898 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
899 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
900 ULP_BITMAP_SET(params->hdr_bitmap.bits,
901 BNXT_ULP_HDR_BIT_OO_VLAN);
902 if (vlan_mask && vlan_tag_mask)
903 ULP_COMP_FLD_IDX_WR(params,
904 BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);
906 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
907 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
908 outer_vtag_num == 1) {
909 /* update the vlan tag num */
911 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
913 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
914 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
915 ULP_BITMAP_SET(params->hdr_bitmap.bits,
916 BNXT_ULP_HDR_BIT_OI_VLAN);
917 if (vlan_mask && vlan_tag_mask)
918 ULP_COMP_FLD_IDX_WR(params,
919 BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);
921 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
922 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
924 /* update the vlan tag num */
926 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
928 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
929 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
930 ULP_BITMAP_SET(params->hdr_bitmap.bits,
931 BNXT_ULP_HDR_BIT_IO_VLAN);
932 if (vlan_mask && vlan_tag_mask)
933 ULP_COMP_FLD_IDX_WR(params,
934 BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
936 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
937 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
938 inner_vtag_num == 1) {
939 /* update the vlan tag num */
941 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
943 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
944 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
945 ULP_BITMAP_SET(params->hdr_bitmap.bits,
946 BNXT_ULP_HDR_BIT_II_VLAN);
947 if (vlan_mask && vlan_tag_mask)
948 ULP_COMP_FLD_IDX_WR(params,
949 BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
952 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
953 return BNXT_TF_RC_ERROR;
955 /* Update the field protocol hdr bitmap */
956 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
957 return BNXT_TF_RC_SUCCESS;
960 /* Function to handle the update of proto header based on field values */
/*
 * Translate an IP protocol number into the inner/outer UDP, TCP, GRE
 * or ICMP header bits ('in_flag' selects inner vs outer; ICMP picks
 * inner when an L3 tunnel was detected), and record the protocol id in
 * the corresponding L3 proto computed fields.
 * NOTE(review): several lines (else branches, closing braces) are
 * missing from this view.
 */
962 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
963 uint8_t proto, uint32_t in_flag)
965 if (proto == IPPROTO_UDP) {
967 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
968 BNXT_ULP_HDR_BIT_I_UDP);
969 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
971 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
972 BNXT_ULP_HDR_BIT_O_UDP);
973 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
975 } else if (proto == IPPROTO_TCP) {
977 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
978 BNXT_ULP_HDR_BIT_I_TCP);
979 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
981 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
982 BNXT_ULP_HDR_BIT_O_TCP);
983 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
985 } else if (proto == IPPROTO_GRE) {
986 ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
987 } else if (proto == IPPROTO_ICMP) {
988 if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
989 ULP_BITMAP_SET(param->hdr_bitmap.bits,
990 BNXT_ULP_HDR_BIT_I_ICMP);
992 ULP_BITMAP_SET(param->hdr_bitmap.bits,
993 BNXT_ULP_HDR_BIT_O_ICMP);
997 ULP_COMP_FLD_IDX_WR(param,
998 BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1000 ULP_COMP_FLD_IDX_WR(param,
1001 BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1004 ULP_COMP_FLD_IDX_WR(param,
1005 BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1007 ULP_COMP_FLD_IDX_WR(param,
1008 BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1014 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * Parse an IPV4 item: reject a third L3 header, reserve hdr_field
 * slots, copy every IPv4 header field's spec/mask (ToS uses
 * MASK_IGNORE as an OVS workaround; next_proto_id uses MATCH_IGNORE),
 * set the inner/outer IPv4 header bits, propagate the (mask-qualified)
 * protocol into the L4 bits and bump the L3 header count.
 */
1016 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
1017 struct ulp_rte_parser_params *params)
1019 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
1020 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
/* fixed mojibake: "¶ms" was an encoding corruption of "&params" */
1021 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1022 uint32_t idx = 0, dip_idx = 0;
1025 uint32_t inner_flag = 0;
1028 /* validate there are no 3rd L3 header */
1029 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1031 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1032 return BNXT_TF_RC_ERROR;
1035 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1036 BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
1037 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1038 return BNXT_TF_RC_ERROR;
1042 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1045 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
1046 ulp_rte_prsr_fld_mask(params, &idx, size,
1047 ulp_deference_struct(ipv4_spec, hdr.version_ihl),
1048 ulp_deference_struct(ipv4_mask, hdr.version_ihl),
1049 ULP_PRSR_ACT_DEFAULT);
1052 * The tos field is ignored since OVS is setting it as wild card
1053 * match and it is not supported. This is a work around and
1054 * shall be addressed in the future.
1056 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
1057 ulp_rte_prsr_fld_mask(params, &idx, size,
1058 ulp_deference_struct(ipv4_spec,
1059 hdr.type_of_service),
1060 ulp_deference_struct(ipv4_mask,
1061 hdr.type_of_service),
1062 ULP_PRSR_ACT_MASK_IGNORE);
1064 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
1065 ulp_rte_prsr_fld_mask(params, &idx, size,
1066 ulp_deference_struct(ipv4_spec, hdr.total_length),
1067 ulp_deference_struct(ipv4_mask, hdr.total_length),
1068 ULP_PRSR_ACT_DEFAULT);
1070 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
1071 ulp_rte_prsr_fld_mask(params, &idx, size,
1072 ulp_deference_struct(ipv4_spec, hdr.packet_id),
1073 ulp_deference_struct(ipv4_mask, hdr.packet_id),
1074 ULP_PRSR_ACT_DEFAULT);
1076 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
1077 ulp_rte_prsr_fld_mask(params, &idx, size,
1078 ulp_deference_struct(ipv4_spec,
1079 hdr.fragment_offset),
1080 ulp_deference_struct(ipv4_mask,
1081 hdr.fragment_offset),
1082 ULP_PRSR_ACT_DEFAULT);
1084 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
1085 ulp_rte_prsr_fld_mask(params, &idx, size,
1086 ulp_deference_struct(ipv4_spec, hdr.time_to_live),
1087 ulp_deference_struct(ipv4_mask, hdr.time_to_live),
1088 ULP_PRSR_ACT_DEFAULT);
1090 /* Ignore proto for matching templates */
1091 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
1092 ulp_rte_prsr_fld_mask(params, &idx, size,
1093 ulp_deference_struct(ipv4_spec,
1095 ulp_deference_struct(ipv4_mask,
1097 ULP_PRSR_ACT_MATCH_IGNORE);
1099 proto = ipv4_spec->hdr.next_proto_id;
1101 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
1102 ulp_rte_prsr_fld_mask(params, &idx, size,
1103 ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
1104 ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
1105 ULP_PRSR_ACT_DEFAULT);
1107 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
1108 ulp_rte_prsr_fld_mask(params, &idx, size,
1109 ulp_deference_struct(ipv4_spec, hdr.src_addr),
1110 ulp_deference_struct(ipv4_mask, hdr.src_addr),
1111 ULP_PRSR_ACT_DEFAULT);
1114 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
1115 ulp_rte_prsr_fld_mask(params, &idx, size,
1116 ulp_deference_struct(ipv4_spec, hdr.dst_addr),
1117 ulp_deference_struct(ipv4_mask, hdr.dst_addr),
1118 ULP_PRSR_ACT_DEFAULT);
1120 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1121 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1122 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1123 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1124 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1127 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1128 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1129 /* Update the tunnel offload dest ip offset */
1130 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1134 /* Some of the PMD applications may set the protocol field
1135 * in the IPv4 spec but don't set the mask. So, consider
1136 * the mask in the proto value calculation.
1139 proto &= ipv4_mask->hdr.next_proto_id;
1141 /* Update the field protocol hdr bitmap */
1142 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1143 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1144 return BNXT_TF_RC_SUCCESS;
1147 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * Parses an RTE_FLOW_ITEM_TYPE_IPV6 item into params->hdr_field via
 * ulp_rte_prsr_fld_mask(), and updates the header/computed-field bitmaps.
 * Returns BNXT_TF_RC_SUCCESS on success, BNXT_TF_RC_ERROR on a third L3
 * header or a field-size validation failure.
 * NOTE(review): this chunk is a sampled view; some guard lines (e.g. the
 * null checks around ipv6_spec/ipv6_mask) are elided here — confirm in the
 * full source before relying on unconditional dereferences below.
 */
1149 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1150 struct ulp_rte_parser_params *params)
1152 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1153 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1154 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1155 uint32_t idx = 0, dip_idx = 0;
1157 uint32_t ver_spec = 0, ver_mask = 0;
1158 uint32_t tc_spec = 0, tc_mask = 0;
1159 uint32_t lab_spec = 0, lab_mask = 0;
1161 uint32_t inner_flag = 0;
1164 /* validate there are no 3rd L3 header */
1165 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1167 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1168 return BNXT_TF_RC_ERROR;
/* Reserve/validate the hdr_field slots needed for an IPv6 header. */
1171 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1172 BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
1173 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1174 return BNXT_TF_RC_ERROR;
1178 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* Split vtc_flow into version / traffic-class / flow-label components. */
1182 ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1183 tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1184 lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1185 proto = ipv6_spec->hdr.proto;
1189 ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1190 tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1191 lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1193 /* Some of the PMD applications may set the protocol field
1194 * in the IPv6 spec but don't set the mask. So, consider
1195 * the mask in proto value calculation.
1197 proto &= ipv6_mask->hdr.proto;
1200 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
1201 ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
1202 ULP_PRSR_ACT_DEFAULT);
1204 * The TC and flow label field are ignored since OVS is
1205 * setting it for match and it is not supported.
1206 * This is a work around and
1207 * shall be addressed in the future.
1209 ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
1210 ULP_PRSR_ACT_MASK_IGNORE);
1211 ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
1212 ULP_PRSR_ACT_MASK_IGNORE);
1214 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
1215 ulp_rte_prsr_fld_mask(params, &idx, size,
1216 ulp_deference_struct(ipv6_spec, hdr.payload_len),
1217 ulp_deference_struct(ipv6_mask, hdr.payload_len),
1218 ULP_PRSR_ACT_DEFAULT);
1220 /* Ignore proto for template matching */
1221 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
1222 ulp_rte_prsr_fld_mask(params, &idx, size,
1223 ulp_deference_struct(ipv6_spec, hdr.proto),
1224 ulp_deference_struct(ipv6_mask, hdr.proto),
1225 ULP_PRSR_ACT_MATCH_IGNORE);
1227 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
1228 ulp_rte_prsr_fld_mask(params, &idx, size,
1229 ulp_deference_struct(ipv6_spec, hdr.hop_limits),
1230 ulp_deference_struct(ipv6_mask, hdr.hop_limits),
1231 ULP_PRSR_ACT_DEFAULT);
1233 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
1234 ulp_rte_prsr_fld_mask(params, &idx, size,
1235 ulp_deference_struct(ipv6_spec, hdr.src_addr),
1236 ulp_deference_struct(ipv6_mask, hdr.src_addr),
1237 ULP_PRSR_ACT_DEFAULT);
1240 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
1241 ulp_rte_prsr_fld_mask(params, &idx, size,
1242 ulp_deference_struct(ipv6_spec, hdr.dst_addr),
1243 ulp_deference_struct(ipv6_mask, hdr.dst_addr),
1244 ULP_PRSR_ACT_DEFAULT);
1246 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
/* Outer L3 already seen -> this IPv6 header is the inner one. */
1247 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1248 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1249 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1250 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1253 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1254 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1255 /* Update the tunnel offload dest ip offset */
1256 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1260 /* Update the field protocol hdr bitmap */
1261 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
/* Bump the L3 header count so a third L3 header is rejected next time. */
1262 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1264 return BNXT_TF_RC_SUCCESS;
1267 /* Function to handle the update of proto header based on field values */
/*
 * Records L4 (TCP/UDP) port spec/mask values and derived flags into the
 * computed-field table, keyed by whether hdr_bit marks an inner or outer
 * header, and tags a VXLAN tunnel when the outer UDP dst port is 4789.
 * Port arguments are big-endian as received from the rte_flow item.
 */
1269 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
1270 uint16_t src_port, uint16_t src_mask,
1271 uint16_t dst_port, uint16_t dst_mask,
1272 enum bnxt_ulp_hdr_bit hdr_bit)
/* Inner L4 header: fill the I_L4 computed fields. */
1275 case BNXT_ULP_HDR_BIT_I_UDP:
1276 case BNXT_ULP_HDR_BIT_I_TCP:
1277 ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
1278 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1279 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1280 (uint64_t)rte_be_to_cpu_16(src_port))
1281 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1282 (uint64_t)rte_be_to_cpu_16(dst_port));
1283 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
1284 (uint64_t)rte_be_to_cpu_16(src_mask));
1285 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
1286 (uint64_t)rte_be_to_cpu_16(dst_mask));
1287 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
/* FB_* fields flag whether the port is actually being matched on. */
1289 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1290 !!(src_port & src_mask));
1291 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1292 !!(dst_port & dst_mask));
1293 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1294 (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
1295 IPPROTO_UDP : IPPROTO_TCP);
/* Outer L4 header: same bookkeeping into the O_L4 computed fields. */
1297 case BNXT_ULP_HDR_BIT_O_UDP:
1298 case BNXT_ULP_HDR_BIT_O_TCP:
1299 ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
1300 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1301 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1302 (uint64_t)rte_be_to_cpu_16(src_port));
1303 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1304 (uint64_t)rte_be_to_cpu_16(dst_port));
1305 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
1306 (uint64_t)rte_be_to_cpu_16(src_mask));
1307 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1308 (uint64_t)rte_be_to_cpu_16(dst_mask));
1309 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1311 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1312 !!(src_port & src_mask));
1313 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1314 !!(dst_port & dst_mask));
1315 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1316 (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
1317 IPPROTO_UDP : IPPROTO_TCP);
/* Outer UDP to the well-known VXLAN port implies a VXLAN tunnel flow. */
1323 if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
1324 tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1325 ULP_BITMAP_SET(params->hdr_fp_bit.bits,
1326 BNXT_ULP_HDR_BIT_T_VXLAN);
1327 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1331 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Parses an RTE_FLOW_ITEM_TYPE_UDP item: copies src/dst port, length and
 * checksum fields into hdr_field, then delegates bitmap/computed-field
 * updates to ulp_rte_l4_proto_type_update(). Rejects a third L4 header.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1333 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1334 struct ulp_rte_parser_params *params)
1336 const struct rte_flow_item_udp *udp_spec = item->spec;
1337 const struct rte_flow_item_udp *udp_mask = item->mask;
1338 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1341 uint16_t dport = 0, sport = 0;
1342 uint16_t dport_mask = 0, sport_mask = 0;
1344 enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;
1346 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1348 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1349 return BNXT_TF_RC_ERROR;
/* Ports are kept in network byte order here; converted later by
 * ulp_rte_l4_proto_type_update().
 */
1353 sport = udp_spec->hdr.src_port;
1354 dport = udp_spec->hdr.dst_port;
1357 sport_mask = udp_mask->hdr.src_port;
1358 dport_mask = udp_mask->hdr.dst_port;
1361 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1362 BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1363 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1364 return BNXT_TF_RC_ERROR;
1368 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1371 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1372 ulp_rte_prsr_fld_mask(params, &idx, size,
1373 ulp_deference_struct(udp_spec, hdr.src_port),
1374 ulp_deference_struct(udp_mask, hdr.src_port),
1375 ULP_PRSR_ACT_DEFAULT);
1377 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1378 ulp_rte_prsr_fld_mask(params, &idx, size,
1379 ulp_deference_struct(udp_spec, hdr.dst_port),
1380 ulp_deference_struct(udp_mask, hdr.dst_port),
1381 ULP_PRSR_ACT_DEFAULT);
1383 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1384 ulp_rte_prsr_fld_mask(params, &idx, size,
1385 ulp_deference_struct(udp_spec, hdr.dgram_len),
1386 ulp_deference_struct(udp_mask, hdr.dgram_len),
1387 ULP_PRSR_ACT_DEFAULT);
1389 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1390 ulp_rte_prsr_fld_mask(params, &idx, size,
1391 ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1392 ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1393 ULP_PRSR_ACT_DEFAULT);
1395 /* Set the udp header bitmap and computed l4 header bitmaps */
/* If an outer L4 was already parsed, this UDP header is the inner one. */
1396 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1397 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
1398 out_l4 = BNXT_ULP_HDR_BIT_I_UDP;
1400 ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1401 dport_mask, out_l4);
1402 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1403 return BNXT_TF_RC_SUCCESS;
1406 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Parses an RTE_FLOW_ITEM_TYPE_TCP item: copies every TCP header field
 * (ports, seq/ack, data offset, flags, window, checksum, urgent pointer)
 * into hdr_field, then updates the L4 bitmaps/computed fields via
 * ulp_rte_l4_proto_type_update(). Rejects a third L4 header.
 */
1408 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1409 struct ulp_rte_parser_params *params)
1411 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1412 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1413 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1415 uint16_t dport = 0, sport = 0;
1416 uint16_t dport_mask = 0, sport_mask = 0;
1419 enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;
1421 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1423 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1424 return BNXT_TF_RC_ERROR;
/* Ports stay big-endian; converted in ulp_rte_l4_proto_type_update(). */
1428 sport = tcp_spec->hdr.src_port;
1429 dport = tcp_spec->hdr.dst_port;
1432 sport_mask = tcp_mask->hdr.src_port;
1433 dport_mask = tcp_mask->hdr.dst_port;
1436 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1437 BNXT_ULP_PROTO_HDR_TCP_NUM)) {
1438 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1439 return BNXT_TF_RC_ERROR;
1443 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1446 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1447 ulp_rte_prsr_fld_mask(params, &idx, size,
1448 ulp_deference_struct(tcp_spec, hdr.src_port),
1449 ulp_deference_struct(tcp_mask, hdr.src_port),
1450 ULP_PRSR_ACT_DEFAULT);
1452 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1453 ulp_rte_prsr_fld_mask(params, &idx, size,
1454 ulp_deference_struct(tcp_spec, hdr.dst_port),
1455 ulp_deference_struct(tcp_mask, hdr.dst_port),
1456 ULP_PRSR_ACT_DEFAULT);
1458 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1459 ulp_rte_prsr_fld_mask(params, &idx, size,
1460 ulp_deference_struct(tcp_spec, hdr.sent_seq),
1461 ulp_deference_struct(tcp_mask, hdr.sent_seq),
1462 ULP_PRSR_ACT_DEFAULT);
1464 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1465 ulp_rte_prsr_fld_mask(params, &idx, size,
1466 ulp_deference_struct(tcp_spec, hdr.recv_ack),
1467 ulp_deference_struct(tcp_mask, hdr.recv_ack),
1468 ULP_PRSR_ACT_DEFAULT);
1470 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1471 ulp_rte_prsr_fld_mask(params, &idx, size,
1472 ulp_deference_struct(tcp_spec, hdr.data_off),
1473 ulp_deference_struct(tcp_mask, hdr.data_off),
1474 ULP_PRSR_ACT_DEFAULT);
1476 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1477 ulp_rte_prsr_fld_mask(params, &idx, size,
1478 ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1479 ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1480 ULP_PRSR_ACT_DEFAULT);
1482 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1483 ulp_rte_prsr_fld_mask(params, &idx, size,
1484 ulp_deference_struct(tcp_spec, hdr.rx_win),
1485 ulp_deference_struct(tcp_mask, hdr.rx_win),
1486 ULP_PRSR_ACT_DEFAULT);
1488 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1489 ulp_rte_prsr_fld_mask(params, &idx, size,
1490 ulp_deference_struct(tcp_spec, hdr.cksum),
1491 ulp_deference_struct(tcp_mask, hdr.cksum),
1492 ULP_PRSR_ACT_DEFAULT);
1494 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1495 ulp_rte_prsr_fld_mask(params, &idx, size,
1496 ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1497 ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1498 ULP_PRSR_ACT_DEFAULT);
1500 /* Set the udp header bitmap and computed l4 header bitmaps */
/* Note: comment above says "udp" but this is the TCP inner/outer choice. */
1501 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1502 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
1503 out_l4 = BNXT_ULP_HDR_BIT_I_TCP;
1505 ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1506 dport_mask, out_l4);
1507 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1508 return BNXT_TF_RC_SUCCESS;
1511 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Parses an RTE_FLOW_ITEM_TYPE_VXLAN item: copies flags, rsvd0, vni and
 * rsvd1 into hdr_field, then sets the T_VXLAN header bit and the L3_TUN
 * computed flag. Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1513 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1514 struct ulp_rte_parser_params *params)
1516 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1517 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1518 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1522 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1523 BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1524 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1525 return BNXT_TF_RC_ERROR;
1529 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1532 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1533 ulp_rte_prsr_fld_mask(params, &idx, size,
1534 ulp_deference_struct(vxlan_spec, flags),
1535 ulp_deference_struct(vxlan_mask, flags),
1536 ULP_PRSR_ACT_DEFAULT);
1538 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1539 ulp_rte_prsr_fld_mask(params, &idx, size,
1540 ulp_deference_struct(vxlan_spec, rsvd0),
1541 ulp_deference_struct(vxlan_mask, rsvd0),
1542 ULP_PRSR_ACT_DEFAULT);
1544 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1545 ulp_rte_prsr_fld_mask(params, &idx, size,
1546 ulp_deference_struct(vxlan_spec, vni),
1547 ulp_deference_struct(vxlan_mask, vni),
1548 ULP_PRSR_ACT_DEFAULT);
1550 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1551 ulp_rte_prsr_fld_mask(params, &idx, size,
1552 ulp_deference_struct(vxlan_spec, rsvd1),
1553 ulp_deference_struct(vxlan_mask, rsvd1),
1554 ULP_PRSR_ACT_DEFAULT);
1556 /* Update the hdr_bitmap with vxlan */
1557 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1558 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1559 return BNXT_TF_RC_SUCCESS;
1562 /* Function to handle the parsing of RTE Flow item GRE Header. */
/*
 * Parses an RTE_FLOW_ITEM_TYPE_GRE item: copies c_rsvd0_ver and protocol
 * into hdr_field, then sets the T_GRE header bit and the L3_TUN computed
 * flag. Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1564 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1565 struct ulp_rte_parser_params *params)
1567 const struct rte_flow_item_gre *gre_spec = item->spec;
1568 const struct rte_flow_item_gre *gre_mask = item->mask;
1569 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1573 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1574 BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1575 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1576 return BNXT_TF_RC_ERROR;
1579 size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1580 ulp_rte_prsr_fld_mask(params, &idx, size,
1581 ulp_deference_struct(gre_spec, c_rsvd0_ver),
1582 ulp_deference_struct(gre_mask, c_rsvd0_ver),
1583 ULP_PRSR_ACT_DEFAULT);
1585 size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1586 ulp_rte_prsr_fld_mask(params, &idx, size,
1587 ulp_deference_struct(gre_spec, protocol),
1588 ulp_deference_struct(gre_mask, protocol),
1589 ULP_PRSR_ACT_DEFAULT);
1591 /* Update the hdr_bitmap with GRE */
1592 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1593 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1594 return BNXT_TF_RC_SUCCESS;
1597 /* Function to handle the parsing of RTE Flow item ANY. */
/* ANY matches everything, so nothing is recorded; always succeeds. */
1599 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1600 struct ulp_rte_parser_params *params __rte_unused)
1602 return BNXT_TF_RC_SUCCESS;
1605 /* Function to handle the parsing of RTE Flow item ICMP Header. */
/*
 * Parses an RTE_FLOW_ITEM_TYPE_ICMP item: copies type, code, checksum,
 * identifier and sequence number into hdr_field, then sets the inner or
 * outer ICMP header bit depending on the L3_TUN computed flag.
 */
1607 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1608 struct ulp_rte_parser_params *params)
1610 const struct rte_flow_item_icmp *icmp_spec = item->spec;
1611 const struct rte_flow_item_icmp *icmp_mask = item->mask;
1612 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1616 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1617 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1618 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1619 return BNXT_TF_RC_ERROR;
1622 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1623 ulp_rte_prsr_fld_mask(params, &idx, size,
1624 ulp_deference_struct(icmp_spec, hdr.icmp_type),
1625 ulp_deference_struct(icmp_mask, hdr.icmp_type),
1626 ULP_PRSR_ACT_DEFAULT);
1628 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1629 ulp_rte_prsr_fld_mask(params, &idx, size,
1630 ulp_deference_struct(icmp_spec, hdr.icmp_code),
1631 ulp_deference_struct(icmp_mask, hdr.icmp_code),
1632 ULP_PRSR_ACT_DEFAULT);
1634 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1635 ulp_rte_prsr_fld_mask(params, &idx, size,
1636 ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1637 ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1638 ULP_PRSR_ACT_DEFAULT);
1640 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1641 ulp_rte_prsr_fld_mask(params, &idx, size,
1642 ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1643 ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1644 ULP_PRSR_ACT_DEFAULT);
1646 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1647 ulp_rte_prsr_fld_mask(params, &idx, size,
1648 ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1649 ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1650 ULP_PRSR_ACT_DEFAULT);
1652 /* Update the hdr_bitmap with ICMP */
/* Tunnel flow -> ICMP is inside the tunnel (inner), else outer. */
1653 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1654 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1656 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1657 return BNXT_TF_RC_SUCCESS;
1660 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
/*
 * Parses an RTE_FLOW_ITEM_TYPE_ICMP6 item: copies type, code and checksum
 * into hdr_field. Rejects the flow if an outer IPv4 header was already
 * parsed (ICMPv6 over IPv4 is inconsistent), then sets the inner or outer
 * ICMP header bit depending on the L3_TUN computed flag.
 */
1662 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1663 struct ulp_rte_parser_params *params)
1665 const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1666 const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1667 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1671 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1672 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1673 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1674 return BNXT_TF_RC_ERROR;
1677 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1678 ulp_rte_prsr_fld_mask(params, &idx, size,
1679 ulp_deference_struct(icmp_spec, type),
1680 ulp_deference_struct(icmp_mask, type),
1681 ULP_PRSR_ACT_DEFAULT);
1683 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1684 ulp_rte_prsr_fld_mask(params, &idx, size,
1685 ulp_deference_struct(icmp_spec, code),
1686 ulp_deference_struct(icmp_mask, code),
1687 ULP_PRSR_ACT_DEFAULT);
1689 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
1690 ulp_rte_prsr_fld_mask(params, &idx, size,
1691 ulp_deference_struct(icmp_spec, checksum),
1692 ulp_deference_struct(icmp_mask, checksum),
1693 ULP_PRSR_ACT_DEFAULT);
/* ICMPv6 cannot follow an outer IPv4 header. */
1695 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
1696 BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
1697 return BNXT_TF_RC_ERROR;
1700 /* Update the hdr_bitmap with ICMP */
1701 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1702 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1704 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1705 return BNXT_TF_RC_SUCCESS;
1708 /* Function to handle the parsing of RTE Flow item void Header */
/* VOID items carry no match data; nothing to record, always succeeds. */
1710 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1711 struct ulp_rte_parser_params *params __rte_unused)
1713 return BNXT_TF_RC_SUCCESS;
1716 /* Function to handle the parsing of RTE Flow action void Header. */
/* VOID actions are no-ops; nothing to record, always succeeds. */
1718 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1719 struct ulp_rte_parser_params *params __rte_unused)
1721 return BNXT_TF_RC_SUCCESS;
1724 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Parses RTE_FLOW_ACTION_TYPE_MARK: stores the big-endian mark id into
 * the MARK action property and sets ACT_BIT_MARK. Returns error when the
 * action configuration is missing.
 */
1726 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1727 struct ulp_rte_parser_params *param)
1729 const struct rte_flow_action_mark *mark;
1730 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1733 mark = action_item->conf;
/* Mark id is stored big-endian in the action property blob. */
1735 mark_id = tfp_cpu_to_be_32(mark->id);
1736 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1737 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1739 /* Update the hdr_bitmap with vxlan */
/* (comment above is stale: this sets the MARK action bit, not vxlan) */
1740 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1741 return BNXT_TF_RC_SUCCESS;
1743 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1744 return BNXT_TF_RC_ERROR;
1747 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Parses RTE_FLOW_ACTION_TYPE_RSS: copies types, level, key_len and the
 * key bytes into the action properties and sets ACT_BIT_RSS. Rejects a
 * NULL configuration and a key longer than the property buffer.
 */
1749 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1750 struct ulp_rte_parser_params *param)
1752 const struct rte_flow_action_rss *rss;
1753 struct ulp_rte_act_prop *ap = ¶m->act_prop;
1755 if (action_item == NULL || action_item->conf == NULL) {
1756 BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1757 return BNXT_TF_RC_ERROR;
1760 rss = action_item->conf;
1761 /* Copy the rss into the specific action properties */
1762 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1763 BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1764 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1765 BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1766 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1767 &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
/* Bound-check before copying the caller-supplied key. */
1769 if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1770 BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1771 return BNXT_TF_RC_ERROR;
1773 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1776 /* set the RSS action header bit */
1777 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1779 return BNXT_TF_RC_SUCCESS;
1782 /* Function to handle the parsing of RTE Flow item eth Header. */
/*
 * Copies the ethernet dst/src MAC and ether type from a vxlan-encap
 * definition item into the encap field array (starting at ETH_DMAC) and
 * sets the O_ETH bit in the encap header bitmap.
 */
1784 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
1785 const struct rte_flow_item_eth *eth_spec)
1787 struct ulp_rte_hdr_field *field;
1790 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
1791 size = sizeof(eth_spec->dst.addr_bytes);
/* ulp_rte_parser_fld_copy() advances to the next enc_field slot. */
1792 field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);
1794 size = sizeof(eth_spec->src.addr_bytes);
1795 field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);
1797 size = sizeof(eth_spec->type);
1798 field = ulp_rte_parser_fld_copy(field, ð_spec->type, size);
1800 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
1803 /* Function to handle the parsing of RTE Flow item vlan Header. */
/*
 * Copies a vlan header (tci + inner_type) from a vxlan-encap definition
 * item into the encap field array. The elided third parameter selects
 * outer (OO_VLAN) vs inner (OI_VLAN) placement — see full source.
 */
1805 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
1806 const struct rte_flow_item_vlan *vlan_spec,
1809 struct ulp_rte_hdr_field *field;
1813 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
1814 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1815 BNXT_ULP_HDR_BIT_OO_VLAN);
1817 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
1818 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1819 BNXT_ULP_HDR_BIT_OI_VLAN);
1822 size = sizeof(vlan_spec->tci);
1823 field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);
1825 size = sizeof(vlan_spec->inner_type);
1826 field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
1829 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
/*
 * Copies an IPv4 header from a vxlan-encap definition item into the
 * encap field array (starting at IPV4_IHL) and sets the O_IPV4 encap
 * bit. Zero version_ihl / time_to_live are replaced with sane defaults
 * (RTE_IPV4_VHL_DEF / BNXT_ULP_DEFAULT_TTL) so the built header is valid.
 */
1831 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
1832 const struct rte_flow_item_ipv4 *ip)
1834 struct ulp_rte_hdr_field *field;
1838 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
1839 size = sizeof(ip->hdr.version_ihl);
/* Default version/IHL when the application left it zero. */
1840 if (!ip->hdr.version_ihl)
1841 val8 = RTE_IPV4_VHL_DEF;
1843 val8 = ip->hdr.version_ihl;
1844 field = ulp_rte_parser_fld_copy(field, &val8, size);
1846 size = sizeof(ip->hdr.type_of_service);
1847 field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
1849 size = sizeof(ip->hdr.packet_id);
1850 field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
1852 size = sizeof(ip->hdr.fragment_offset);
1853 field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
1855 size = sizeof(ip->hdr.time_to_live);
/* Default TTL when the application left it zero. */
1856 if (!ip->hdr.time_to_live)
1857 val8 = BNXT_ULP_DEFAULT_TTL;
1859 val8 = ip->hdr.time_to_live;
1860 field = ulp_rte_parser_fld_copy(field, &val8, size);
1862 size = sizeof(ip->hdr.next_proto_id);
1863 field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
1865 size = sizeof(ip->hdr.src_addr);
1866 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1868 size = sizeof(ip->hdr.dst_addr);
1869 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1871 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
1874 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
/*
 * Copies an IPv6 header from a vxlan-encap definition item into the
 * encap field array (starting at IPV6_VTC_FLOW) and sets the O_IPV6
 * encap bit. Zero vtc_flow / hop_limits are replaced with defaults
 * (BNXT_ULP_IPV6_DFLT_VER / BNXT_ULP_DEFAULT_TTL).
 */
1876 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
1877 const struct rte_flow_item_ipv6 *ip)
1879 struct ulp_rte_hdr_field *field;
1884 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
1885 size = sizeof(ip->hdr.vtc_flow);
/* Default version nibble (big-endian) when vtc_flow is zero. */
1886 if (!ip->hdr.vtc_flow)
1887 val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
1889 val32 = ip->hdr.vtc_flow;
1890 field = ulp_rte_parser_fld_copy(field, &val32, size);
1892 size = sizeof(ip->hdr.proto);
1893 field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
1895 size = sizeof(ip->hdr.hop_limits);
/* Default hop limit when the application left it zero. */
1896 if (!ip->hdr.hop_limits)
1897 val8 = BNXT_ULP_DEFAULT_TTL;
1899 val8 = ip->hdr.hop_limits;
1900 field = ulp_rte_parser_fld_copy(field, &val8, size);
1902 size = sizeof(ip->hdr.src_addr);
1903 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1905 size = sizeof(ip->hdr.dst_addr);
1906 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1908 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
1911 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies the UDP src/dst ports from a vxlan-encap definition item into
 * the encap field array, sets the O_UDP encap bit, and writes IPPROTO_UDP
 * into both the IPv4 and IPv6 encap protocol fields so whichever L3
 * header is used carries the right next-protocol value.
 */
1913 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
1914 const struct rte_flow_item_udp *udp_spec)
1916 struct ulp_rte_hdr_field *field;
1918 uint8_t type = IPPROTO_UDP;
1920 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
1921 size = sizeof(udp_spec->hdr.src_port);
1922 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
1924 size = sizeof(udp_spec->hdr.dst_port);
1925 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
1927 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
1929 /* Update thhe ip header protocol */
1930 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
1931 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1932 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
1933 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1936 /* Function to handle the parsing of RTE Flow item vxlan Header. */
/*
 * Copies the vxlan header (flags, rsvd0, vni, rsvd1) from a vxlan-encap
 * definition item into the encap field array (starting at VXLAN_FLAGS)
 * and sets the T_VXLAN encap bit.
 */
1938 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
1939 struct rte_flow_item_vxlan *vxlan_spec)
1941 struct ulp_rte_hdr_field *field;
1944 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
1945 size = sizeof(vxlan_spec->flags);
1946 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);
1948 size = sizeof(vxlan_spec->rsvd0);
1949 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);
1951 size = sizeof(vxlan_spec->vni);
1952 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);
1954 size = sizeof(vxlan_spec->rsvd1);
1955 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);
1957 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1960 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1962 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1963 struct ulp_rte_parser_params *params)
1965 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1966 const struct rte_flow_item *item;
1967 const struct rte_flow_item_ipv4 *ipv4_spec;
1968 const struct rte_flow_item_ipv6 *ipv6_spec;
1969 struct rte_flow_item_vxlan vxlan_spec;
1970 uint32_t vlan_num = 0, vlan_size = 0;
1971 uint32_t ip_size = 0, ip_type = 0;
1972 uint32_t vxlan_size = 0;
1973 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1974 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1976 vxlan_encap = action_item->conf;
1978 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1979 return BNXT_TF_RC_ERROR;
1982 item = vxlan_encap->definition;
1984 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1985 return BNXT_TF_RC_ERROR;
1988 if (!ulp_rte_item_skip_void(&item, 0))
1989 return BNXT_TF_RC_ERROR;
1991 /* must have ethernet header */
1992 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1993 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1994 return BNXT_TF_RC_ERROR;
1997 /* Parse the ethernet header */
1999 ulp_rte_enc_eth_hdr_handler(params, item->spec);
2001 /* Goto the next item */
2002 if (!ulp_rte_item_skip_void(&item, 1))
2003 return BNXT_TF_RC_ERROR;
2005 /* May have vlan header */
2006 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2009 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
2011 if (!ulp_rte_item_skip_void(&item, 1))
2012 return BNXT_TF_RC_ERROR;
2015 /* may have two vlan headers */
2016 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2019 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
2021 if (!ulp_rte_item_skip_void(&item, 1))
2022 return BNXT_TF_RC_ERROR;
2025 /* Update the vlan count and size of more than one */
2027 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
2028 vlan_num = tfp_cpu_to_be_32(vlan_num);
2029 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
2032 vlan_size = tfp_cpu_to_be_32(vlan_size);
2033 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
2038 /* L3 must be IPv4, IPv6 */
2039 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
2040 ipv4_spec = item->spec;
2041 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2043 /* Update the ip size details */
2044 ip_size = tfp_cpu_to_be_32(ip_size);
2045 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2046 &ip_size, sizeof(uint32_t));
2048 /* update the ip type */
2049 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2050 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2051 &ip_type, sizeof(uint32_t));
2053 /* update the computed field to notify it is ipv4 header */
2054 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2057 ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2059 if (!ulp_rte_item_skip_void(&item, 1))
2060 return BNXT_TF_RC_ERROR;
2061 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2062 ipv6_spec = item->spec;
2063 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2065 /* Update the ip size details */
2066 ip_size = tfp_cpu_to_be_32(ip_size);
2067 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2068 &ip_size, sizeof(uint32_t));
2070 /* update the ip type */
2071 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2072 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2073 &ip_type, sizeof(uint32_t));
2075 /* update the computed field to notify it is ipv6 header */
2076 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2079 ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2081 if (!ulp_rte_item_skip_void(&item, 1))
2082 return BNXT_TF_RC_ERROR;
2084 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2085 return BNXT_TF_RC_ERROR;
2089 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2090 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
2091 return BNXT_TF_RC_ERROR;
2094 ulp_rte_enc_udp_hdr_handler(params, item->spec);
2096 if (!ulp_rte_item_skip_void(&item, 1))
2097 return BNXT_TF_RC_ERROR;
2100 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2101 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
2102 return BNXT_TF_RC_ERROR;
2104 vxlan_size = sizeof(struct rte_flow_item_vxlan);
2105 /* copy the vxlan details */
2106 memcpy(&vxlan_spec, item->spec, vxlan_size);
2107 vxlan_spec.flags = 0x08;
2108 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2109 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2110 &vxlan_size, sizeof(uint32_t));
2112 ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2114 /* update the hdr_bitmap with vxlan */
2115 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2116 return BNXT_TF_RC_SUCCESS;
2119 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
2121 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
2123 struct ulp_rte_parser_params *params)
2125 /* update the hdr_bitmap with vxlan */
2126 ULP_BITMAP_SET(params->act_bitmap.bits,
2127 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2128 /* Update computational field with tunnel decap info */
2129 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2130 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
2131 return BNXT_TF_RC_SUCCESS;
2134 /* Function to handle the parsing of RTE Flow action drop Header. */
2136 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2137 struct ulp_rte_parser_params *params)
2139 /* Update the hdr_bitmap with drop */
2140 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2141 return BNXT_TF_RC_SUCCESS;
2144 /* Function to handle the parsing of RTE Flow action count. */
2146 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2147 struct ulp_rte_parser_params *params)
2149 const struct rte_flow_action_count *act_count;
2150 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
2152 act_count = action_item->conf;
2154 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2156 BNXT_ULP_ACT_PROP_SZ_COUNT);
2159 /* Update the hdr_bitmap with count */
2160 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2161 return BNXT_TF_RC_SUCCESS;
2164 /* Function to handle the parsing of action ports. */
2166 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2168 enum bnxt_ulp_direction_type act_dir)
2170 enum bnxt_ulp_direction_type dir;
2173 struct ulp_rte_act_prop *act = ¶m->act_prop;
2174 enum bnxt_ulp_intf_type port_type;
2177 /* Get the direction */
2178 /* If action implicitly specifies direction, use the specification. */
2179 dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
2180 ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
2182 port_type = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2183 if (dir == BNXT_ULP_DIR_EGRESS &&
2184 port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
2185 /* For egress direction, fill vport */
2186 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2187 return BNXT_TF_RC_ERROR;
2190 pid = rte_cpu_to_be_32(pid);
2191 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2192 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2194 /* For ingress direction, fill vnic */
2196 * Action Destination
2197 * ------------------------------------
2198 * PORT_REPRESENTOR Driver Function
2199 * ------------------------------------
2200 * REPRESENTED_PORT VF
2201 * ------------------------------------
2204 if (act_dir != BNXT_ULP_DIR_INGRESS &&
2205 port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2206 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2208 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2210 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2212 return BNXT_TF_RC_ERROR;
2215 pid = rte_cpu_to_be_32(pid);
2216 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2217 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2220 /* Update the action port set bit */
2221 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2222 return BNXT_TF_RC_SUCCESS;
2225 /* Function to handle the parsing of RTE Flow action PF. */
2227 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2228 struct ulp_rte_parser_params *params)
2232 enum bnxt_ulp_intf_type intf_type;
2234 /* Get the port id of the current device */
2235 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2237 /* Get the port db ifindex */
2238 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2240 BNXT_TF_DBG(ERR, "Invalid port id\n");
2241 return BNXT_TF_RC_ERROR;
2244 /* Check the port is PF port */
2245 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2246 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2247 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2248 return BNXT_TF_RC_ERROR;
2250 /* Update the action properties */
2251 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2252 return ulp_rte_parser_act_port_set(params, ifindex,
2253 BNXT_ULP_DIR_INVALID);
2256 /* Function to handle the parsing of RTE Flow action VF. */
2258 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2259 struct ulp_rte_parser_params *params)
2261 const struct rte_flow_action_vf *vf_action;
2262 enum bnxt_ulp_intf_type intf_type;
2266 vf_action = action_item->conf;
2268 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2269 return BNXT_TF_RC_PARSE_ERR;
2272 if (vf_action->original) {
2273 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2274 return BNXT_TF_RC_PARSE_ERR;
2277 bp = bnxt_pmd_get_bp(params->port_id);
2279 BNXT_TF_DBG(ERR, "Invalid bp\n");
2280 return BNXT_TF_RC_ERROR;
2283 /* vf_action->id is a logical number which in this case is an
2284 * offset from the first VF. So, to get the absolute VF id, the
2285 * offset must be added to the absolute first vf id of that port.
2287 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2291 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2292 return BNXT_TF_RC_ERROR;
2294 /* Check the port is VF port */
2295 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2296 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2297 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2298 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2299 return BNXT_TF_RC_ERROR;
2302 /* Update the action properties */
2303 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2304 return ulp_rte_parser_act_port_set(params, ifindex,
2305 BNXT_ULP_DIR_INVALID);
2308 /* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
2310 ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
2311 struct ulp_rte_parser_params *param)
2315 enum bnxt_ulp_intf_type intf_type;
2316 enum bnxt_ulp_direction_type act_dir;
2318 if (!act_item->conf) {
2320 "ParseErr: Invalid Argument\n");
2321 return BNXT_TF_RC_PARSE_ERR;
2323 switch (act_item->type) {
2324 case RTE_FLOW_ACTION_TYPE_PORT_ID: {
2325 const struct rte_flow_action_port_id *port_id = act_item->conf;
2327 if (port_id->original) {
2329 "ParseErr:Portid Original not supported\n");
2330 return BNXT_TF_RC_PARSE_ERR;
2332 ethdev_id = port_id->id;
2333 act_dir = BNXT_ULP_DIR_INVALID;
2336 case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
2337 const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2339 ethdev_id = ethdev->port_id;
2340 act_dir = BNXT_ULP_DIR_INGRESS;
2343 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
2344 const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2346 ethdev_id = ethdev->port_id;
2347 act_dir = BNXT_ULP_DIR_EGRESS;
2351 BNXT_TF_DBG(ERR, "Unknown port action\n");
2352 return BNXT_TF_RC_ERROR;
2355 /* Get the port db ifindex */
2356 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
2358 BNXT_TF_DBG(ERR, "Invalid port id\n");
2359 return BNXT_TF_RC_ERROR;
2362 /* Get the intf type */
2363 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2365 BNXT_TF_DBG(ERR, "Invalid port type\n");
2366 return BNXT_TF_RC_ERROR;
2369 /* Set the action port */
2370 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2371 return ulp_rte_parser_act_port_set(param, ifindex, act_dir);
2374 /* Function to handle the parsing of RTE Flow action phy_port. */
2376 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2377 struct ulp_rte_parser_params *prm)
2379 const struct rte_flow_action_phy_port *phy_port;
2383 enum bnxt_ulp_direction_type dir;
2385 phy_port = action_item->conf;
2388 "ParseErr: Invalid Argument\n");
2389 return BNXT_TF_RC_PARSE_ERR;
2392 if (phy_port->original) {
2394 "Parse Err:Port Original not supported\n");
2395 return BNXT_TF_RC_PARSE_ERR;
2397 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2398 if (dir != BNXT_ULP_DIR_EGRESS) {
2400 "Parse Err:Phy ports are valid only for egress\n");
2401 return BNXT_TF_RC_PARSE_ERR;
2403 /* Get the physical port details from port db */
2404 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2407 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2412 pid = rte_cpu_to_be_32(pid);
2413 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2414 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2416 /* Update the action port set bit */
2417 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2418 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2419 BNXT_ULP_INTF_TYPE_PHY_PORT);
2420 return BNXT_TF_RC_SUCCESS;
2423 /* Function to handle the parsing of RTE Flow action pop vlan. */
2425 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2426 struct ulp_rte_parser_params *params)
2428 /* Update the act_bitmap with pop */
2429 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2430 return BNXT_TF_RC_SUCCESS;
2433 /* Function to handle the parsing of RTE Flow action push vlan. */
2435 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2436 struct ulp_rte_parser_params *params)
2438 const struct rte_flow_action_of_push_vlan *push_vlan;
2440 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2442 push_vlan = action_item->conf;
2444 ethertype = push_vlan->ethertype;
2445 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2447 "Parse Err: Ethertype not supported\n");
2448 return BNXT_TF_RC_PARSE_ERR;
2450 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2451 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2452 /* Update the hdr_bitmap with push vlan */
2453 ULP_BITMAP_SET(params->act_bitmap.bits,
2454 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2455 return BNXT_TF_RC_SUCCESS;
2457 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2458 return BNXT_TF_RC_ERROR;
2461 /* Function to handle the parsing of RTE Flow action set vlan id. */
2463 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2464 struct ulp_rte_parser_params *params)
2466 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2468 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2470 vlan_vid = action_item->conf;
2471 if (vlan_vid && vlan_vid->vlan_vid) {
2472 vid = vlan_vid->vlan_vid;
2473 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2474 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2475 /* Update the hdr_bitmap with vlan vid */
2476 ULP_BITMAP_SET(params->act_bitmap.bits,
2477 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2478 return BNXT_TF_RC_SUCCESS;
2480 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2481 return BNXT_TF_RC_ERROR;
2484 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2486 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2487 struct ulp_rte_parser_params *params)
2489 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2491 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2493 vlan_pcp = action_item->conf;
2495 pcp = vlan_pcp->vlan_pcp;
2496 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2497 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2498 /* Update the hdr_bitmap with vlan vid */
2499 ULP_BITMAP_SET(params->act_bitmap.bits,
2500 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2501 return BNXT_TF_RC_SUCCESS;
2503 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2504 return BNXT_TF_RC_ERROR;
2507 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2509 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2510 struct ulp_rte_parser_params *params)
2512 const struct rte_flow_action_set_ipv4 *set_ipv4;
2513 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2515 set_ipv4 = action_item->conf;
2517 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2518 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2519 /* Update the hdr_bitmap with set ipv4 src */
2520 ULP_BITMAP_SET(params->act_bitmap.bits,
2521 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2522 return BNXT_TF_RC_SUCCESS;
2524 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2525 return BNXT_TF_RC_ERROR;
2528 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2530 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2531 struct ulp_rte_parser_params *params)
2533 const struct rte_flow_action_set_ipv4 *set_ipv4;
2534 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2536 set_ipv4 = action_item->conf;
2538 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2539 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2540 /* Update the hdr_bitmap with set ipv4 dst */
2541 ULP_BITMAP_SET(params->act_bitmap.bits,
2542 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2543 return BNXT_TF_RC_SUCCESS;
2545 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2546 return BNXT_TF_RC_ERROR;
2549 /* Function to handle the parsing of RTE Flow action set tp src.*/
2551 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2552 struct ulp_rte_parser_params *params)
2554 const struct rte_flow_action_set_tp *set_tp;
2555 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2557 set_tp = action_item->conf;
2559 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2560 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2561 /* Update the hdr_bitmap with set tp src */
2562 ULP_BITMAP_SET(params->act_bitmap.bits,
2563 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2564 return BNXT_TF_RC_SUCCESS;
2567 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2568 return BNXT_TF_RC_ERROR;
2571 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2573 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2574 struct ulp_rte_parser_params *params)
2576 const struct rte_flow_action_set_tp *set_tp;
2577 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2579 set_tp = action_item->conf;
2581 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2582 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2583 /* Update the hdr_bitmap with set tp dst */
2584 ULP_BITMAP_SET(params->act_bitmap.bits,
2585 BNXT_ULP_ACT_BIT_SET_TP_DST);
2586 return BNXT_TF_RC_SUCCESS;
2589 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2590 return BNXT_TF_RC_ERROR;
2593 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2595 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2596 struct ulp_rte_parser_params *params)
2598 /* Update the act_bitmap with dec ttl */
2599 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2600 return BNXT_TF_RC_SUCCESS;
2603 /* Function to handle the parsing of RTE Flow action JUMP */
2605 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2606 struct ulp_rte_parser_params *params)
2608 /* Update the act_bitmap with dec ttl */
2609 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2610 return BNXT_TF_RC_SUCCESS;
2614 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2615 struct ulp_rte_parser_params *params)
2617 const struct rte_flow_action_sample *sample;
2620 sample = action_item->conf;
2622 /* if SAMPLE bit is set it means this sample action is nested within the
2623 * actions of another sample action; this is not allowed
2625 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2626 BNXT_ULP_ACT_BIT_SAMPLE))
2627 return BNXT_TF_RC_ERROR;
2629 /* a sample action is only allowed as a shared action */
2630 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2631 BNXT_ULP_ACT_BIT_SHARED))
2632 return BNXT_TF_RC_ERROR;
2634 /* only a ratio of 1 i.e. 100% is supported */
2635 if (sample->ratio != 1)
2636 return BNXT_TF_RC_ERROR;
2638 if (!sample->actions)
2639 return BNXT_TF_RC_ERROR;
2641 /* parse the nested actions for a sample action */
2642 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2643 if (ret == BNXT_TF_RC_SUCCESS)
2644 /* Update the act_bitmap with sample */
2645 ULP_BITMAP_SET(params->act_bitmap.bits,
2646 BNXT_ULP_ACT_BIT_SAMPLE);
2651 /* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
2653 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
2654 struct ulp_rte_parser_params *params)
2656 /* Set the F1 flow header bit */
2657 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
2658 return ulp_rte_vxlan_decap_act_handler(action_item, params);
2661 /* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
2663 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
2664 struct ulp_rte_parser_params *params)
2667 /* Set the F2 flow header bit */
2668 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
2669 return ulp_rte_vxlan_decap_act_handler(NULL, params);