1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
20 #include "ulp_template_db_tbl.h"
22 /* Local defines for the parsing functions */
23 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
24 #define ULP_VLAN_PRIORITY_MASK 0x700
25 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
26 #define ULP_UDP_PORT_VXLAN 4789
28 /* Utility function to skip the void items. */
/*
 * Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries so the
 * caller always lands on the next meaningful flow item.  The role of
 * 'increment' is not visible here (loop body elided) -- presumably the
 * step applied to the item pointer each iteration; TODO confirm.
 */
30 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
36 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
43 /* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec for field->size bytes and (per the
 * return type) hands back a ulp_rte_hdr_field pointer -- presumably the
 * next field in the array so callers can chain copies; the return
 * statement is elided from this view, so confirm against the full file.
 */
44 static struct ulp_rte_hdr_field *
45 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
50 memcpy(field->spec, buffer, field->size);
55 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] as present in the parser's field bitmaps based on
 * its mask:
 *  - non-zero mask: set the bit in fld_bitmap; also set it in
 *    fld_s_bitmap unless the caller asked for ULP_PRSR_ACT_MATCH_IGNORE;
 *  - partial (not-all-ones) mask: flag the flow as a wildcard match via
 *    BNXT_ULP_CF_IDX_WC_MATCH;
 *  - zero mask: clear the fld_bitmap bit.
 */
57 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
59 enum bnxt_ulp_prsr_action prsr_act)
61 struct ulp_rte_hdr_field *field;
63 field = &params->hdr_field[idx];
64 if (ulp_bitmap_notzero(field->mask, field->size)) {
65 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
66 if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
67 ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
/* Partial mask => this field can only be matched via wildcard tables */
69 if (!ulp_bitmap_is_ones(field->mask, field->size))
70 ULP_COMP_FLD_IDX_WR(params,
71 BNXT_ULP_CF_IDX_WC_MATCH, 1);
73 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
/* NOTE(review): "deference" is a long-standing typo for "dereference";
 * the macro name is used throughout the file, so it is kept as-is.
 * Yields &x->y when x is non-NULL, else NULL -- lets callers pass
 * optional spec/mask structs without explicit NULL checks. */
77 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
78 /* Utility function to copy field spec and masks items */
/*
 * Copies one protocol field's spec and mask into hdr_field[*idx]:
 *  - the mask is copied (and the field bitmaps updated) only when
 *    mask_buff is non-NULL and ULP_PRSR_ACT_MASK_IGNORE is not set;
 *  - the spec is copied only when both spec and mask are present and the
 *    mask is non-zero (a zero mask means the field is not matched);
 *  - *idx is incremented afterward (statement elided from this view).
 */
80 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
83 const void *spec_buff,
84 const void *mask_buff,
85 enum bnxt_ulp_prsr_action prsr_act)
87 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
89 /* update the field size */
92 /* copy the mask specifications only if mask is not null */
93 if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
94 memcpy(field->mask, mask_buff, size);
95 ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
98 /* copy the protocol specifications only if both spec and mask are
 * present and the mask is non-zero */
99 if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
100 memcpy(field->spec, spec_buff, size);
102 /* Increment the index */
106 /* Utility function to validate and reserve 'size' header-field slots.
 * (Original comment was a stale copy-paste from the copy helper.)
 * On success writes the current field_idx to *idx and advances
 * params->field_idx by 'size'; fails (return elided here) when the
 * reservation would overflow BNXT_ULP_PROTO_HDR_MAX. */
108 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
112 if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
113 BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
116 *idx = params->field_idx;
117 params->field_idx += size;
122 * Function to handle the parsing of RTE Flows and placing
123 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow item array until RTE_FLOW_ITEM_TYPE_END, dispatching
 * each item to its registered proto_hdr_func.  Vendor-specific items
 * (>= BNXT_RTE_FLOW_ITEM_TYPE_END) are looked up in ulp_vendor_hdr_info;
 * standard items in ulp_hdr_info.  On completion the implied source
 * port (SVIF) match is applied.  Returns a BNXT_TF_RC_* code.
 */
126 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
127 struct ulp_rte_parser_params *params)
129 const struct rte_flow_item *item = pattern;
130 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Field slots below SVIF_NUM are reserved for the implicit SVIF match */
132 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
134 /* Set the computed flags for no vlan tags before parsing */
135 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
136 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
138 /* Parse all the items in the pattern */
139 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
140 if (item->type >= (typeof(item->type))
141 BNXT_RTE_FLOW_ITEM_TYPE_END) {
143 (typeof(item->type))BNXT_RTE_FLOW_ITEM_TYPE_LAST)
144 goto hdr_parser_error;
145 /* get the header information from the vendor table */
146 hdr_info = &ulp_vendor_hdr_info[item->type -
147 BNXT_RTE_FLOW_ITEM_TYPE_END];
/* Standard items beyond HIGIG2 have no table entry */
149 if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
150 goto hdr_parser_error;
151 hdr_info = &ulp_hdr_info[item->type];
153 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
154 goto hdr_parser_error;
155 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
156 /* call the registered callback handler */
157 if (hdr_info->proto_hdr_func) {
158 if (hdr_info->proto_hdr_func(item, params) !=
159 BNXT_TF_RC_SUCCESS) {
160 return BNXT_TF_RC_ERROR;
166 /* update the implied SVIF */
167 return ulp_rte_parser_implicit_match_port_process(params);
170 BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
172 return BNXT_TF_RC_PARSE_ERR;
176 * Function to handle the parsing of RTE Flows and placing
177 * the RTE flow actions into the ulp structures.
/*
 * Mirror of the header parser for actions: walks the action array until
 * RTE_FLOW_ACTION_TYPE_END, dispatching each action to its registered
 * proto_act_func (vendor actions via ulp_vendor_act_info, standard ones
 * via ulp_act_info).  Afterwards applies the implicit destination-port
 * action.  Returns a BNXT_TF_RC_* code.
 */
180 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
181 struct ulp_rte_parser_params *params)
183 const struct rte_flow_action *action_item = actions;
184 struct bnxt_ulp_rte_act_info *hdr_info;
186 /* Parse all the actions in the list */
187 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
188 if (action_item->type >=
189 (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_END) {
190 if (action_item->type >=
191 (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_LAST)
192 goto act_parser_error;
193 /* get the header information from bnxt actinfo table */
194 hdr_info = &ulp_vendor_act_info[action_item->type -
195 BNXT_RTE_FLOW_ACTION_TYPE_END];
/* Standard actions beyond SHARED have no table entry */
197 if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
198 goto act_parser_error;
199 /* get the header information from the act info table */
200 hdr_info = &ulp_act_info[action_item->type];
202 if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
203 goto act_parser_error;
204 } else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
205 /* call the registered callback handler */
206 if (hdr_info->proto_act_func) {
207 if (hdr_info->proto_act_func(action_item,
209 BNXT_TF_RC_SUCCESS) {
210 return BNXT_TF_RC_ERROR;
216 /* update the implied port details */
217 ulp_rte_parser_implicit_act_port_process(params);
218 return BNXT_TF_RC_SUCCESS;
221 BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
223 return BNXT_TF_RC_ERROR;
227 * Function to handle the post processing of the computed
228 * fields for the interface.
/*
 * Resolves the incoming dpdk port to a port-db ifindex and populates the
 * PARIF computed fields for the flow's direction: on ingress the physical
 * port PARIF; otherwise (egress path implied) the VF-func PARIF for
 * VF-rep match ports or the driver-func PARIF for everything else, plus
 * the MATCH_PORT_IS_VFREP / MATCH_PORT_IS_PF flags.
 */
231 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
234 uint16_t port_id, parif;
236 enum bnxt_ulp_direction_type dir;
238 /* get the direction details */
239 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
241 /* read the port id details */
242 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
243 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
246 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
250 if (dir == BNXT_ULP_DIR_INGRESS) {
/* Ingress flows match on the physical port PARIF */
252 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
253 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
254 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
257 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
260 /* Get the match port type */
261 mtype = ULP_COMP_FLD_IDX_RD(params,
262 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
263 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
264 ULP_COMP_FLD_IDX_WR(params,
265 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
267 /* Set VF func PARIF */
268 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
269 BNXT_ULP_VF_FUNC_PARIF,
272 "ParseErr:ifindex is not valid\n");
275 ULP_COMP_FLD_IDX_WR(params,
276 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
280 /* Set DRV func PARIF */
281 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
282 BNXT_ULP_DRV_FUNC_PARIF,
285 "ParseErr:ifindex is not valid\n");
288 ULP_COMP_FLD_IDX_WR(params,
289 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
292 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
293 ULP_COMP_FLD_IDX_WR(params,
294 BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
/*
 * Post-processes a parsed "normal" flow: stamps the direction bit into
 * both the header and action bitmaps, computes the VF-to-VF flag,
 * translates a DEC_TTL action into tunnel/non-tunnel TTL computed
 * fields, merges the fast-path header bits into the main header bitmap,
 * records the flow id, and finally updates the interface PARIF fields.
 */
301 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
303 enum bnxt_ulp_intf_type match_port_type, act_port_type;
304 enum bnxt_ulp_direction_type dir;
305 uint32_t act_port_set;
307 /* Get the computed details */
308 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
309 match_port_type = ULP_COMP_FLD_IDX_RD(params,
310 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
311 act_port_type = ULP_COMP_FLD_IDX_RD(params,
312 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
313 act_port_set = ULP_COMP_FLD_IDX_RD(params,
314 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
316 /* set the flow direction in the proto and action header */
317 if (dir == BNXT_ULP_DIR_EGRESS) {
318 ULP_BITMAP_SET(params->hdr_bitmap.bits,
319 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
320 ULP_BITMAP_SET(params->act_bitmap.bits,
321 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
324 /* calculate the VF to VF flag */
325 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
326 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
327 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
329 /* Update the decrement ttl computational fields */
330 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
331 BNXT_ULP_ACT_BIT_DEC_TTL)) {
333 * Check that vxlan proto is included and vxlan decap
334 * action is not set then decrement tunnel ttl.
335 * Similarly add GRE and NVGRE in future.
337 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
338 BNXT_ULP_HDR_BIT_T_VXLAN) &&
339 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
340 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
341 ULP_COMP_FLD_IDX_WR(params,
342 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
344 ULP_COMP_FLD_IDX_WR(params,
345 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
349 /* Merge the hdr_fp_bit into the proto header bit */
350 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
352 /* Update the comp fld fid */
353 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
355 /* Update the computed interface parameters */
356 bnxt_ulp_comp_fld_intf_update(params);
358 /* TBD: Handle the flow rejection scenarios */
363 * Function to handle the post processing of the parsing details
/* Thin public wrapper: currently only delegates to the normal-flow
 * post-processing path. */
366 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
368 ulp_post_process_normal_flow(params);
372 * Function to compute the flow direction based on the match port details
/*
 * Writes BNXT_ULP_CF_IDX_DIRECTION:
 *  - an ingress-attributed flow matching a VF representor is flipped to
 *    egress (traffic is headed toward the VF);
 *  - otherwise the direction follows the flow's dir_attr, defaulting to
 *    egress when the ingress attribute is absent.
 */
375 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
377 enum bnxt_ulp_intf_type match_port_type;
379 /* Get the match port type */
380 match_port_type = ULP_COMP_FLD_IDX_RD(params,
381 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
383 /* If ingress flow and matchport is vf rep then dir is egress*/
384 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
385 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
386 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
387 BNXT_ULP_DIR_EGRESS);
389 /* Assign the input direction */
390 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
391 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
392 BNXT_ULP_DIR_INGRESS);
394 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
395 BNXT_ULP_DIR_EGRESS);
399 /* Function to set the SVIF (source virtual interface) match field. */
/*
 * Resolves the SVIF for 'ifindex' and writes it (big-endian) plus its
 * mask into the reserved SVIF header field, recording the value in
 * BNXT_ULP_CF_IDX_SVIF_FLAG so a second source cannot be set.  The SVIF
 * type is chosen from the (possibly item-supplied) direction and port
 * type: phy-port SVIF on ingress non-VF-rep, VF-func SVIF for VF reps
 * not forced egress, driver-func SVIF otherwise.  Also updates the
 * match-port type and recomputes the flow direction as side effects.
 */
401 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
404 enum bnxt_ulp_direction_type item_dir)
407 enum bnxt_ulp_direction_type dir;
408 struct ulp_rte_hdr_field *hdr_field;
409 enum bnxt_ulp_svif_type svif_type;
410 enum bnxt_ulp_intf_type port_type;
/* Only one source interface may be matched per flow */
412 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
413 BNXT_ULP_INVALID_SVIF_VAL) {
415 "SVIF already set,multiple source not support'd\n");
416 return BNXT_TF_RC_ERROR;
419 /* Get port type details */
420 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
421 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
422 BNXT_TF_DBG(ERR, "Invalid port type\n");
423 return BNXT_TF_RC_ERROR;
426 /* Update the match port type */
427 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
429 /* compute the direction */
430 bnxt_ulp_rte_parser_direction_compute(params);
432 /* Item-supplied direction (PORT_REPRESENTOR / REPRESENTED_PORT)
 * overrides the computed one */
433 dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
434 ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
435 if (dir == BNXT_ULP_DIR_INGRESS &&
436 port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
437 svif_type = BNXT_ULP_PHY_PORT_SVIF;
439 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
440 item_dir != BNXT_ULP_DIR_EGRESS)
441 svif_type = BNXT_ULP_VF_FUNC_SVIF;
443 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
445 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* Hardware expects the SVIF in network byte order */
447 svif = rte_cpu_to_be_16(svif);
448 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
449 memcpy(hdr_field->spec, &svif, sizeof(svif));
450 memcpy(hdr_field->mask, &mask, sizeof(mask));
451 hdr_field->size = sizeof(svif);
452 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
453 rte_be_to_cpu_16(svif));
454 return BNXT_TF_RC_SUCCESS;
457 /* Function to handle the parsing of the RTE port id */
/*
 * If no explicit SVIF item was parsed, derives the SVIF from the
 * incoming dpdk port id with a full 0xFFFF mask.  Returns success
 * immediately when the SVIF is already set.
 */
459 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
461 uint16_t port_id = 0;
462 uint16_t svif_mask = 0xFFFF;
464 int32_t rc = BNXT_TF_RC_ERROR;
466 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
467 BNXT_ULP_INVALID_SVIF_VAL)
468 return BNXT_TF_RC_SUCCESS;
470 /* SVIF not set. So get the port id */
471 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
473 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
476 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
480 /* Update the SVIF details */
481 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
482 BNXT_ULP_DIR_INVALID);
486 /* Function to handle the implicit action port id */
/*
 * If no explicit destination-port action was parsed, synthesizes a
 * PORT_ID action from the incoming interface and feeds it through the
 * normal port action handler, then clears ACT_PORT_IS_SET so later
 * stages treat the port as implicit.
 */
488 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
490 struct rte_flow_action action_item = {0};
491 struct rte_flow_action_port_id port_id = {0};
493 /* Read the action port set bit */
494 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
495 /* Already set, so just exit */
496 return BNXT_TF_RC_SUCCESS;
498 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
499 action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
500 action_item.conf = &port_id;
502 /* Update the action port based on incoming port */
503 ulp_rte_port_act_handler(&action_item, params);
505 /* Reset the action port set bit */
506 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
507 return BNXT_TF_RC_SUCCESS;
510 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Treats the PF item as "match the incoming port": converts the
 * incoming dpdk port id to a port-db ifindex and sets the SVIF with a
 * full mask.  The item argument itself carries no data and is unused.
 */
512 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
513 struct ulp_rte_parser_params *params)
515 uint16_t port_id = 0;
516 uint16_t svif_mask = 0xFFFF;
519 /* Get the implicit port id */
520 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
522 /* perform the conversion from dpdk port to bnxt ifindex */
523 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
526 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
527 return BNXT_TF_RC_ERROR;
530 /* Update the SVIF details */
531 return ulp_rte_parser_svif_set(params, ifindex, svif_mask,
532 BNXT_ULP_DIR_INVALID);
535 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * Validates the VF item's spec and mask (both required -- error paths
 * visible below), converts the VF function id to a port-db ifindex, and
 * sets the SVIF match accordingly.
 */
537 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
538 struct ulp_rte_parser_params *params)
540 const struct rte_flow_item_vf *vf_spec = item->spec;
541 const struct rte_flow_item_vf *vf_mask = item->mask;
544 int32_t rc = BNXT_TF_RC_PARSE_ERR;
546 /* Get VF rte_flow_item for Port details */
548 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
552 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
557 /* perform the conversion from VF Func id to bnxt ifindex */
558 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
561 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
564 /* Update the SVIF details */
565 return ulp_rte_parser_svif_set(params, ifindex, mask,
566 BNXT_ULP_DIR_INVALID);
569 /* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
/*
 * Extracts the ethdev/port id and mask from whichever of the three port
 * item flavors was supplied, fixing the item direction per flavor:
 * PORT_ID carries none (DIR_INVALID), PORT_REPRESENTOR forces ingress,
 * REPRESENTED_PORT forces egress.  Then resolves the ifindex and sets
 * the SVIF match.
 */
571 ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
572 struct ulp_rte_parser_params *params)
574 enum bnxt_ulp_direction_type item_dir;
577 int32_t rc = BNXT_TF_RC_PARSE_ERR;
581 BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
585 BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
589 switch (item->type) {
590 case RTE_FLOW_ITEM_TYPE_PORT_ID: {
591 const struct rte_flow_item_port_id *port_spec = item->spec;
592 const struct rte_flow_item_port_id *port_mask = item->mask;
594 item_dir = BNXT_ULP_DIR_INVALID;
595 ethdev_id = port_spec->id;
596 mask = port_mask->id;
599 case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
600 const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
601 const struct rte_flow_item_ethdev *ethdev_mask = item->mask;
603 item_dir = BNXT_ULP_DIR_INGRESS;
604 ethdev_id = ethdev_spec->port_id;
605 mask = ethdev_mask->port_id;
608 case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
609 const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
610 const struct rte_flow_item_ethdev *ethdev_mask = item->mask;
612 item_dir = BNXT_ULP_DIR_EGRESS;
613 ethdev_id = ethdev_spec->port_id;
614 mask = ethdev_mask->port_id;
618 BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");
622 /* perform the conversion from dpdk port to bnxt ifindex */
623 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
626 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
629 /* Update the SVIF details */
630 return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
633 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * Matches on a physical port: validates spec/mask, rejects egress flows
 * (phy-port match is ingress-only), looks up the port's SVIF directly
 * from the port db by phy index, and writes the SVIF spec/mask into the
 * reserved SVIF header field.  Note this writes the SVIF field itself
 * rather than going through ulp_rte_parser_svif_set().
 */
635 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
636 struct ulp_rte_parser_params *params)
638 const struct rte_flow_item_phy_port *port_spec = item->spec;
639 const struct rte_flow_item_phy_port *port_mask = item->mask;
641 int32_t rc = BNXT_TF_RC_ERROR;
643 enum bnxt_ulp_direction_type dir;
644 struct ulp_rte_hdr_field *hdr_field;
646 /* Copy the rte_flow_item for phy port into hdr_field */
648 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
652 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
655 mask = port_mask->index;
657 /* Update the match port type */
658 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
659 BNXT_ULP_INTF_TYPE_PHY_PORT);
661 /* Compute the Hw direction */
662 bnxt_ulp_rte_parser_direction_compute(params);
664 /* Direction validation */
665 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
666 if (dir == BNXT_ULP_DIR_EGRESS) {
668 "Parse Err:Phy ports are valid only for ingress\n");
669 return BNXT_TF_RC_PARSE_ERR;
672 /* Get the physical port details from port db */
673 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
676 BNXT_TF_DBG(ERR, "Failed to get port details\n");
677 return BNXT_TF_RC_PARSE_ERR;
680 /* Update the SVIF details (network byte order for hardware) */
681 svif = rte_cpu_to_be_16(svif);
682 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
683 memcpy(hdr_field->spec, &svif, sizeof(svif));
684 memcpy(hdr_field->mask, &mask, sizeof(mask));
685 hdr_field->size = sizeof(svif);
686 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
687 rte_be_to_cpu_16(svif));
688 return BNXT_TF_RC_SUCCESS;
691 /* Function to handle the update of proto header based on field values */
/*
 * Translates an ethertype (big-endian, as stored in the flow item) into
 * the L3 fast-path header bits: IPv4 or IPv6, inner vs outer depending
 * on 'in_flag', and marks the corresponding I_L3/O_L3 computed field.
 */
693 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
694 uint16_t type, uint32_t in_flag)
696 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
698 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
699 BNXT_ULP_HDR_BIT_I_IPV4);
700 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
702 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
703 BNXT_ULP_HDR_BIT_O_IPV4);
704 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
706 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
708 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
709 BNXT_ULP_HDR_BIT_I_IPV6);
710 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
712 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
713 BNXT_ULP_HDR_BIT_O_IPV6);
714 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
719 /* Internal Function to identify broadcast or multicast packets */
/* Returns a truthy value (return statements elided here) when the MAC is
 * multicast or broadcast; such addresses cannot be offloaded and the
 * caller rejects the flow. */
721 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
723 if (rte_is_multicast_ether_addr(eth_addr) ||
724 rte_is_broadcast_ether_addr(eth_addr)) {
726 "No support for bcast or mcast addr offload\n");
732 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Parses an ETH item: rejects bcast/mcast src or dst MACs, reserves the
 * ethernet field slots, copies dst MAC, src MAC and ethertype (the
 * ethertype's mask is ignored for matching), then decides inner vs
 * outer ETH from whether an outer L2/L3/L4 header was already seen, and
 * finally propagates the ethertype into the L3 proto bits.
 */
734 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
735 struct ulp_rte_parser_params *params)
737 const struct rte_flow_item_eth *eth_spec = item->spec;
738 const struct rte_flow_item_eth *eth_mask = item->mask;
739 uint32_t idx = 0, dmac_idx = 0;
741 uint16_t eth_type = 0;
742 uint32_t inner_flag = 0;
744 /* Perform validations */
746 /* Todo: work around to avoid multicast and broadcast addr */
747 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
748 return BNXT_TF_RC_PARSE_ERR;
750 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
751 return BNXT_TF_RC_PARSE_ERR;
753 eth_type = eth_spec->type;
756 if (ulp_rte_prsr_fld_size_validate(params, &idx,
757 BNXT_ULP_PROTO_HDR_ETH_NUM)) {
758 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
759 return BNXT_TF_RC_ERROR;
762 * Copy the rte_flow_item for eth into hdr_field using ethernet
766 size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
767 ulp_rte_prsr_fld_mask(params, &idx, size,
768 ulp_deference_struct(eth_spec, dst.addr_bytes),
769 ulp_deference_struct(eth_mask, dst.addr_bytes),
770 ULP_PRSR_ACT_DEFAULT);
772 size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
773 ulp_rte_prsr_fld_mask(params, &idx, size,
774 ulp_deference_struct(eth_spec, src.addr_bytes),
775 ulp_deference_struct(eth_mask, src.addr_bytes),
776 ULP_PRSR_ACT_DEFAULT);
778 size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
/* Ethertype is recorded but not used as a match key (MATCH_IGNORE) */
779 ulp_rte_prsr_fld_mask(params, &idx, size,
780 ulp_deference_struct(eth_spec, type),
781 ulp_deference_struct(eth_mask, type),
782 ULP_PRSR_ACT_MATCH_IGNORE);
784 /* If any outer header was already parsed, this ETH is the inner one */
785 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
786 BNXT_ULP_HDR_BIT_O_ETH) ||
787 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
788 BNXT_ULP_HDR_BIT_O_IPV4) ||
789 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
790 BNXT_ULP_HDR_BIT_O_IPV6) ||
791 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
792 BNXT_ULP_HDR_BIT_O_UDP) ||
793 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
794 BNXT_ULP_HDR_BIT_O_TCP)) {
795 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
798 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
799 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
802 /* Update the field protocol hdr bitmap */
803 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
805 return BNXT_TF_RC_SUCCESS;
808 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Parses a VLAN item: splits the TCI into priority (top 3 bits) and tag
 * (low 12 bits), widens all-ones partial masks to full 16-bit exact
 * matches, copies priority (mask ignored -- OVS workaround), tag, and
 * inner ethertype into the header fields, then classifies the tag as
 * outer-outer / outer-inner / inner-outer / inner-inner based on which
 * ETH headers were seen and how many tags were already counted,
 * updating the per-level VTAG computed fields and bitmap bits.
 */
810 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
811 struct ulp_rte_parser_params *params)
813 const struct rte_flow_item_vlan *vlan_spec = item->spec;
814 const struct rte_flow_item_vlan *vlan_mask = item->mask;
815 struct ulp_rte_hdr_bitmap *hdr_bit;
817 uint16_t vlan_tag = 0, priority = 0;
818 uint16_t vlan_tag_mask = 0, priority_mask = 0;
819 uint32_t outer_vtag_num;
820 uint32_t inner_vtag_num;
821 uint16_t eth_type = 0;
822 uint32_t inner_flag = 0;
/* Split the TCI: PCP in the top 3 bits, VID in the low 12 */
826 vlan_tag = ntohs(vlan_spec->tci);
827 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
828 vlan_tag &= ULP_VLAN_TAG_MASK;
829 vlan_tag = htons(vlan_tag);
830 eth_type = vlan_spec->inner_type;
834 vlan_tag_mask = ntohs(vlan_mask->tci);
835 priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
836 vlan_tag_mask &= 0xfff;
839 * the storage for priority and vlan tag is 2 bytes
840 * The mask of priority which is 3 bits if it is all 1's
841 * then make the rest bits 13 bits as 1's
842 * so that it is matched as exact match.
844 if (priority_mask == ULP_VLAN_PRIORITY_MASK)
845 priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
846 if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
847 vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
848 vlan_tag_mask = htons(vlan_tag_mask);
851 if (ulp_rte_prsr_fld_size_validate(params, &idx,
852 BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
853 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
854 return BNXT_TF_RC_ERROR;
858 * Copy the rte_flow_item for vlan into hdr_field using Vlan
861 size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
863 * The priority field is ignored since OVS is setting it as
864 * wild card match and it is not supported. This is a work
865 * around and shall be addressed in the future.
867 ulp_rte_prsr_fld_mask(params, &idx, size,
869 (vlan_mask) ? &priority_mask : NULL,
870 ULP_PRSR_ACT_MASK_IGNORE);
872 ulp_rte_prsr_fld_mask(params, &idx, size,
874 (vlan_mask) ? &vlan_tag_mask : NULL,
875 ULP_PRSR_ACT_DEFAULT);
877 size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
878 ulp_rte_prsr_fld_mask(params, &idx, size,
879 ulp_deference_struct(vlan_spec, inner_type),
880 ulp_deference_struct(vlan_mask, inner_type),
881 ULP_PRSR_ACT_MATCH_IGNORE);
883 /* Get the outer tag and inner tag counts */
884 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
885 BNXT_ULP_CF_IDX_O_VTAG_NUM);
886 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
887 BNXT_ULP_CF_IDX_I_VTAG_NUM);
889 /* Update the hdr_bitmap of the vlans */
890 hdr_bit = &params->hdr_bitmap;
/* Case 1: first tag after the outer ETH only -> outer-outer VLAN */
891 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
892 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
894 /* Update the vlan tag num */
896 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
898 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
899 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
900 ULP_BITMAP_SET(params->hdr_bitmap.bits,
901 BNXT_ULP_HDR_BIT_OO_VLAN);
902 if (vlan_mask && vlan_tag_mask)
903 ULP_COMP_FLD_IDX_WR(params,
904 BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);
/* Case 2: second tag on the outer ETH -> outer-inner VLAN */
906 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
907 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
908 outer_vtag_num == 1) {
909 /* update the vlan tag num */
911 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
913 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
914 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
915 ULP_BITMAP_SET(params->hdr_bitmap.bits,
916 BNXT_ULP_HDR_BIT_OI_VLAN);
917 if (vlan_mask && vlan_tag_mask)
918 ULP_COMP_FLD_IDX_WR(params,
919 BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);
/* Case 3: first tag after the inner ETH -> inner-outer VLAN */
921 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
922 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
924 /* update the vlan tag num */
926 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
928 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
929 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
930 ULP_BITMAP_SET(params->hdr_bitmap.bits,
931 BNXT_ULP_HDR_BIT_IO_VLAN);
932 if (vlan_mask && vlan_tag_mask)
933 ULP_COMP_FLD_IDX_WR(params,
934 BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
/* Case 4: second tag on the inner ETH -> inner-inner VLAN */
936 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
937 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
938 inner_vtag_num == 1) {
939 /* update the vlan tag num */
941 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
943 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
944 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
945 ULP_BITMAP_SET(params->hdr_bitmap.bits,
946 BNXT_ULP_HDR_BIT_II_VLAN);
947 if (vlan_mask && vlan_tag_mask)
948 ULP_COMP_FLD_IDX_WR(params,
949 BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
952 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
953 return BNXT_TF_RC_ERROR;
955 /* Update the field protocol hdr bitmap */
956 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
957 return BNXT_TF_RC_SUCCESS;
960 /* Function to handle the update of proto header based on field values */
/*
 * Translates an IP protocol number into the L4/tunnel header bits:
 * UDP and TCP set inner/outer fast-path bits (per 'in_flag') and the
 * I_L4/O_L4 computed fields; GRE sets the tunnel bit; ICMP picks the
 * inner or outer ICMP bit based on the L3_TUN computed field.  The
 * trailing writes record the (fallback) L3 protocol id computed fields.
 */
962 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
963 uint8_t proto, uint32_t in_flag)
965 if (proto == IPPROTO_UDP) {
967 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
968 BNXT_ULP_HDR_BIT_I_UDP);
969 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
971 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
972 BNXT_ULP_HDR_BIT_O_UDP);
973 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
975 } else if (proto == IPPROTO_TCP) {
977 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
978 BNXT_ULP_HDR_BIT_I_TCP);
979 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
981 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
982 BNXT_ULP_HDR_BIT_O_TCP);
983 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
985 } else if (proto == IPPROTO_GRE) {
986 ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
987 } else if (proto == IPPROTO_ICMP) {
988 if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
989 ULP_BITMAP_SET(param->hdr_bitmap.bits,
990 BNXT_ULP_HDR_BIT_I_ICMP);
992 ULP_BITMAP_SET(param->hdr_bitmap.bits,
993 BNXT_ULP_HDR_BIT_O_ICMP);
997 ULP_COMP_FLD_IDX_WR(param,
998 BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1000 ULP_COMP_FLD_IDX_WR(param,
1001 BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1004 ULP_COMP_FLD_IDX_WR(param,
1005 BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1007 ULP_COMP_FLD_IDX_WR(param,
1008 BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1014 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * Parses an IPv4 item: rejects a third L3 header, reserves the field
 * slots, then copies each IPv4 header field (version_ihl, ToS -- mask
 * ignored as an OVS workaround, total_length, packet_id,
 * fragment_offset, TTL, next_proto_id -- recorded but not matched,
 * checksum, src and dst addresses).  Classifies the header as inner or
 * outer, masks the protocol field per the item mask, propagates it into
 * the L4 bits, and bumps the L3 header count.
 */
1016 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
1017 struct ulp_rte_parser_params *params)
1019 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
1020 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
1021 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1022 uint32_t idx = 0, dip_idx = 0;
1025 uint32_t inner_flag = 0;
1028 /* validate there are no 3rd L3 header */
1029 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1031 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1032 return BNXT_TF_RC_ERROR;
1035 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1036 BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
1037 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1038 return BNXT_TF_RC_ERROR;
1042 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1045 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
1046 ulp_rte_prsr_fld_mask(params, &idx, size,
1047 ulp_deference_struct(ipv4_spec, hdr.version_ihl),
1048 ulp_deference_struct(ipv4_mask, hdr.version_ihl),
1049 ULP_PRSR_ACT_DEFAULT);
1052 * The tos field is ignored since OVS is setting it as wild card
1053 * match and it is not supported. This is a work around and
1054 * shall be addressed in the future.
1056 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
1057 ulp_rte_prsr_fld_mask(params, &idx, size,
1058 ulp_deference_struct(ipv4_spec,
1059 hdr.type_of_service),
1060 ulp_deference_struct(ipv4_mask,
1061 hdr.type_of_service),
1062 ULP_PRSR_ACT_MASK_IGNORE);
1064 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
1065 ulp_rte_prsr_fld_mask(params, &idx, size,
1066 ulp_deference_struct(ipv4_spec, hdr.total_length),
1067 ulp_deference_struct(ipv4_mask, hdr.total_length),
1068 ULP_PRSR_ACT_DEFAULT);
1070 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
1071 ulp_rte_prsr_fld_mask(params, &idx, size,
1072 ulp_deference_struct(ipv4_spec, hdr.packet_id),
1073 ulp_deference_struct(ipv4_mask, hdr.packet_id),
1074 ULP_PRSR_ACT_DEFAULT);
1076 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
1077 ulp_rte_prsr_fld_mask(params, &idx, size,
1078 ulp_deference_struct(ipv4_spec,
1079 hdr.fragment_offset),
1080 ulp_deference_struct(ipv4_mask,
1081 hdr.fragment_offset),
1082 ULP_PRSR_ACT_DEFAULT);
1084 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
1085 ulp_rte_prsr_fld_mask(params, &idx, size,
1086 ulp_deference_struct(ipv4_spec, hdr.time_to_live),
1087 ulp_deference_struct(ipv4_mask, hdr.time_to_live),
1088 ULP_PRSR_ACT_DEFAULT);
1090 /* Ignore proto for matching templates */
1091 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
1092 ulp_rte_prsr_fld_mask(params, &idx, size,
1093 ulp_deference_struct(ipv4_spec,
1095 ulp_deference_struct(ipv4_mask,
1097 ULP_PRSR_ACT_MATCH_IGNORE);
1099 proto = ipv4_spec->hdr.next_proto_id;
1101 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
1102 ulp_rte_prsr_fld_mask(params, &idx, size,
1103 ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
1104 ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
1105 ULP_PRSR_ACT_DEFAULT);
1107 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
1108 ulp_rte_prsr_fld_mask(params, &idx, size,
1109 ulp_deference_struct(ipv4_spec, hdr.src_addr),
1110 ulp_deference_struct(ipv4_mask, hdr.src_addr),
1111 ULP_PRSR_ACT_DEFAULT);
1114 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
1115 ulp_rte_prsr_fld_mask(params, &idx, size,
1116 ulp_deference_struct(ipv4_spec, hdr.dst_addr),
1117 ulp_deference_struct(ipv4_mask, hdr.dst_addr),
1118 ULP_PRSR_ACT_DEFAULT);
1120 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1121 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1122 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
1123 ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
1124 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1125 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1128 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1129 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1130 /* Update the tunnel offload dest ip offset */
1131 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1135 /* Some of the PMD applications may set the protocol field
1136 * in the IPv4 spec but don't set the mask. So, consider
1137 * the mask in the proto value calculation.
1140 proto &= ipv4_mask->hdr.next_proto_id;
1142 /* Update the field protocol hdr bitmap */
1143 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1144 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1145 return BNXT_TF_RC_SUCCESS;
1148 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1150 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1151 struct ulp_rte_parser_params *params)
1153 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1154 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1155 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1156 uint32_t idx = 0, dip_idx = 0;
1158 uint32_t ver_spec = 0, ver_mask = 0;
1159 uint32_t tc_spec = 0, tc_mask = 0;
1160 uint32_t lab_spec = 0, lab_mask = 0;
1162 uint32_t inner_flag = 0;
1165 /* validate there are no 3rd L3 header */
1166 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1168 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1169 return BNXT_TF_RC_ERROR;
1172 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1173 BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
1174 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1175 return BNXT_TF_RC_ERROR;
1179 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1183 ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1184 tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1185 lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1186 proto = ipv6_spec->hdr.proto;
1190 ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1191 tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1192 lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1194 /* Some of the PMD applications may set the protocol field
1195 * in the IPv6 spec but don't set the mask. So, consider
1196 * the mask in proto value calculation.
1198 proto &= ipv6_mask->hdr.proto;
1201 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
1202 ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
1203 ULP_PRSR_ACT_DEFAULT);
1205 * The TC and flow label field are ignored since OVS is
1206 * setting it for match and it is not supported.
1207 * This is a work around and
1208 * shall be addressed in the future.
1210 ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
1211 ULP_PRSR_ACT_MASK_IGNORE);
1212 ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
1213 ULP_PRSR_ACT_MASK_IGNORE);
1215 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
1216 ulp_rte_prsr_fld_mask(params, &idx, size,
1217 ulp_deference_struct(ipv6_spec, hdr.payload_len),
1218 ulp_deference_struct(ipv6_mask, hdr.payload_len),
1219 ULP_PRSR_ACT_DEFAULT);
1221 /* Ignore proto for template matching */
1222 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
1223 ulp_rte_prsr_fld_mask(params, &idx, size,
1224 ulp_deference_struct(ipv6_spec, hdr.proto),
1225 ulp_deference_struct(ipv6_mask, hdr.proto),
1226 ULP_PRSR_ACT_MATCH_IGNORE);
1228 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
1229 ulp_rte_prsr_fld_mask(params, &idx, size,
1230 ulp_deference_struct(ipv6_spec, hdr.hop_limits),
1231 ulp_deference_struct(ipv6_mask, hdr.hop_limits),
1232 ULP_PRSR_ACT_DEFAULT);
1234 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
1235 ulp_rte_prsr_fld_mask(params, &idx, size,
1236 ulp_deference_struct(ipv6_spec, hdr.src_addr),
1237 ulp_deference_struct(ipv6_mask, hdr.src_addr),
1238 ULP_PRSR_ACT_DEFAULT);
1241 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
1242 ulp_rte_prsr_fld_mask(params, &idx, size,
1243 ulp_deference_struct(ipv6_spec, hdr.dst_addr),
1244 ulp_deference_struct(ipv6_mask, hdr.dst_addr),
1245 ULP_PRSR_ACT_DEFAULT);
1247 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1248 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1249 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
1250 ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
1251 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1252 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1255 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1256 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1257 /* Update the tunnel offload dest ip offset */
1258 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1262 /* Update the field protocol hdr bitmap */
1263 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1264 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1266 return BNXT_TF_RC_SUCCESS;
1269 /* Function to handle the update of proto header based on field values */
1271 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
1272 uint16_t src_port, uint16_t src_mask,
1273 uint16_t dst_port, uint16_t dst_mask,
1274 enum bnxt_ulp_hdr_bit hdr_bit)
1277 case BNXT_ULP_HDR_BIT_I_UDP:
1278 case BNXT_ULP_HDR_BIT_I_TCP:
1279 ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
1280 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1281 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1282 (uint64_t)rte_be_to_cpu_16(src_port));
1283 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1284 (uint64_t)rte_be_to_cpu_16(dst_port));
1285 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
1286 (uint64_t)rte_be_to_cpu_16(src_mask));
1287 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
1288 (uint64_t)rte_be_to_cpu_16(dst_mask));
1289 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1291 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1292 !!(src_port & src_mask));
1293 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1294 !!(dst_port & dst_mask));
1295 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1296 (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
1297 IPPROTO_UDP : IPPROTO_TCP);
1299 case BNXT_ULP_HDR_BIT_O_UDP:
1300 case BNXT_ULP_HDR_BIT_O_TCP:
1301 ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
1302 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1303 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1304 (uint64_t)rte_be_to_cpu_16(src_port));
1305 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1306 (uint64_t)rte_be_to_cpu_16(dst_port));
1307 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
1308 (uint64_t)rte_be_to_cpu_16(src_mask));
1309 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1310 (uint64_t)rte_be_to_cpu_16(dst_mask));
1311 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1313 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1314 !!(src_port & src_mask));
1315 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1316 !!(dst_port & dst_mask));
1317 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1318 (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
1319 IPPROTO_UDP : IPPROTO_TCP);
1325 if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
1326 tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1327 ULP_BITMAP_SET(params->hdr_fp_bit.bits,
1328 BNXT_ULP_HDR_BIT_T_VXLAN);
1329 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1333 /* Function to handle the parsing of RTE Flow item UDP Header. */
1335 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1336 struct ulp_rte_parser_params *params)
1338 const struct rte_flow_item_udp *udp_spec = item->spec;
1339 const struct rte_flow_item_udp *udp_mask = item->mask;
1340 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1343 uint16_t dport = 0, sport = 0;
1344 uint16_t dport_mask = 0, sport_mask = 0;
1346 enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;
1348 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1350 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1351 return BNXT_TF_RC_ERROR;
1355 sport = udp_spec->hdr.src_port;
1356 dport = udp_spec->hdr.dst_port;
1359 sport_mask = udp_mask->hdr.src_port;
1360 dport_mask = udp_mask->hdr.dst_port;
1363 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1364 BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1365 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1366 return BNXT_TF_RC_ERROR;
1370 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1373 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1374 ulp_rte_prsr_fld_mask(params, &idx, size,
1375 ulp_deference_struct(udp_spec, hdr.src_port),
1376 ulp_deference_struct(udp_mask, hdr.src_port),
1377 ULP_PRSR_ACT_DEFAULT);
1379 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1380 ulp_rte_prsr_fld_mask(params, &idx, size,
1381 ulp_deference_struct(udp_spec, hdr.dst_port),
1382 ulp_deference_struct(udp_mask, hdr.dst_port),
1383 ULP_PRSR_ACT_DEFAULT);
1385 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1386 ulp_rte_prsr_fld_mask(params, &idx, size,
1387 ulp_deference_struct(udp_spec, hdr.dgram_len),
1388 ulp_deference_struct(udp_mask, hdr.dgram_len),
1389 ULP_PRSR_ACT_DEFAULT);
1391 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1392 ulp_rte_prsr_fld_mask(params, &idx, size,
1393 ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1394 ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1395 ULP_PRSR_ACT_DEFAULT);
1397 /* Set the udp header bitmap and computed l4 header bitmaps */
1398 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1399 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
1400 out_l4 = BNXT_ULP_HDR_BIT_I_UDP;
1402 ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1403 dport_mask, out_l4);
1404 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1405 return BNXT_TF_RC_SUCCESS;
1408 /* Function to handle the parsing of RTE Flow item TCP Header. */
1410 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1411 struct ulp_rte_parser_params *params)
1413 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1414 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1415 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1417 uint16_t dport = 0, sport = 0;
1418 uint16_t dport_mask = 0, sport_mask = 0;
1421 enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;
1423 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1425 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1426 return BNXT_TF_RC_ERROR;
1430 sport = tcp_spec->hdr.src_port;
1431 dport = tcp_spec->hdr.dst_port;
1434 sport_mask = tcp_mask->hdr.src_port;
1435 dport_mask = tcp_mask->hdr.dst_port;
1438 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1439 BNXT_ULP_PROTO_HDR_TCP_NUM)) {
1440 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1441 return BNXT_TF_RC_ERROR;
1445 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1448 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1449 ulp_rte_prsr_fld_mask(params, &idx, size,
1450 ulp_deference_struct(tcp_spec, hdr.src_port),
1451 ulp_deference_struct(tcp_mask, hdr.src_port),
1452 ULP_PRSR_ACT_DEFAULT);
1454 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1455 ulp_rte_prsr_fld_mask(params, &idx, size,
1456 ulp_deference_struct(tcp_spec, hdr.dst_port),
1457 ulp_deference_struct(tcp_mask, hdr.dst_port),
1458 ULP_PRSR_ACT_DEFAULT);
1460 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1461 ulp_rte_prsr_fld_mask(params, &idx, size,
1462 ulp_deference_struct(tcp_spec, hdr.sent_seq),
1463 ulp_deference_struct(tcp_mask, hdr.sent_seq),
1464 ULP_PRSR_ACT_DEFAULT);
1466 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1467 ulp_rte_prsr_fld_mask(params, &idx, size,
1468 ulp_deference_struct(tcp_spec, hdr.recv_ack),
1469 ulp_deference_struct(tcp_mask, hdr.recv_ack),
1470 ULP_PRSR_ACT_DEFAULT);
1472 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1473 ulp_rte_prsr_fld_mask(params, &idx, size,
1474 ulp_deference_struct(tcp_spec, hdr.data_off),
1475 ulp_deference_struct(tcp_mask, hdr.data_off),
1476 ULP_PRSR_ACT_DEFAULT);
1478 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1479 ulp_rte_prsr_fld_mask(params, &idx, size,
1480 ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1481 ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1482 ULP_PRSR_ACT_DEFAULT);
1484 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1485 ulp_rte_prsr_fld_mask(params, &idx, size,
1486 ulp_deference_struct(tcp_spec, hdr.rx_win),
1487 ulp_deference_struct(tcp_mask, hdr.rx_win),
1488 ULP_PRSR_ACT_DEFAULT);
1490 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1491 ulp_rte_prsr_fld_mask(params, &idx, size,
1492 ulp_deference_struct(tcp_spec, hdr.cksum),
1493 ulp_deference_struct(tcp_mask, hdr.cksum),
1494 ULP_PRSR_ACT_DEFAULT);
1496 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1497 ulp_rte_prsr_fld_mask(params, &idx, size,
1498 ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1499 ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1500 ULP_PRSR_ACT_DEFAULT);
1502 /* Set the udp header bitmap and computed l4 header bitmaps */
1503 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1504 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
1505 out_l4 = BNXT_ULP_HDR_BIT_I_TCP;
1507 ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1508 dport_mask, out_l4);
1509 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1510 return BNXT_TF_RC_SUCCESS;
1513 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1515 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1516 struct ulp_rte_parser_params *params)
1518 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1519 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1520 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1524 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1525 BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1526 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1527 return BNXT_TF_RC_ERROR;
1531 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1534 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1535 ulp_rte_prsr_fld_mask(params, &idx, size,
1536 ulp_deference_struct(vxlan_spec, flags),
1537 ulp_deference_struct(vxlan_mask, flags),
1538 ULP_PRSR_ACT_DEFAULT);
1540 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1541 ulp_rte_prsr_fld_mask(params, &idx, size,
1542 ulp_deference_struct(vxlan_spec, rsvd0),
1543 ulp_deference_struct(vxlan_mask, rsvd0),
1544 ULP_PRSR_ACT_DEFAULT);
1546 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1547 ulp_rte_prsr_fld_mask(params, &idx, size,
1548 ulp_deference_struct(vxlan_spec, vni),
1549 ulp_deference_struct(vxlan_mask, vni),
1550 ULP_PRSR_ACT_DEFAULT);
1552 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1553 ulp_rte_prsr_fld_mask(params, &idx, size,
1554 ulp_deference_struct(vxlan_spec, rsvd1),
1555 ulp_deference_struct(vxlan_mask, rsvd1),
1556 ULP_PRSR_ACT_DEFAULT);
1558 /* Update the hdr_bitmap with vxlan */
1559 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1560 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1561 return BNXT_TF_RC_SUCCESS;
1564 /* Function to handle the parsing of RTE Flow item GRE Header. */
1566 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1567 struct ulp_rte_parser_params *params)
1569 const struct rte_flow_item_gre *gre_spec = item->spec;
1570 const struct rte_flow_item_gre *gre_mask = item->mask;
1571 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1575 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1576 BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1577 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1578 return BNXT_TF_RC_ERROR;
1581 size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1582 ulp_rte_prsr_fld_mask(params, &idx, size,
1583 ulp_deference_struct(gre_spec, c_rsvd0_ver),
1584 ulp_deference_struct(gre_mask, c_rsvd0_ver),
1585 ULP_PRSR_ACT_DEFAULT);
1587 size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1588 ulp_rte_prsr_fld_mask(params, &idx, size,
1589 ulp_deference_struct(gre_spec, protocol),
1590 ulp_deference_struct(gre_mask, protocol),
1591 ULP_PRSR_ACT_DEFAULT);
1593 /* Update the hdr_bitmap with GRE */
1594 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1595 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1596 return BNXT_TF_RC_SUCCESS;
1599 /* Function to handle the parsing of RTE Flow item ANY. */
1601 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1602 struct ulp_rte_parser_params *params __rte_unused)
1604 return BNXT_TF_RC_SUCCESS;
1607 /* Function to handle the parsing of RTE Flow item ICMP Header. */
1609 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1610 struct ulp_rte_parser_params *params)
1612 const struct rte_flow_item_icmp *icmp_spec = item->spec;
1613 const struct rte_flow_item_icmp *icmp_mask = item->mask;
1614 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1618 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1619 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1620 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1621 return BNXT_TF_RC_ERROR;
1624 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1625 ulp_rte_prsr_fld_mask(params, &idx, size,
1626 ulp_deference_struct(icmp_spec, hdr.icmp_type),
1627 ulp_deference_struct(icmp_mask, hdr.icmp_type),
1628 ULP_PRSR_ACT_DEFAULT);
1630 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1631 ulp_rte_prsr_fld_mask(params, &idx, size,
1632 ulp_deference_struct(icmp_spec, hdr.icmp_code),
1633 ulp_deference_struct(icmp_mask, hdr.icmp_code),
1634 ULP_PRSR_ACT_DEFAULT);
1636 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1637 ulp_rte_prsr_fld_mask(params, &idx, size,
1638 ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1639 ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1640 ULP_PRSR_ACT_DEFAULT);
1642 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1643 ulp_rte_prsr_fld_mask(params, &idx, size,
1644 ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1645 ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1646 ULP_PRSR_ACT_DEFAULT);
1648 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1649 ulp_rte_prsr_fld_mask(params, &idx, size,
1650 ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1651 ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1652 ULP_PRSR_ACT_DEFAULT);
1654 /* Update the hdr_bitmap with ICMP */
1655 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1656 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1658 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1659 return BNXT_TF_RC_SUCCESS;
1662 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
1664 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1665 struct ulp_rte_parser_params *params)
1667 const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1668 const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1669 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1673 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1674 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1675 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1676 return BNXT_TF_RC_ERROR;
1679 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1680 ulp_rte_prsr_fld_mask(params, &idx, size,
1681 ulp_deference_struct(icmp_spec, type),
1682 ulp_deference_struct(icmp_mask, type),
1683 ULP_PRSR_ACT_DEFAULT);
1685 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1686 ulp_rte_prsr_fld_mask(params, &idx, size,
1687 ulp_deference_struct(icmp_spec, code),
1688 ulp_deference_struct(icmp_mask, code),
1689 ULP_PRSR_ACT_DEFAULT);
1691 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
1692 ulp_rte_prsr_fld_mask(params, &idx, size,
1693 ulp_deference_struct(icmp_spec, checksum),
1694 ulp_deference_struct(icmp_mask, checksum),
1695 ULP_PRSR_ACT_DEFAULT);
1697 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
1698 BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
1699 return BNXT_TF_RC_ERROR;
1702 /* Update the hdr_bitmap with ICMP */
1703 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1704 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1706 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1707 return BNXT_TF_RC_SUCCESS;
1710 /* Function to handle the parsing of RTE Flow item void Header */
1712 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1713 struct ulp_rte_parser_params *params __rte_unused)
1715 return BNXT_TF_RC_SUCCESS;
1718 /* Function to handle the parsing of RTE Flow action void Header. */
1720 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1721 struct ulp_rte_parser_params *params __rte_unused)
1723 return BNXT_TF_RC_SUCCESS;
1726 /* Function to handle the parsing of RTE Flow action Mark Header. */
1728 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1729 struct ulp_rte_parser_params *param)
1731 const struct rte_flow_action_mark *mark;
1732 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1735 mark = action_item->conf;
1737 mark_id = tfp_cpu_to_be_32(mark->id);
1738 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1739 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1741 /* Update the hdr_bitmap with vxlan */
1742 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1743 return BNXT_TF_RC_SUCCESS;
1745 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1746 return BNXT_TF_RC_ERROR;
1749 /* Function to handle the parsing of RTE Flow action RSS Header. */
1751 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1752 struct ulp_rte_parser_params *param)
1754 const struct rte_flow_action_rss *rss;
1755 struct ulp_rte_act_prop *ap = ¶m->act_prop;
1757 if (action_item == NULL || action_item->conf == NULL) {
1758 BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1759 return BNXT_TF_RC_ERROR;
1762 rss = action_item->conf;
1763 /* Copy the rss into the specific action properties */
1764 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1765 BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1766 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1767 BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1768 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1769 &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
1771 if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1772 BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1773 return BNXT_TF_RC_ERROR;
1775 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1778 /* set the RSS action header bit */
1779 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1781 return BNXT_TF_RC_SUCCESS;
1784 /* Function to handle the parsing of RTE Flow item eth Header. */
1786 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
1787 const struct rte_flow_item_eth *eth_spec)
1789 struct ulp_rte_hdr_field *field;
1792 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
1793 size = sizeof(eth_spec->dst.addr_bytes);
1794 field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);
1796 size = sizeof(eth_spec->src.addr_bytes);
1797 field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);
1799 size = sizeof(eth_spec->type);
1800 field = ulp_rte_parser_fld_copy(field, ð_spec->type, size);
1802 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
1805 /* Function to handle the parsing of RTE Flow item vlan Header. */
1807 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
1808 const struct rte_flow_item_vlan *vlan_spec,
1811 struct ulp_rte_hdr_field *field;
1815 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
1816 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1817 BNXT_ULP_HDR_BIT_OO_VLAN);
1819 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
1820 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1821 BNXT_ULP_HDR_BIT_OI_VLAN);
1824 size = sizeof(vlan_spec->tci);
1825 field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);
1827 size = sizeof(vlan_spec->inner_type);
1828 field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
1831 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
1833 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
1834 const struct rte_flow_item_ipv4 *ip)
1836 struct ulp_rte_hdr_field *field;
1840 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
1841 size = sizeof(ip->hdr.version_ihl);
1842 if (!ip->hdr.version_ihl)
1843 val8 = RTE_IPV4_VHL_DEF;
1845 val8 = ip->hdr.version_ihl;
1846 field = ulp_rte_parser_fld_copy(field, &val8, size);
1848 size = sizeof(ip->hdr.type_of_service);
1849 field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
1851 size = sizeof(ip->hdr.packet_id);
1852 field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
1854 size = sizeof(ip->hdr.fragment_offset);
1855 field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
1857 size = sizeof(ip->hdr.time_to_live);
1858 if (!ip->hdr.time_to_live)
1859 val8 = BNXT_ULP_DEFAULT_TTL;
1861 val8 = ip->hdr.time_to_live;
1862 field = ulp_rte_parser_fld_copy(field, &val8, size);
1864 size = sizeof(ip->hdr.next_proto_id);
1865 field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
1867 size = sizeof(ip->hdr.src_addr);
1868 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1870 size = sizeof(ip->hdr.dst_addr);
1871 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1873 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
1876 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
1878 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
1879 const struct rte_flow_item_ipv6 *ip)
1881 struct ulp_rte_hdr_field *field;
1886 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
1887 size = sizeof(ip->hdr.vtc_flow);
1888 if (!ip->hdr.vtc_flow)
1889 val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
1891 val32 = ip->hdr.vtc_flow;
1892 field = ulp_rte_parser_fld_copy(field, &val32, size);
1894 size = sizeof(ip->hdr.proto);
1895 field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
1897 size = sizeof(ip->hdr.hop_limits);
1898 if (!ip->hdr.hop_limits)
1899 val8 = BNXT_ULP_DEFAULT_TTL;
1901 val8 = ip->hdr.hop_limits;
1902 field = ulp_rte_parser_fld_copy(field, &val8, size);
1904 size = sizeof(ip->hdr.src_addr);
1905 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1907 size = sizeof(ip->hdr.dst_addr);
1908 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1910 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
1913 /* Function to handle the parsing of RTE Flow item UDP Header. */
1915 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
1916 const struct rte_flow_item_udp *udp_spec)
1918 struct ulp_rte_hdr_field *field;
1920 uint8_t type = IPPROTO_UDP;
1922 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
1923 size = sizeof(udp_spec->hdr.src_port);
1924 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
1926 size = sizeof(udp_spec->hdr.dst_port);
1927 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
1929 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
1931 /* Update thhe ip header protocol */
1932 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
1933 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1934 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
1935 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1938 /* Function to handle the parsing of RTE Flow item vxlan Header. */
1940 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
1941 struct rte_flow_item_vxlan *vxlan_spec)
1943 struct ulp_rte_hdr_field *field;
1946 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
1947 size = sizeof(vxlan_spec->flags);
1948 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);
1950 size = sizeof(vxlan_spec->rsvd0);
1951 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);
1953 size = sizeof(vxlan_spec->vni);
1954 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);
1956 size = sizeof(vxlan_spec->rsvd1);
1957 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);
1959 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1962 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1964 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1965 struct ulp_rte_parser_params *params)
1967 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1968 const struct rte_flow_item *item;
1969 const struct rte_flow_item_ipv4 *ipv4_spec;
1970 const struct rte_flow_item_ipv6 *ipv6_spec;
1971 struct rte_flow_item_vxlan vxlan_spec;
1972 uint32_t vlan_num = 0, vlan_size = 0;
1973 uint32_t ip_size = 0, ip_type = 0;
1974 uint32_t vxlan_size = 0;
1975 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1976 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1978 vxlan_encap = action_item->conf;
1980 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1981 return BNXT_TF_RC_ERROR;
1984 item = vxlan_encap->definition;
1986 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1987 return BNXT_TF_RC_ERROR;
1990 if (!ulp_rte_item_skip_void(&item, 0))
1991 return BNXT_TF_RC_ERROR;
1993 /* must have ethernet header */
1994 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1995 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1996 return BNXT_TF_RC_ERROR;
1999 /* Parse the ethernet header */
2001 ulp_rte_enc_eth_hdr_handler(params, item->spec);
2003 /* Goto the next item */
2004 if (!ulp_rte_item_skip_void(&item, 1))
2005 return BNXT_TF_RC_ERROR;
2007 /* May have vlan header */
2008 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2011 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
2013 if (!ulp_rte_item_skip_void(&item, 1))
2014 return BNXT_TF_RC_ERROR;
2017 /* may have two vlan headers */
2018 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2021 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
2023 if (!ulp_rte_item_skip_void(&item, 1))
2024 return BNXT_TF_RC_ERROR;
2027 /* Update the vlan count and size of more than one */
2029 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
2030 vlan_num = tfp_cpu_to_be_32(vlan_num);
2031 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
2034 vlan_size = tfp_cpu_to_be_32(vlan_size);
2035 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
2040 /* L3 must be IPv4, IPv6 */
2041 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
2042 ipv4_spec = item->spec;
2043 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2045 /* Update the ip size details */
2046 ip_size = tfp_cpu_to_be_32(ip_size);
2047 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2048 &ip_size, sizeof(uint32_t));
2050 /* update the ip type */
2051 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2052 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2053 &ip_type, sizeof(uint32_t));
2055 /* update the computed field to notify it is ipv4 header */
2056 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2059 ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2061 if (!ulp_rte_item_skip_void(&item, 1))
2062 return BNXT_TF_RC_ERROR;
2063 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2064 ipv6_spec = item->spec;
2065 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2067 /* Update the ip size details */
2068 ip_size = tfp_cpu_to_be_32(ip_size);
2069 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2070 &ip_size, sizeof(uint32_t));
2072 /* update the ip type */
2073 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2074 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2075 &ip_type, sizeof(uint32_t));
2077 /* update the computed field to notify it is ipv6 header */
2078 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2081 ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2083 if (!ulp_rte_item_skip_void(&item, 1))
2084 return BNXT_TF_RC_ERROR;
2086 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2087 return BNXT_TF_RC_ERROR;
2091 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2092 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
2093 return BNXT_TF_RC_ERROR;
2096 ulp_rte_enc_udp_hdr_handler(params, item->spec);
2098 if (!ulp_rte_item_skip_void(&item, 1))
2099 return BNXT_TF_RC_ERROR;
2102 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2103 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
2104 return BNXT_TF_RC_ERROR;
2106 vxlan_size = sizeof(struct rte_flow_item_vxlan);
2107 /* copy the vxlan details */
2108 memcpy(&vxlan_spec, item->spec, vxlan_size);
2109 vxlan_spec.flags = 0x08;
2110 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2111 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2112 &vxlan_size, sizeof(uint32_t));
2114 ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2116 /* update the hdr_bitmap with vxlan */
2117 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2118 return BNXT_TF_RC_SUCCESS;
2121 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
2123 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
2125 struct ulp_rte_parser_params *params)
2127 /* update the hdr_bitmap with vxlan */
2128 ULP_BITMAP_SET(params->act_bitmap.bits,
2129 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2130 /* Update computational field with tunnel decap info */
2131 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2132 return BNXT_TF_RC_SUCCESS;
2135 /* Function to handle the parsing of RTE Flow action drop Header. */
2137 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2138 struct ulp_rte_parser_params *params)
2140 /* Update the hdr_bitmap with drop */
2141 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2142 return BNXT_TF_RC_SUCCESS;
2145 /* Function to handle the parsing of RTE Flow action count. */
2147 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2148 struct ulp_rte_parser_params *params)
2150 const struct rte_flow_action_count *act_count;
2151 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
2153 act_count = action_item->conf;
2155 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2157 BNXT_ULP_ACT_PROP_SZ_COUNT);
2160 /* Update the hdr_bitmap with count */
2161 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2162 return BNXT_TF_RC_SUCCESS;
2165 /* Function to handle the parsing of action ports. */
2167 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2169 enum bnxt_ulp_direction_type act_dir)
2171 enum bnxt_ulp_direction_type dir;
2174 struct ulp_rte_act_prop *act = ¶m->act_prop;
2175 enum bnxt_ulp_intf_type port_type;
2178 /* Get the direction */
2179 /* If action implicitly specifies direction, use the specification. */
2180 dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
2181 ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
2183 port_type = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2184 if (dir == BNXT_ULP_DIR_EGRESS &&
2185 port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
2186 /* For egress direction, fill vport */
2187 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2188 return BNXT_TF_RC_ERROR;
2191 pid = rte_cpu_to_be_32(pid);
2192 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2193 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2195 /* For ingress direction, fill vnic */
2197 * Action Destination
2198 * ------------------------------------
2199 * PORT_REPRESENTOR Driver Function
2200 * ------------------------------------
2201 * REPRESENTED_PORT VF
2202 * ------------------------------------
2205 if (act_dir != BNXT_ULP_DIR_INGRESS &&
2206 port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2207 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2209 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2211 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2213 return BNXT_TF_RC_ERROR;
2216 pid = rte_cpu_to_be_32(pid);
2217 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2218 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2221 /* Update the action port set bit */
2222 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2223 return BNXT_TF_RC_SUCCESS;
2226 /* Function to handle the parsing of RTE Flow action PF. */
2228 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2229 struct ulp_rte_parser_params *params)
2233 enum bnxt_ulp_intf_type intf_type;
2235 /* Get the port id of the current device */
2236 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2238 /* Get the port db ifindex */
2239 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2241 BNXT_TF_DBG(ERR, "Invalid port id\n");
2242 return BNXT_TF_RC_ERROR;
2245 /* Check the port is PF port */
2246 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2247 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2248 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2249 return BNXT_TF_RC_ERROR;
2251 /* Update the action properties */
2252 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2253 return ulp_rte_parser_act_port_set(params, ifindex,
2254 BNXT_ULP_DIR_INVALID);
2257 /* Function to handle the parsing of RTE Flow action VF. */
2259 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2260 struct ulp_rte_parser_params *params)
2262 const struct rte_flow_action_vf *vf_action;
2263 enum bnxt_ulp_intf_type intf_type;
2267 vf_action = action_item->conf;
2269 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2270 return BNXT_TF_RC_PARSE_ERR;
2273 if (vf_action->original) {
2274 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2275 return BNXT_TF_RC_PARSE_ERR;
2278 bp = bnxt_pmd_get_bp(params->port_id);
2280 BNXT_TF_DBG(ERR, "Invalid bp\n");
2281 return BNXT_TF_RC_ERROR;
2284 /* vf_action->id is a logical number which in this case is an
2285 * offset from the first VF. So, to get the absolute VF id, the
2286 * offset must be added to the absolute first vf id of that port.
2288 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2292 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2293 return BNXT_TF_RC_ERROR;
2295 /* Check the port is VF port */
2296 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2297 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2298 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2299 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2300 return BNXT_TF_RC_ERROR;
2303 /* Update the action properties */
2304 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2305 return ulp_rte_parser_act_port_set(params, ifindex,
2306 BNXT_ULP_DIR_INVALID);
2309 /* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
2311 ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
2312 struct ulp_rte_parser_params *param)
2316 enum bnxt_ulp_intf_type intf_type;
2317 enum bnxt_ulp_direction_type act_dir;
2319 if (!act_item->conf) {
2321 "ParseErr: Invalid Argument\n");
2322 return BNXT_TF_RC_PARSE_ERR;
2324 switch (act_item->type) {
2325 case RTE_FLOW_ACTION_TYPE_PORT_ID: {
2326 const struct rte_flow_action_port_id *port_id = act_item->conf;
2328 if (port_id->original) {
2330 "ParseErr:Portid Original not supported\n");
2331 return BNXT_TF_RC_PARSE_ERR;
2333 ethdev_id = port_id->id;
2334 act_dir = BNXT_ULP_DIR_INVALID;
2337 case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
2338 const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2340 ethdev_id = ethdev->port_id;
2341 act_dir = BNXT_ULP_DIR_INGRESS;
2344 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
2345 const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2347 ethdev_id = ethdev->port_id;
2348 act_dir = BNXT_ULP_DIR_EGRESS;
2352 BNXT_TF_DBG(ERR, "Unknown port action\n");
2353 return BNXT_TF_RC_ERROR;
2356 /* Get the port db ifindex */
2357 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
2359 BNXT_TF_DBG(ERR, "Invalid port id\n");
2360 return BNXT_TF_RC_ERROR;
2363 /* Get the intf type */
2364 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2366 BNXT_TF_DBG(ERR, "Invalid port type\n");
2367 return BNXT_TF_RC_ERROR;
2370 /* Set the action port */
2371 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2372 return ulp_rte_parser_act_port_set(param, ifindex, act_dir);
2375 /* Function to handle the parsing of RTE Flow action phy_port. */
2377 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2378 struct ulp_rte_parser_params *prm)
2380 const struct rte_flow_action_phy_port *phy_port;
2384 enum bnxt_ulp_direction_type dir;
2386 phy_port = action_item->conf;
2389 "ParseErr: Invalid Argument\n");
2390 return BNXT_TF_RC_PARSE_ERR;
2393 if (phy_port->original) {
2395 "Parse Err:Port Original not supported\n");
2396 return BNXT_TF_RC_PARSE_ERR;
2398 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2399 if (dir != BNXT_ULP_DIR_EGRESS) {
2401 "Parse Err:Phy ports are valid only for egress\n");
2402 return BNXT_TF_RC_PARSE_ERR;
2404 /* Get the physical port details from port db */
2405 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2408 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2413 pid = rte_cpu_to_be_32(pid);
2414 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2415 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2417 /* Update the action port set bit */
2418 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2419 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2420 BNXT_ULP_INTF_TYPE_PHY_PORT);
2421 return BNXT_TF_RC_SUCCESS;
2424 /* Function to handle the parsing of RTE Flow action pop vlan. */
2426 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2427 struct ulp_rte_parser_params *params)
2429 /* Update the act_bitmap with pop */
2430 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2431 return BNXT_TF_RC_SUCCESS;
2434 /* Function to handle the parsing of RTE Flow action push vlan. */
2436 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2437 struct ulp_rte_parser_params *params)
2439 const struct rte_flow_action_of_push_vlan *push_vlan;
2441 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2443 push_vlan = action_item->conf;
2445 ethertype = push_vlan->ethertype;
2446 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2448 "Parse Err: Ethertype not supported\n");
2449 return BNXT_TF_RC_PARSE_ERR;
2451 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2452 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2453 /* Update the hdr_bitmap with push vlan */
2454 ULP_BITMAP_SET(params->act_bitmap.bits,
2455 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2456 return BNXT_TF_RC_SUCCESS;
2458 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2459 return BNXT_TF_RC_ERROR;
2462 /* Function to handle the parsing of RTE Flow action set vlan id. */
2464 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2465 struct ulp_rte_parser_params *params)
2467 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2469 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2471 vlan_vid = action_item->conf;
2472 if (vlan_vid && vlan_vid->vlan_vid) {
2473 vid = vlan_vid->vlan_vid;
2474 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2475 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2476 /* Update the hdr_bitmap with vlan vid */
2477 ULP_BITMAP_SET(params->act_bitmap.bits,
2478 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2479 return BNXT_TF_RC_SUCCESS;
2481 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2482 return BNXT_TF_RC_ERROR;
2485 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2487 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2488 struct ulp_rte_parser_params *params)
2490 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2492 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2494 vlan_pcp = action_item->conf;
2496 pcp = vlan_pcp->vlan_pcp;
2497 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2498 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2499 /* Update the hdr_bitmap with vlan vid */
2500 ULP_BITMAP_SET(params->act_bitmap.bits,
2501 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2502 return BNXT_TF_RC_SUCCESS;
2504 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2505 return BNXT_TF_RC_ERROR;
2508 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2510 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2511 struct ulp_rte_parser_params *params)
2513 const struct rte_flow_action_set_ipv4 *set_ipv4;
2514 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2516 set_ipv4 = action_item->conf;
2518 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2519 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2520 /* Update the hdr_bitmap with set ipv4 src */
2521 ULP_BITMAP_SET(params->act_bitmap.bits,
2522 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2523 return BNXT_TF_RC_SUCCESS;
2525 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2526 return BNXT_TF_RC_ERROR;
2529 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2531 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2532 struct ulp_rte_parser_params *params)
2534 const struct rte_flow_action_set_ipv4 *set_ipv4;
2535 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2537 set_ipv4 = action_item->conf;
2539 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2540 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2541 /* Update the hdr_bitmap with set ipv4 dst */
2542 ULP_BITMAP_SET(params->act_bitmap.bits,
2543 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2544 return BNXT_TF_RC_SUCCESS;
2546 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2547 return BNXT_TF_RC_ERROR;
2550 /* Function to handle the parsing of RTE Flow action set tp src.*/
2552 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2553 struct ulp_rte_parser_params *params)
2555 const struct rte_flow_action_set_tp *set_tp;
2556 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2558 set_tp = action_item->conf;
2560 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2561 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2562 /* Update the hdr_bitmap with set tp src */
2563 ULP_BITMAP_SET(params->act_bitmap.bits,
2564 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2565 return BNXT_TF_RC_SUCCESS;
2568 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2569 return BNXT_TF_RC_ERROR;
2572 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2574 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2575 struct ulp_rte_parser_params *params)
2577 const struct rte_flow_action_set_tp *set_tp;
2578 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2580 set_tp = action_item->conf;
2582 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2583 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2584 /* Update the hdr_bitmap with set tp dst */
2585 ULP_BITMAP_SET(params->act_bitmap.bits,
2586 BNXT_ULP_ACT_BIT_SET_TP_DST);
2587 return BNXT_TF_RC_SUCCESS;
2590 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2591 return BNXT_TF_RC_ERROR;
2594 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2596 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2597 struct ulp_rte_parser_params *params)
2599 /* Update the act_bitmap with dec ttl */
2600 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2601 return BNXT_TF_RC_SUCCESS;
2604 /* Function to handle the parsing of RTE Flow action JUMP */
2606 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2607 struct ulp_rte_parser_params *params)
2609 /* Update the act_bitmap with dec ttl */
2610 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2611 return BNXT_TF_RC_SUCCESS;
2615 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2616 struct ulp_rte_parser_params *params)
2618 const struct rte_flow_action_sample *sample;
2621 sample = action_item->conf;
2623 /* if SAMPLE bit is set it means this sample action is nested within the
2624 * actions of another sample action; this is not allowed
2626 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2627 BNXT_ULP_ACT_BIT_SAMPLE))
2628 return BNXT_TF_RC_ERROR;
2630 /* a sample action is only allowed as a shared action */
2631 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2632 BNXT_ULP_ACT_BIT_SHARED))
2633 return BNXT_TF_RC_ERROR;
2635 /* only a ratio of 1 i.e. 100% is supported */
2636 if (sample->ratio != 1)
2637 return BNXT_TF_RC_ERROR;
2639 if (!sample->actions)
2640 return BNXT_TF_RC_ERROR;
2642 /* parse the nested actions for a sample action */
2643 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2644 if (ret == BNXT_TF_RC_SUCCESS)
2645 /* Update the act_bitmap with sample */
2646 ULP_BITMAP_SET(params->act_bitmap.bits,
2647 BNXT_ULP_ACT_BIT_SAMPLE);
2652 /* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
2654 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
2655 struct ulp_rte_parser_params *params)
2657 /* Set the F1 flow header bit */
2658 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
2659 return ulp_rte_vxlan_decap_act_handler(action_item, params);
2662 /* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
2664 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
2665 struct ulp_rte_parser_params *params)
2668 /* Set the F2 flow header bit */
2669 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
2670 return ulp_rte_vxlan_decap_act_handler(NULL, params);