1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
20 #include "ulp_template_db_tbl.h"
/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
/* NOTE(review): 0x700 is compared against the byte-swapped, shifted PCP
 * mask in the VLAN handler below — confirm endianness intent. */
#define ULP_VLAN_PRIORITY_MASK		0x700
#define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits */
/* IANA-assigned VXLAN UDP destination port */
#define ULP_UDP_PORT_VXLAN		4789
/* Utility function to skip the void items. */
/*
 * Walks *item forward while the current item is RTE_FLOW_ITEM_TYPE_VOID,
 * presumably stepping by 'increment' entries each pass — loop body not
 * fully visible here, confirm against full source.
 */
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
/* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec using the field's own size; presumably
 * returns a pointer to the next hdr_field slot — tail not visible, confirm.
 */
static struct ulp_rte_hdr_field *
ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
	memcpy(field->spec, buffer, field->size);
/* Utility function to update the field_bitmap */
/*
 * Marks field 'idx' in the match bitmaps when its mask has any bit set:
 * fld_bitmap always, fld_s_bitmap only when the parser action does not
 * request MATCH_IGNORE.  A partially-set mask (not all ones) flags the
 * flow as a wildcard match.  The final RESET is presumably the else
 * branch for an all-zero mask — brace lines not visible here.
 */
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
	enum bnxt_ulp_prsr_action prsr_act)
	struct ulp_rte_hdr_field *field;

	field = &params->hdr_field[idx];
	if (ulp_bitmap_notzero(field->mask, field->size)) {
		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
		/* mask not all ones -> wildcard-style match */
		if (!ulp_bitmap_is_ones(field->mask, field->size))
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
	ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
77 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
/* Utility function to copy field spec and masks items */
/*
 * Copies an item's mask (unless MASK_IGNORE) and, when the mask is
 * non-zero, its spec into hdr_field[*idx], then advances *idx for the
 * next field.  'size' assignment and the index increment lines are not
 * visible in this excerpt.
 */
ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
		      const void *spec_buff,
		      const void *mask_buff,
		      enum bnxt_ulp_prsr_action prsr_act)
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	/* update the field size */

	/* copy the mask specifications only if mask is not null */
	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
		memcpy(field->mask, mask_buff, size);
		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);

	/* copy the protocol specifications only if mask is not null */
	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
		memcpy(field->spec, spec_buff, size);

	/* Increment the index */
/* Utility function to reserve 'size' hdr_field slots, returning the
 * starting index through *idx and failing when the table would overflow. */
ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
	if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
		/* NOTE(review): *idx is logged before it is assigned below,
		 * so this prints the caller's stale value — confirm intent. */
		BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
	*idx = params->field_idx;
	params->field_idx += size;
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 *
 * Iterates the pattern until ITEM_TYPE_END, dispatching each item to the
 * registered proto_hdr_func from either ulp_vendor_hdr_info (Broadcom
 * vendor item types) or ulp_hdr_info (standard rte_flow item types).
 * On success, falls through to set the implied source interface.
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	/* SVIF fields occupy the first hdr_field slots */
	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Set the computed flags for no vlan tags before parsing */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* Vendor-specific item types sit past the standard range */
		if (item->type >= (uint32_t)
		    BNXT_RTE_FLOW_ITEM_TYPE_END) {
			(uint32_t)BNXT_RTE_FLOW_ITEM_TYPE_LAST)
				goto hdr_parser_error;
			/* get the header information */
			hdr_info = &ulp_vendor_hdr_info[item->type -
				BNXT_RTE_FLOW_ITEM_TYPE_END];
			if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
				goto hdr_parser_error;
			hdr_info = &ulp_hdr_info[item->type];
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			goto hdr_parser_error;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);

	/* hdr_parser_error label presumably precedes this — confirm */
	BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
	return BNXT_TF_RC_PARSE_ERR;
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 *
 * Mirrors bnxt_ulp_rte_parser_hdr_parse for the action list: each action
 * up to ACTION_TYPE_END is dispatched through ulp_vendor_act_info or
 * ulp_act_info; afterwards the implicit destination port is applied.
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the items in the pattern */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		/* Vendor-specific action types sit past the standard range */
		if (action_item->type >=
		    (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_END) {
			if (action_item->type >=
			    (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_LAST)
				goto act_parser_error;
			/* get the header information from bnxt actinfo table */
			hdr_info = &ulp_vendor_act_info[action_item->type -
				BNXT_RTE_FLOW_ACTION_TYPE_END];
			if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
				goto act_parser_error;
			/* get the header information from the act info table */
			hdr_info = &ulp_act_info[action_item->type];
		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			goto act_parser_error;
		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
	/* update the implied port details */
	ulp_rte_parser_implicit_act_port_process(params);
	return BNXT_TF_RC_SUCCESS;

	/* act_parser_error label presumably precedes this — confirm */
	BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
	return BNXT_TF_RC_ERROR;
 * Function to handle the post processing of the computed
 * fields for the interface.
 *
 * Resolves the incoming dpdk port to a ulp ifindex and, for ingress
 * flows, records the PHY-port PARIF; for the non-ingress path it
 * records the VF-func PARIF (VF-rep match ports) or DRV-func PARIF,
 * plus the IS_VFREP / IS_PF computed-field flags.
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
	uint16_t port_id, parif;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Ingress flows match on the physical-port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
					    "ParseErr:ifindex is not valid\n");
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,

			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
					    "ParseErr:ifindex is not valid\n");
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
/*
 * Post-process a parsed (non-tunnel) flow: stamp the direction bit into
 * both the header and action bitmaps, derive VF-to-VF and decrement-TTL
 * computed fields, fold the fast-path header bits into hdr_bitmap, record
 * the flow id, and update the interface computed fields.
 */
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);

	/* calculate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
		 * Check that vxlan proto is included and vxlan decap
		 * action is not set then decrement tunnel ttl.
		 * Similarly add GRE and NVGRE in future.
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the comp fld fid */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
 * Function to handle the post processing of the parsing details
 *
 * Thin wrapper that currently only runs the normal-flow post-processing.
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
	ulp_post_process_normal_flow(params);
 * Function to compute the flow direction based on the match port details
 *
 * An ingress-attributed flow that matches a VF representor is treated as
 * egress (traffic leaves the host toward the VF); otherwise the flow
 * attribute's own direction is used.
bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
	enum bnxt_ulp_intf_type match_port_type;

	/* Get the match port type */
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);

	/* If ingress flow and matchport is vf rep then dir is egress */
	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
				    BNXT_ULP_DIR_EGRESS);
		/* Assign the input direction */
		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_INGRESS);
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_EGRESS);
/* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolves the source virtual interface (SVIF) for the flow: rejects a
 * second source item, records the match port type, computes direction,
 * picks the SVIF kind (phy-port for ingress non-VF-rep, VF-func for
 * VF-rep unless explicitly egress, else drv-func), then writes the
 * big-endian SVIF spec/mask into the dedicated hdr_field slot and the
 * SVIF_FLAG computed field.  'mask' parameter line is not visible here.
 */
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
	enum bnxt_ulp_direction_type item_dir)
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	/* Only one source-interface item is allowed per flow */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		"SVIF already set,multiple source not support'd\n");
		return BNXT_TF_RC_ERROR;

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Get the computed direction; an explicit item direction wins */
	dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
		ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
		    item_dir != BNXT_ULP_DIR_EGRESS)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
	/* SVIF is stored big-endian in the match field */
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
/* Function to handle the parsing of the RTE port id */
/*
 * If no explicit source item set the SVIF, derive it from the incoming
 * dpdk port with an all-ones mask.
 */
ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	int32_t rc = BNXT_TF_RC_ERROR;

	/* Already set by an explicit item — nothing to do */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL)
		return BNXT_TF_RC_SUCCESS;

	/* SVIF not set. So get the port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");

	/* Update the SVIF details */
	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
				     BNXT_ULP_DIR_INVALID);
/* Function to handle the implicit action port id */
/*
 * When the actions did not pick a destination port, synthesize a
 * port-id action from the incoming interface and run it through the
 * regular port-id action handler, then clear the IS_SET flag so the
 * port remains "implicit".
 */
ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
	struct rte_flow_action action_item = {0};
	struct rte_flow_action_port_id port_id = {0};

	/* Read the action port set bit */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
		/* Already set, so just exit */
		return BNXT_TF_RC_SUCCESS;
	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	action_item.conf = &port_id;

	/* Update the action port based on incoming port */
	ulp_rte_port_id_act_handler(&action_item, params);

	/* Reset the action port set bit */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
	return BNXT_TF_RC_SUCCESS;
/* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * The PF item carries no payload; the SVIF comes from the incoming
 * dpdk port, matched with an all-ones mask.
 */
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
		       struct ulp_rte_parser_params *params)
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;

	/* Get the implicit port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return BNXT_TF_RC_ERROR;

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, svif_mask,
				       BNXT_ULP_DIR_INVALID);
/* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * Validates the VF spec/mask, maps the VF function id to a ulp ifindex,
 * and records the SVIF for it.
 */
ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
		       struct ulp_rte_parser_params *params)
	const struct rte_flow_item_vf *vf_spec = item->spec;
	const struct rte_flow_item_vf *vf_mask = item->mask;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	/* Get VF rte_flow_item for Port details */
	BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
	BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");

	/* perform the conversion from VF Func id to bnxt ifindex */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask,
				       BNXT_ULP_DIR_INVALID);
/* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
/*
 * The three port-style items differ only in which struct carries the
 * ethdev id and which direction they imply: PORT_ID leaves direction
 * to be computed, PORT_REPRESENTOR forces ingress, REPRESENTED_PORT
 * forces egress.  The id is then mapped to a ulp ifindex and the SVIF
 * recorded via ulp_rte_parser_svif_set().
 */
ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
	enum bnxt_ulp_direction_type item_dir;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
	BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_PORT_ID: {
		const struct rte_flow_item_port_id *port_spec = item->spec;
		const struct rte_flow_item_port_id *port_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INVALID;
		ethdev_id = port_spec->id;
		mask = port_mask->id;
	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_EGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
/* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * Matches on a physical port: only legal for ingress flows.  Looks up
 * the port's SVIF in the port database and writes it (big-endian) into
 * the SVIF hdr_field slot, like ulp_rte_parser_svif_set() does for the
 * other port items.
 */
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
			     struct ulp_rte_parser_params *params)
	const struct rte_flow_item_phy_port *port_spec = item->spec;
	const struct rte_flow_item_phy_port *port_mask = item->mask;
	int32_t rc = BNXT_TF_RC_ERROR;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;

	/* Copy the rte_flow_item for phy port into hdr_field */
	BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
	BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
	mask = port_mask->index;

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);

	/* Compute the Hw direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Direction validation */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		"Parse Err:Phy ports are valid only for ingress\n");
		return BNXT_TF_RC_PARSE_ERR;

	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return BNXT_TF_RC_PARSE_ERR;

	/* Update the SVIF details */
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
/* Function to handle the update of proto header based on field values */
/*
 * From a big-endian ethertype, set the inner or outer IPv4/IPv6 bit in
 * the fast-path header bitmap and mark the corresponding L3 computed
 * field.  'in_flag' selects inner vs outer — the if/else braces are not
 * visible in this excerpt.
 */
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t type, uint32_t in_flag)
	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
/* Internal Function to identify broadcast or multicast packets */
/*
 * Broadcast/multicast MAC offload is not supported, so matching on such
 * an address is rejected; presumably returns true for bcast/mcast —
 * return lines not visible here, confirm.
 */
ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
	if (rte_is_multicast_ether_addr(eth_addr) ||
	    rte_is_broadcast_ether_addr(eth_addr)) {
		"No support for bcast or mcast addr offload\n");
/* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Rejects bcast/mcast MACs, copies dst/src MAC and ethertype into the
 * hdr_field table (ethertype with MATCH_IGNORE so templates can match
 * it loosely), classifies the header as inner or outer ETH based on
 * which protocol bits are already set, and propagates the ethertype
 * into the L3 fast-path bits.
 */
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	uint32_t idx = 0, dmac_idx = 0;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/* Perform validations */
	/* Todo: work around to avoid multicast and broadcast addr */
	if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
		return BNXT_TF_RC_PARSE_ERR;
	if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
		return BNXT_TF_RC_PARSE_ERR;
	eth_type = eth_spec->type;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ETH_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, dst.addr_bytes),
			      ulp_deference_struct(eth_mask, dst.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, src.addr_bytes),
			      ulp_deference_struct(eth_mask, src.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	/* ethertype is used for classification, not exact match */
	size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, type),
			      ulp_deference_struct(eth_mask, type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		/* an outer L2/L3/L4 already seen -> this ETH is inner */
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,

	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

	return BNXT_TF_RC_SUCCESS;
/* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Splits the TCI into priority (PCP, top 3 bits) and VLAN id (low 12
 * bits), widens an exact PCP or VID mask to a full 16-bit exact match,
 * copies priority (mask-ignored), tag, and inner ethertype into the
 * hdr_field table, then classifies the tag as outer-outer, outer-inner,
 * inner-outer or inner-inner based on which ETH headers were seen and
 * how many tags are already counted, updating the vtag computed fields
 * and per-position VLAN header bits accordingly.
 */
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bit;
	uint16_t vlan_tag = 0, priority = 0;
	uint16_t vlan_tag_mask = 0, priority_mask = 0;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	vlan_tag = ntohs(vlan_spec->tci);
	priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
	vlan_tag &= ULP_VLAN_TAG_MASK;
	vlan_tag = htons(vlan_tag);
	eth_type = vlan_spec->inner_type;

	vlan_tag_mask = ntohs(vlan_mask->tci);
	priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
	vlan_tag_mask &= 0xfff;
	 * the storage for priority and vlan tag is 2 bytes
	 * The mask of priority which is 3 bits if it is all 1's
	 * then make the rest bits 13 bits as 1's
	 * so that it is matched as exact match.
	if (priority_mask == ULP_VLAN_PRIORITY_MASK)
		priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
	if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
		vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
	vlan_tag_mask = htons(vlan_tag_mask);

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
	 * The priority field is ignored since OVS is setting it as
	 * wild card match and it is not supported. This is a work
	 * around and shall be addressed in the future.
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      (vlan_mask) ? &priority_mask : NULL,
			      ULP_PRSR_ACT_MASK_IGNORE);

	ulp_rte_prsr_fld_mask(params, &idx, size,
			      (vlan_mask) ? &vlan_tag_mask : NULL,
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vlan_spec, inner_type),
			      ulp_deference_struct(vlan_mask, inner_type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	/* first tag after the outer ETH: outer-outer VLAN */
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		/* Update the vlan tag num */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);
	/* second tag after the outer ETH: outer-inner VLAN */
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* update the vlan tag num */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);
	/* first tag after the inner ETH: inner-outer VLAN */
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		/* update the vlan tag num */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
	/* second tag after the inner ETH: inner-inner VLAN */
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* update the vlan tag num */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
	BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
	return BNXT_TF_RC_ERROR;
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
	return BNXT_TF_RC_SUCCESS;
/* Function to handle the update of proto header based on field values */
/*
 * From an IP next-protocol value, set the matching inner/outer UDP, TCP,
 * GRE tunnel, or ICMP header bit, and record the protocol id in the
 * L3 computed fields.  'in_flag' selects inner vs outer; several brace
 * and else lines are not visible in this excerpt.
 */
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
			     uint8_t proto, uint32_t in_flag)
	if (proto == IPPROTO_UDP) {
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_I_UDP);
		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_O_UDP);
		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
	} else if (proto == IPPROTO_TCP) {
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_I_TCP);
		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_O_TCP);
		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
	} else if (proto == IPPROTO_GRE) {
		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
	} else if (proto == IPPROTO_ICMP) {
		/* ICMP inside an L3 tunnel counts as inner ICMP */
		if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_I_ICMP);
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_O_ICMP);
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
/* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * Copies every IPv4 header field (version/ihl, tos, total length, id,
 * fragment offset, ttl, proto, checksum, src, dst) into the hdr_field
 * table — tos with MASK_IGNORE and proto with MATCH_IGNORE, both as
 * documented OVS workarounds — then classifies the header as inner or
 * outer IPv4, masks the next-protocol with the item's mask, propagates
 * it into the L4 bits, and bumps the L3 header count (max two L3
 * headers per flow).
 */
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t inner_flag = 0;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
	return BNXT_TF_RC_ERROR;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
			      ULP_PRSR_ACT_DEFAULT);

	 * The tos field is ignored since OVS is setting it as wild card
	 * match and it is not supported. This is a work around and
	 * shall be addressed in the future.
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.type_of_service),
			      ulp_deference_struct(ipv4_mask,
						   hdr.type_of_service),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.total_length),
			      ulp_deference_struct(ipv4_mask, hdr.total_length),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.packet_id),
			      ulp_deference_struct(ipv4_mask, hdr.packet_id),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.fragment_offset),
			      ulp_deference_struct(ipv4_mask,
						   hdr.fragment_offset),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.time_to_live),
			      ulp_deference_struct(ipv4_mask, hdr.time_to_live),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for matching templates */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
			      ulp_deference_struct(ipv4_mask,
			      ULP_PRSR_ACT_MATCH_IGNORE);
	proto = ipv4_spec->hdr.next_proto_id;

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
			      ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.src_addr),
			      ulp_deference_struct(ipv4_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv4_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		/* an outer L3 already seen -> this IPv4 is inner */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	/* Update the tunnel offload dest ip offset */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,

	/* Some of the PMD applications may set the protocol field
	 * in the IPv4 spec but don't set the mask. So, consider
	 * the mask in the proto value calculation.
	 */
	proto &= ipv4_mask->hdr.next_proto_id;

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
1146 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * ulp_rte_ipv6_hdr_handler - parse an RTE_FLOW_ITEM_TYPE_IPV6 item.
 * Splits hdr.vtc_flow into version / traffic-class / flow-label
 * spec+mask pairs, copies each IPv6 header field (with mask) into
 * params->hdr_field, and sets the inner or outer IPv6/L3 bitmaps.
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR on a third L3
 * header or a field-count validation failure.
 * NOTE(review): this excerpt is missing some structural lines
 * (braces / if-conditions dropped by extraction); verify control
 * flow against the complete source file.
 */
1148 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1149 struct ulp_rte_parser_params *params)
1151 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1152 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1153 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1154 uint32_t idx = 0, dip_idx = 0;
1156 uint32_t ver_spec = 0, ver_mask = 0;
1157 uint32_t tc_spec = 0, tc_mask = 0;
1158 uint32_t lab_spec = 0, lab_mask = 0;
1160 uint32_t inner_flag = 0;
1163 /* validate there are no 3rd L3 header */
1164 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1166 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1167 return BNXT_TF_RC_ERROR;
/* Ensure enough hdr_field slots remain for all IPv6 fields. */
1170 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1171 BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
1172 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1173 return BNXT_TF_RC_ERROR;
1177 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* Decompose vtc_flow into its three sub-fields for separate matching. */
1181 ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1182 tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1183 lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1184 proto = ipv6_spec->hdr.proto;
1188 ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1189 tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1190 lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1192 /* Some of the PMD applications may set the protocol field
1193 * in the IPv6 spec but don't set the mask. So, consider
1194 * the mask in proto value calculation.
1196 proto &= ipv6_mask->hdr.proto;
1199 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
1200 ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
1201 ULP_PRSR_ACT_DEFAULT);
1203 * The TC and flow label field are ignored since OVS is
1204 * setting it for match and it is not supported.
1205 * This is a work around and
1206 * shall be addressed in the future.
1208 ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
1209 ULP_PRSR_ACT_MASK_IGNORE);
1210 ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
1211 ULP_PRSR_ACT_MASK_IGNORE);
1213 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
1214 ulp_rte_prsr_fld_mask(params, &idx, size,
1215 ulp_deference_struct(ipv6_spec, hdr.payload_len),
1216 ulp_deference_struct(ipv6_mask, hdr.payload_len),
1217 ULP_PRSR_ACT_DEFAULT);
1219 /* Ignore proto for template matching */
1220 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
1221 ulp_rte_prsr_fld_mask(params, &idx, size,
1222 ulp_deference_struct(ipv6_spec, hdr.proto),
1223 ulp_deference_struct(ipv6_mask, hdr.proto),
1224 ULP_PRSR_ACT_MATCH_IGNORE);
1226 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
1227 ulp_rte_prsr_fld_mask(params, &idx, size,
1228 ulp_deference_struct(ipv6_spec, hdr.hop_limits),
1229 ulp_deference_struct(ipv6_mask, hdr.hop_limits),
1230 ULP_PRSR_ACT_DEFAULT);
1232 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
1233 ulp_rte_prsr_fld_mask(params, &idx, size,
1234 ulp_deference_struct(ipv6_spec, hdr.src_addr),
1235 ulp_deference_struct(ipv6_mask, hdr.src_addr),
1236 ULP_PRSR_ACT_DEFAULT);
1239 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
1240 ulp_rte_prsr_fld_mask(params, &idx, size,
1241 ulp_deference_struct(ipv6_spec, hdr.dst_addr),
1242 ulp_deference_struct(ipv6_mask, hdr.dst_addr),
1243 ULP_PRSR_ACT_DEFAULT);
1245 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
/* An outer L3 already parsed means this IPv6 header is the inner one. */
1246 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1247 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1248 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1249 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1252 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1253 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1254 /* Update the tunnel offload dest ip offset */
1255 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1259 /* Update the field protocol hdr bitmap */
1260 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1261 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1263 return BNXT_TF_RC_SUCCESS;
1266 /* Function to handle the update of proto header based on field values */
/*
 * ulp_rte_l4_proto_type_update - record UDP/TCP port spec+mask values
 * (converted to host byte order) into the inner or outer L4 computed
 * fields, set the matching header bit, and write the implied IP proto
 * (IPPROTO_UDP/IPPROTO_TCP).  For outer UDP with destination port 4789
 * (ULP_UDP_PORT_VXLAN) it also flags a VXLAN tunnel.
 * NOTE(review): switch/break lines were dropped by extraction; verify
 * case boundaries against the complete source file.
 */
1268 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
1269 uint16_t src_port, uint16_t src_mask,
1270 uint16_t dst_port, uint16_t dst_mask,
1271 enum bnxt_ulp_hdr_bit hdr_bit)
/* Inner L4 header: populate the I_* computed fields. */
1274 case BNXT_ULP_HDR_BIT_I_UDP:
1275 case BNXT_ULP_HDR_BIT_I_TCP:
1276 ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
1277 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1278 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1279 (uint64_t)rte_be_to_cpu_16(src_port));
1280 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1281 (uint64_t)rte_be_to_cpu_16(dst_port));
1282 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
1283 (uint64_t)rte_be_to_cpu_16(src_mask));
1284 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
1285 (uint64_t)rte_be_to_cpu_16(dst_mask));
1286 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
/* FB_* flags record whether the port value is actually matched (non-zero
 * after masking). */
1288 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1289 !!(src_port & src_mask));
1290 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1291 !!(dst_port & dst_mask));
1292 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1293 (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
1294 IPPROTO_UDP : IPPROTO_TCP);
/* Outer L4 header: same bookkeeping on the O_* computed fields. */
1296 case BNXT_ULP_HDR_BIT_O_UDP:
1297 case BNXT_ULP_HDR_BIT_O_TCP:
1298 ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
1299 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1300 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1301 (uint64_t)rte_be_to_cpu_16(src_port));
1302 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1303 (uint64_t)rte_be_to_cpu_16(dst_port));
1304 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
1305 (uint64_t)rte_be_to_cpu_16(src_mask));
1306 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1307 (uint64_t)rte_be_to_cpu_16(dst_mask));
1308 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1310 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1311 !!(src_port & src_mask))
;
1312 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1313 !!(dst_port & dst_mask));
1314 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1315 (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
1316 IPPROTO_UDP : IPPROTO_TCP);
/* Outer UDP to the well-known VXLAN port implies a VXLAN tunnel flow. */
1322 if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
1323 tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1324 ULP_BITMAP_SET(params->hdr_fp_bit.bits,
1325 BNXT_ULP_HDR_BIT_T_VXLAN);
1326 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1330 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * ulp_rte_udp_hdr_handler - parse an RTE_FLOW_ITEM_TYPE_UDP item.
 * Copies src/dst port, length and checksum (spec+mask) into
 * params->hdr_field, then delegates port/bitmap bookkeeping to
 * ulp_rte_l4_proto_type_update(), choosing the inner UDP bit when an
 * outer L4 was already seen.  Returns BNXT_TF_RC_SUCCESS/ERROR.
 * NOTE(review): some braces / if-conditions were dropped by
 * extraction; verify control flow against the complete source.
 */
1332 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1333 struct ulp_rte_parser_params *params)
1335 const struct rte_flow_item_udp *udp_spec = item->spec;
1336 const struct rte_flow_item_udp *udp_mask = item->mask;
1337 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1340 uint16_t dport = 0, sport = 0;
1341 uint16_t dport_mask = 0, sport_mask = 0;
1343 enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;
1345 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1347 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1348 return BNXT_TF_RC_ERROR;
/* Ports stay in network byte order here; converted in the update fn. */
1352 sport = udp_spec->hdr.src_port;
1353 dport = udp_spec->hdr.dst_port;
1356 sport_mask = udp_mask->hdr.src_port;
1357 dport_mask = udp_mask->hdr.dst_port;
1360 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1361 BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1362 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1363 return BNXT_TF_RC_ERROR;
1367 * Copy the rte_flow_item for udp into hdr_field using udp
1370 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1371 ulp_rte_prsr_fld_mask(params, &idx, size,
1372 ulp_deference_struct(udp_spec, hdr.src_port),
1373 ulp_deference_struct(udp_mask, hdr.src_port),
1374 ULP_PRSR_ACT_DEFAULT);
1376 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1377 ulp_rte_prsr_fld_mask(params, &idx, size,
1378 ulp_deference_struct(udp_spec, hdr.dst_port),
1379 ulp_deference_struct(udp_mask, hdr.dst_port),
1380 ULP_PRSR_ACT_DEFAULT);
1382 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1383 ulp_rte_prsr_fld_mask(params, &idx, size,
1384 ulp_deference_struct(udp_spec, hdr.dgram_len),
1385 ulp_deference_struct(udp_mask, hdr.dgram_len),
1386 ULP_PRSR_ACT_DEFAULT);
1388 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1389 ulp_rte_prsr_fld_mask(params, &idx, size,
1390 ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1391 ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1392 ULP_PRSR_ACT_DEFAULT);
1394 /* Set the udp header bitmap and computed l4 header bitmaps */
/* An outer L4 already parsed makes this the inner UDP header. */
1395 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1396 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
1397 out_l4 = BNXT_ULP_HDR_BIT_I_UDP;
1399 ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1400 dport_mask, out_l4);
1401 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1402 return BNXT_TF_RC_SUCCESS;
1405 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * ulp_rte_tcp_hdr_handler - parse an RTE_FLOW_ITEM_TYPE_TCP item.
 * Copies every TCP header field (spec+mask) into params->hdr_field,
 * then delegates port/bitmap bookkeeping to
 * ulp_rte_l4_proto_type_update(), choosing the inner TCP bit when an
 * outer L4 was already seen.  Returns BNXT_TF_RC_SUCCESS/ERROR.
 * NOTE(review): some braces / if-conditions were dropped by
 * extraction; verify control flow against the complete source.
 */
1407 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1408 struct ulp_rte_parser_params *params)
1410 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1411 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1412 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1414 uint16_t dport = 0, sport = 0;
1415 uint16_t dport_mask = 0, sport_mask = 0;
1418 enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;
1420 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1422 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1423 return BNXT_TF_RC_ERROR;
/* Ports stay in network byte order here; converted in the update fn. */
1427 sport = tcp_spec->hdr.src_port;
1428 dport = tcp_spec->hdr.dst_port;
1431 sport_mask = tcp_mask->hdr.src_port;
1432 dport_mask = tcp_mask->hdr.dst_port;
1435 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1436 BNXT_ULP_PROTO_HDR_TCP_NUM)) {
1437 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1438 return BNXT_TF_RC_ERROR;
1442 * Copy the rte_flow_item for tcp into hdr_field using tcp
1445 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1446 ulp_rte_prsr_fld_mask(params, &idx, size,
1447 ulp_deference_struct(tcp_spec, hdr.src_port),
1448 ulp_deference_struct(tcp_mask, hdr.src_port),
1449 ULP_PRSR_ACT_DEFAULT);
1451 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1452 ulp_rte_prsr_fld_mask(params, &idx, size,
1453 ulp_deference_struct(tcp_spec, hdr.dst_port),
1454 ulp_deference_struct(tcp_mask, hdr.dst_port),
1455 ULP_PRSR_ACT_DEFAULT);
1457 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1458 ulp_rte_prsr_fld_mask(params, &idx, size,
1459 ulp_deference_struct(tcp_spec, hdr.sent_seq),
1460 ulp_deference_struct(tcp_mask, hdr.sent_seq),
1461 ULP_PRSR_ACT_DEFAULT);
1463 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1464 ulp_rte_prsr_fld_mask(params, &idx, size,
1465 ulp_deference_struct(tcp_spec, hdr.recv_ack),
1466 ulp_deference_struct(tcp_mask, hdr.recv_ack),
1467 ULP_PRSR_ACT_DEFAULT);
1469 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1470 ulp_rte_prsr_fld_mask(params, &idx, size,
1471 ulp_deference_struct(tcp_spec, hdr.data_off),
1472 ulp_deference_struct(tcp_mask, hdr.data_off),
1473 ULP_PRSR_ACT_DEFAULT);
1475 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1476 ulp_rte_prsr_fld_mask(params, &idx, size,
1477 ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1478 ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1479 ULP_PRSR_ACT_DEFAULT);
1481 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1482 ulp_rte_prsr_fld_mask(params, &idx, size,
1483 ulp_deference_struct(tcp_spec, hdr.rx_win),
1484 ulp_deference_struct(tcp_mask, hdr.rx_win),
1485 ULP_PRSR_ACT_DEFAULT);
1487 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1488 ulp_rte_prsr_fld_mask(params, &idx, size,
1489 ulp_deference_struct(tcp_spec, hdr.cksum),
1490 ulp_deference_struct(tcp_mask, hdr.cksum),
1491 ULP_PRSR_ACT_DEFAULT);
1493 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1494 ulp_rte_prsr_fld_mask(params, &idx, size,
1495 ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1496 ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1497 ULP_PRSR_ACT_DEFAULT);
1499 /* Set the tcp header bitmap and computed l4 header bitmaps */
/* An outer L4 already parsed makes this the inner TCP header. */
1500 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1501 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
1502 out_l4 = BNXT_ULP_HDR_BIT_I_TCP;
1504 ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1505 dport_mask, out_l4);
1506 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1507 return BNXT_TF_RC_SUCCESS;
1510 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * ulp_rte_vxlan_hdr_handler - parse an RTE_FLOW_ITEM_TYPE_VXLAN item.
 * Copies flags, rsvd0, vni and rsvd1 (spec+mask) into
 * params->hdr_field, sets the T_VXLAN header bit and marks the flow
 * as an L3 tunnel.  Returns BNXT_TF_RC_SUCCESS/ERROR.
 */
1512 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1513 struct ulp_rte_parser_params *params)
1515 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1516 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1517 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1521 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1522 BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1523 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1524 return BNXT_TF_RC_ERROR;
1528 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1531 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1532 ulp_rte_prsr_fld_mask(params, &idx, size,
1533 ulp_deference_struct(vxlan_spec, flags),
1534 ulp_deference_struct(vxlan_mask, flags),
1535 ULP_PRSR_ACT_DEFAULT);
1537 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1538 ulp_rte_prsr_fld_mask(params, &idx, size,
1539 ulp_deference_struct(vxlan_spec, rsvd0),
1540 ulp_deference_struct(vxlan_mask, rsvd0),
1541 ULP_PRSR_ACT_DEFAULT);
1543 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1544 ulp_rte_prsr_fld_mask(params, &idx, size,
1545 ulp_deference_struct(vxlan_spec, vni),
1546 ulp_deference_struct(vxlan_mask, vni),
1547 ULP_PRSR_ACT_DEFAULT);
1549 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1550 ulp_rte_prsr_fld_mask(params, &idx, size,
1551 ulp_deference_struct(vxlan_spec, rsvd1),
1552 ulp_deference_struct(vxlan_mask, rsvd1),
1553 ULP_PRSR_ACT_DEFAULT);
1555 /* Update the hdr_bitmap with vxlan */
1556 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1557 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1558 return BNXT_TF_RC_SUCCESS;
1561 /* Function to handle the parsing of RTE Flow item GRE Header. */
/*
 * ulp_rte_gre_hdr_handler - parse an RTE_FLOW_ITEM_TYPE_GRE item.
 * Copies c_rsvd0_ver and protocol (spec+mask) into params->hdr_field,
 * sets the T_GRE header bit and marks the flow as an L3 tunnel.
 * Returns BNXT_TF_RC_SUCCESS/ERROR.
 */
1563 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1564 struct ulp_rte_parser_params *params)
1566 const struct rte_flow_item_gre *gre_spec = item->spec;
1567 const struct rte_flow_item_gre *gre_mask = item->mask;
1568 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1572 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1573 BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1574 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1575 return BNXT_TF_RC_ERROR;
1578 size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1579 ulp_rte_prsr_fld_mask(params, &idx, size,
1580 ulp_deference_struct(gre_spec, c_rsvd0_ver),
1581 ulp_deference_struct(gre_mask, c_rsvd0_ver),
1582 ULP_PRSR_ACT_DEFAULT);
1584 size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1585 ulp_rte_prsr_fld_mask(params, &idx, size,
1586 ulp_deference_struct(gre_spec, protocol),
1587 ulp_deference_struct(gre_mask, protocol),
1588 ULP_PRSR_ACT_DEFAULT);
1590 /* Update the hdr_bitmap with GRE */
1591 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1592 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1593 return BNXT_TF_RC_SUCCESS;
1596 /* Function to handle the parsing of RTE Flow item ANY. */
/* Intentional no-op: ITEM_ANY contributes nothing to the match key. */
1598 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1599 struct ulp_rte_parser_params *params __rte_unused)
1601 return BNXT_TF_RC_SUCCESS;
1604 /* Function to handle the parsing of RTE Flow item ICMP Header. */
/*
 * ulp_rte_icmp_hdr_handler - parse an RTE_FLOW_ITEM_TYPE_ICMP item.
 * Copies type, code, checksum, identifier and sequence number
 * (spec+mask) into params->hdr_field, then sets the inner ICMP bit
 * when the flow is a tunnel, otherwise the outer ICMP bit.
 * Returns BNXT_TF_RC_SUCCESS/ERROR.
 */
1606 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1607 struct ulp_rte_parser_params *params)
1609 const struct rte_flow_item_icmp *icmp_spec = item->spec;
1610 const struct rte_flow_item_icmp *icmp_mask = item->mask;
1611 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1615 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1616 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1617 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1618 return BNXT_TF_RC_ERROR;
1621 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1622 ulp_rte_prsr_fld_mask(params, &idx, size,
1623 ulp_deference_struct(icmp_spec, hdr.icmp_type),
1624 ulp_deference_struct(icmp_mask, hdr.icmp_type),
1625 ULP_PRSR_ACT_DEFAULT);
1627 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1628 ulp_rte_prsr_fld_mask(params, &idx, size,
1629 ulp_deference_struct(icmp_spec, hdr.icmp_code),
1630 ulp_deference_struct(icmp_mask, hdr.icmp_code),
1631 ULP_PRSR_ACT_DEFAULT);
1633 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1634 ulp_rte_prsr_fld_mask(params, &idx, size,
1635 ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1636 ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1637 ULP_PRSR_ACT_DEFAULT);
1639 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1640 ulp_rte_prsr_fld_mask(params, &idx, size,
1641 ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1642 ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1643 ULP_PRSR_ACT_DEFAULT);
1645 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1646 ulp_rte_prsr_fld_mask(params, &idx, size,
1647 ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1648 ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1649 ULP_PRSR_ACT_DEFAULT);
1651 /* Update the hdr_bitmap with ICMP */
/* Tunnel flows treat the ICMP header as inner; otherwise outer. */
1652 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1653 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1655 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1656 return BNXT_TF_RC_SUCCESS;
1659 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
/*
 * ulp_rte_icmp6_hdr_handler - parse an RTE_FLOW_ITEM_TYPE_ICMP6 item.
 * Copies type, code and checksum (spec+mask) into params->hdr_field.
 * Rejects the item if an outer IPv4 header was already parsed (ICMPv6
 * is only valid over IPv6), then sets the inner/outer ICMP bit.
 * Returns BNXT_TF_RC_SUCCESS/ERROR.
 */
1661 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1662 struct ulp_rte_parser_params *params)
1664 const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1665 const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1666 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1670 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1671 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1672 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1673 return BNXT_TF_RC_ERROR;
1676 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1677 ulp_rte_prsr_fld_mask(params, &idx, size,
1678 ulp_deference_struct(icmp_spec, type),
1679 ulp_deference_struct(icmp_mask, type),
1680 ULP_PRSR_ACT_DEFAULT);
1682 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1683 ulp_rte_prsr_fld_mask(params, &idx, size,
1684 ulp_deference_struct(icmp_spec, code),
1685 ulp_deference_struct(icmp_mask, code),
1686 ULP_PRSR_ACT_DEFAULT);
1688 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
1689 ulp_rte_prsr_fld_mask(params, &idx, size,
1690 ulp_deference_struct(icmp_spec, checksum),
1691 ulp_deference_struct(icmp_mask, checksum),
1692 ULP_PRSR_ACT_DEFAULT);
/* ICMPv6 over an outer IPv4 header is an invalid combination. */
1694 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
1695 BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
1696 return BNXT_TF_RC_ERROR;
1699 /* Update the hdr_bitmap with ICMP */
1700 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1701 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1703 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1704 return BNXT_TF_RC_SUCCESS;
1707 /* Function to handle the parsing of RTE Flow item void Header */
/* Intentional no-op: VOID items carry no match information. */
1709 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1710 struct ulp_rte_parser_params *params __rte_unused)
1712 return BNXT_TF_RC_SUCCESS;
1715 /* Function to handle the parsing of RTE Flow action void Header. */
/* Intentional no-op: VOID actions carry no action information. */
1717 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1718 struct ulp_rte_parser_params *params __rte_unused)
1720 return BNXT_TF_RC_SUCCESS;
1723 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * ulp_rte_mark_act_handler - parse RTE_FLOW_ACTION_TYPE_MARK.
 * Stores the mark id (big-endian) into the MARK action property and
 * sets the MARK action bit.  Returns BNXT_TF_RC_ERROR when the action
 * configuration is missing.
 */
1725 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1726 struct ulp_rte_parser_params *param)
1728 const struct rte_flow_action_mark *mark;
1729 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1732 mark = action_item->conf;
1734 mark_id = tfp_cpu_to_be_32(mark->id);
1735 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1736 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1738 /* Update the act_bitmap with the mark action */
1739 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1740 return BNXT_TF_RC_SUCCESS;
1742 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1743 return BNXT_TF_RC_ERROR;
1746 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * ulp_rte_rss_act_handler - parse RTE_FLOW_ACTION_TYPE_RSS.
 * Copies types, level, key_len and the key itself into the action
 * properties (rejecting keys longer than the property buffer) and
 * sets the RSS action bit.  Returns BNXT_TF_RC_SUCCESS/ERROR.
 */
1748 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1749 struct ulp_rte_parser_params *param)
1751 const struct rte_flow_action_rss *rss;
1752 struct ulp_rte_act_prop *ap = ¶m->act_prop;
1754 if (action_item == NULL || action_item->conf == NULL) {
1755 BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1756 return BNXT_TF_RC_ERROR;
1759 rss = action_item->conf;
1760 /* Copy the rss into the specific action properties */
1761 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1762 BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1763 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1764 BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1765 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1766 &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
/* Bound-check the caller's key length against the property buffer. */
1768 if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1769 BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1770 return BNXT_TF_RC_ERROR;
1772 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1775 /* set the RSS action header bit */
1776 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1778 return BNXT_TF_RC_SUCCESS;
1781 /* Function to handle the parsing of RTE Flow item eth Header. */
/*
 * ulp_rte_enc_eth_hdr_handler - copy an Ethernet spec (dst MAC, src
 * MAC, ether type) from a vxlan-encap template into the encap field
 * array and set the O_ETH bit in the encap header bitmap.
 */
1783 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
1784 const struct rte_flow_item_eth *eth_spec)
1786 struct ulp_rte_hdr_field *field;
/* ulp_rte_parser_fld_copy advances to the next enc_field slot. */
1789 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
1790 size = sizeof(eth_spec->dst.addr_bytes);
1791 field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);
1793 size = sizeof(eth_spec->src.addr_bytes);
1794 field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);
1796 size = sizeof(eth_spec->type);
1797 field = ulp_rte_parser_fld_copy(field, ð_spec->type, size);
1799 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
1804 /* Function to handle the parsing of RTE Flow item vlan Header. */
/*
 * ulp_rte_enc_vlan_hdr_handler - copy a VLAN spec (tci, inner type)
 * from a vxlan-encap template into the outer or inner VLAN encap
 * fields and set the matching OO/OI VLAN bit.
 * NOTE(review): the outer/inner selector condition was dropped by
 * extraction; verify against the complete source file.
 */
1806 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
1807 const struct rte_flow_item_vlan *vlan_spec,
1810 struct ulp_rte_hdr_field *field;
1814 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
1815 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1816 BNXT_ULP_HDR_BIT_OO_VLAN);
1818 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
1819 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1820 BNXT_ULP_HDR_BIT_OI_VLAN);
1823 size = sizeof(vlan_spec->tci);
1824 field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);
1826 size = sizeof(vlan_spec->inner_type);
1827 field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
1828 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
/*
 * ulp_rte_enc_ipv4_hdr_handler - copy an IPv4 spec from a vxlan-encap
 * template into the encap field array, substituting defaults for a
 * zero version_ihl (RTE_IPV4_VHL_DEF) and zero TTL
 * (BNXT_ULP_DEFAULT_TTL), then set the O_IPV4 encap bit.
 */
1830 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
1831 const struct rte_flow_item_ipv4 *ip)
1833 struct ulp_rte_hdr_field *field;
1837 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
1838 size = sizeof(ip->hdr.version_ihl);
/* Default the version/IHL byte when the template left it zero. */
1839 if (!ip->hdr.version_ihl)
1840 val8 = RTE_IPV4_VHL_DEF;
1842 val8 = ip->hdr.version_ihl;
1843 field = ulp_rte_parser_fld_copy(field, &val8, size);
1845 size = sizeof(ip->hdr.type_of_service);
1846 field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
1848 size = sizeof(ip->hdr.packet_id);
1849 field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
1851 size = sizeof(ip->hdr.fragment_offset);
1852 field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
1854 size = sizeof(ip->hdr.time_to_live);
/* Default the TTL when the template left it zero. */
1855 if (!ip->hdr.time_to_live)
1856 val8 = BNXT_ULP_DEFAULT_TTL;
1858 val8 = ip->hdr.time_to_live;
1859 field = ulp_rte_parser_fld_copy(field, &val8, size);
1861 size = sizeof(ip->hdr.next_proto_id);
1862 field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
1864 size = sizeof(ip->hdr.src_addr);
1865 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1867 size = sizeof(ip->hdr.dst_addr);
1868 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1870 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
1873 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
/*
 * ulp_rte_enc_ipv6_hdr_handler - copy an IPv6 spec from a vxlan-encap
 * template into the encap field array, substituting defaults for a
 * zero vtc_flow (BNXT_ULP_IPV6_DFLT_VER) and zero hop limit
 * (BNXT_ULP_DEFAULT_TTL), then set the O_IPV6 encap bit.
 */
1875 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
1876 const struct rte_flow_item_ipv6 *ip)
1878 struct ulp_rte_hdr_field *field;
1883 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
1884 size = sizeof(ip->hdr.vtc_flow);
/* Default the version/TC/flow-label word when left zero. */
1885 if (!ip->hdr.vtc_flow)
1886 val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
1888 val32 = ip->hdr.vtc_flow;
1889 field = ulp_rte_parser_fld_copy(field, &val32, size);
1891 size = sizeof(ip->hdr.proto);
1892 field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
1894 size = sizeof(ip->hdr.hop_limits);
/* Default the hop limit when left zero. */
1895 if (!ip->hdr.hop_limits)
1896 val8 = BNXT_ULP_DEFAULT_TTL;
1898 val8 = ip->hdr.hop_limits;
1899 field = ulp_rte_parser_fld_copy(field, &val8, size);
1901 size = sizeof(ip->hdr.src_addr);
1902 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1904 size = sizeof(ip->hdr.dst_addr);
1905 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1907 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
1910 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * ulp_rte_enc_udp_hdr_handler - copy a UDP spec (src/dst port) from a
 * vxlan-encap template into the encap field array, set the O_UDP
 * encap bit, and force the IPv4/IPv6 encap proto field to IPPROTO_UDP.
 */
1912 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
1913 const struct rte_flow_item_udp *udp_spec)
1915 struct ulp_rte_hdr_field *field;
1917 uint8_t type = IPPROTO_UDP;
1919 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
1920 size = sizeof(udp_spec->hdr.src_port);
1921 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
1923 size = sizeof(udp_spec->hdr.dst_port);
1924 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
1926 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
1928 /* Update the ip header protocol */
/* Write IPPROTO_UDP into both IP proto slots; only the one matching
 * the encap L3 type is consumed downstream. */
1929 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
1930 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1931 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
1932 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1935 /* Function to handle the parsing of RTE Flow item vxlan Header. */
/*
 * ulp_rte_enc_vxlan_hdr_handler - copy a VXLAN spec (flags, rsvd0,
 * vni, rsvd1) from a vxlan-encap template into the encap field array
 * and set the T_VXLAN encap bit.
 */
1937 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
1938 struct rte_flow_item_vxlan *vxlan_spec)
1940 struct ulp_rte_hdr_field *field;
1943 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
1944 size = sizeof(vxlan_spec->flags);
1945 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);
1947 size = sizeof(vxlan_spec->rsvd0);
1948 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);
1950 size = sizeof(vxlan_spec->vni);
1951 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);
1953 size = sizeof(vxlan_spec->rsvd1);
1954 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);
1956 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1959 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * ulp_rte_vxlan_encap_act_handler - parse
 * RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP.  Walks the action's item
 * definition list, which must be ETH [VLAN [VLAN]] (IPV4|IPV6) UDP
 * VXLAN, dispatching each item to its ulp_rte_enc_* handler, filling
 * the encap size/type action properties, and finally setting the
 * VXLAN_ENCAP action bit.  Returns BNXT_TF_RC_SUCCESS, or
 * BNXT_TF_RC_ERROR when the template deviates from that layout.
 * NOTE(review): several structural lines (braces, null checks,
 * vlan_num increments) were dropped by extraction; verify against
 * the complete source file.
 */
1961 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1962 struct ulp_rte_parser_params *params)
1964 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1965 const struct rte_flow_item *item;
1966 const struct rte_flow_item_ipv4 *ipv4_spec;
1967 const struct rte_flow_item_ipv6 *ipv6_spec;
1968 struct rte_flow_item_vxlan vxlan_spec;
1969 uint32_t vlan_num = 0, vlan_size = 0;
1970 uint32_t ip_size = 0, ip_type = 0;
1971 uint32_t vxlan_size = 0;
1972 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1973 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1975 vxlan_encap = action_item->conf;
1977 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1978 return BNXT_TF_RC_ERROR;
1981 item = vxlan_encap->definition;
1983 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1984 return BNXT_TF_RC_ERROR;
1987 if (!ulp_rte_item_skip_void(&item, 0))
1988 return BNXT_TF_RC_ERROR;
1990 /* must have ethernet header */
1991 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1992 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1993 return BNXT_TF_RC_ERROR;
1996 /* Parse the ethernet header */
1998 ulp_rte_enc_eth_hdr_handler(params, item->spec);
2000 /* Goto the next item */
2001 if (!ulp_rte_item_skip_void(&item, 1))
2002 return BNXT_TF_RC_ERROR;
2004 /* May have vlan header */
2005 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2008 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
2010 if (!ulp_rte_item_skip_void(&item, 1))
2011 return BNXT_TF_RC_ERROR;
2014 /* may have two vlan headers */
2015 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2018 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
2020 if (!ulp_rte_item_skip_void(&item, 1))
2021 return BNXT_TF_RC_ERROR;
2024 /* Update the vlan count and size of more than one */
/* Counts/sizes are stored big-endian in the action properties. */
2026 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
2027 vlan_num = tfp_cpu_to_be_32(vlan_num);
2028 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
2031 vlan_size = tfp_cpu_to_be_32(vlan_size);
2032 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
2037 /* L3 must be IPv4, IPv6 */
2038 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
2039 ipv4_spec = item->spec;
2040 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2042 /* Update the ip size details */
2043 ip_size = tfp_cpu_to_be_32(ip_size);
2044 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2045 &ip_size, sizeof(uint32_t));
2047 /* update the ip type */
2048 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2049 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2050 &ip_type, sizeof(uint32_t));
2052 /* update the computed field to notify it is ipv4 header */
2053 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2056 ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2058 if (!ulp_rte_item_skip_void(&item, 1))
2059 return BNXT_TF_RC_ERROR;
2060 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2061 ipv6_spec = item->spec;
2062 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2064 /* Update the ip size details */
2065 ip_size = tfp_cpu_to_be_32(ip_size);
2066 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2067 &ip_size, sizeof(uint32_t));
2069 /* update the ip type */
2070 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2071 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2072 &ip_type, sizeof(uint32_t));
2074 /* update the computed field to notify it is ipv6 header */
2075 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2078 ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2080 if (!ulp_rte_item_skip_void(&item, 1))
2081 return BNXT_TF_RC_ERROR;
2083 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2084 return BNXT_TF_RC_ERROR;
2088 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2089 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
2090 return BNXT_TF_RC_ERROR;
2093 ulp_rte_enc_udp_hdr_handler(params, item->spec);
2095 if (!ulp_rte_item_skip_void(&item, 1))
2096 return BNXT_TF_RC_ERROR;
2099 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2100 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
2101 return BNXT_TF_RC_ERROR;
2103 vxlan_size = sizeof(struct rte_flow_item_vxlan);
2104 /* copy the vxlan details */
/* Work on a local copy so the caller's spec is not modified; 0x08 is
 * the mandatory VXLAN "VNI valid" flag (RFC 7348). */
2105 memcpy(&vxlan_spec, item->spec, vxlan_size);
2106 vxlan_spec.flags = 0x08;
2107 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2108 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2109 &vxlan_size, sizeof(uint32_t));
2111 ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2113 /* update the hdr_bitmap with vxlan */
2114 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2115 return BNXT_TF_RC_SUCCESS;
2118 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
2120 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
2122 struct ulp_rte_parser_params *params)
2124 /* update the hdr_bitmap with vxlan */
2125 ULP_BITMAP_SET(params->act_bitmap.bits,
2126 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2127 /* Update computational field with tunnel decap info */
2128 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2129 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
2130 return BNXT_TF_RC_SUCCESS;
2133 /* Function to handle the parsing of RTE Flow action drop Header. */
2135 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2136 struct ulp_rte_parser_params *params)
2138 /* Update the hdr_bitmap with drop */
2139 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2140 return BNXT_TF_RC_SUCCESS;
2143 /* Function to handle the parsing of RTE Flow action count. */
2145 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2146 struct ulp_rte_parser_params *params)
2148 const struct rte_flow_action_count *act_count;
2149 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
2151 act_count = action_item->conf;
2153 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2155 BNXT_ULP_ACT_PROP_SZ_COUNT);
2158 /* Update the hdr_bitmap with count */
2159 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2160 return BNXT_TF_RC_SUCCESS;
2163 /* Function to handle the parsing of action ports. */
2165 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2168 enum bnxt_ulp_direction_type dir;
2171 struct ulp_rte_act_prop *act = ¶m->act_prop;
2172 enum bnxt_ulp_intf_type port_type;
2175 /* Get the direction */
2176 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
2177 if (dir == BNXT_ULP_DIR_EGRESS) {
2178 /* For egress direction, fill vport */
2179 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2180 return BNXT_TF_RC_ERROR;
2183 pid = rte_cpu_to_be_32(pid);
2184 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2185 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2187 /* For ingress direction, fill vnic */
2188 port_type = ULP_COMP_FLD_IDX_RD(param,
2189 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2190 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2191 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2193 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2195 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2197 return BNXT_TF_RC_ERROR;
2200 pid = rte_cpu_to_be_32(pid);
2201 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2202 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2205 /* Update the action port set bit */
2206 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2207 return BNXT_TF_RC_SUCCESS;
2210 /* Function to handle the parsing of RTE Flow action PF. */
2212 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2213 struct ulp_rte_parser_params *params)
2217 enum bnxt_ulp_intf_type intf_type;
2219 /* Get the port id of the current device */
2220 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2222 /* Get the port db ifindex */
2223 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2225 BNXT_TF_DBG(ERR, "Invalid port id\n");
2226 return BNXT_TF_RC_ERROR;
2229 /* Check the port is PF port */
2230 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2231 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2232 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2233 return BNXT_TF_RC_ERROR;
2235 /* Update the action properties */
2236 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2237 return ulp_rte_parser_act_port_set(params, ifindex);
2240 /* Function to handle the parsing of RTE Flow action VF. */
2242 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2243 struct ulp_rte_parser_params *params)
2245 const struct rte_flow_action_vf *vf_action;
2246 enum bnxt_ulp_intf_type intf_type;
2250 vf_action = action_item->conf;
2252 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2253 return BNXT_TF_RC_PARSE_ERR;
2256 if (vf_action->original) {
2257 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2258 return BNXT_TF_RC_PARSE_ERR;
2261 bp = bnxt_pmd_get_bp(params->port_id);
2263 BNXT_TF_DBG(ERR, "Invalid bp\n");
2264 return BNXT_TF_RC_ERROR;
2267 /* vf_action->id is a logical number which in this case is an
2268 * offset from the first VF. So, to get the absolute VF id, the
2269 * offset must be added to the absolute first vf id of that port.
2271 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2275 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2276 return BNXT_TF_RC_ERROR;
2278 /* Check the port is VF port */
2279 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2280 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2281 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2282 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2283 return BNXT_TF_RC_ERROR;
2286 /* Update the action properties */
2287 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2288 return ulp_rte_parser_act_port_set(params, ifindex);
2291 /* Function to handle the parsing of RTE Flow action port_id. */
2293 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
2294 struct ulp_rte_parser_params *param)
2296 const struct rte_flow_action_port_id *port_id = act_item->conf;
2298 enum bnxt_ulp_intf_type intf_type;
2302 "ParseErr: Invalid Argument\n");
2303 return BNXT_TF_RC_PARSE_ERR;
2305 if (port_id->original) {
2307 "ParseErr:Portid Original not supported\n");
2308 return BNXT_TF_RC_PARSE_ERR;
2311 /* Get the port db ifindex */
2312 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2314 BNXT_TF_DBG(ERR, "Invalid port id\n");
2315 return BNXT_TF_RC_ERROR;
2318 /* Get the intf type */
2319 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2321 BNXT_TF_DBG(ERR, "Invalid port type\n");
2322 return BNXT_TF_RC_ERROR;
2325 /* Set the action port */
2326 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2327 return ulp_rte_parser_act_port_set(param, ifindex);
2330 /* Function to handle the parsing of RTE Flow action phy_port. */
2332 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2333 struct ulp_rte_parser_params *prm)
2335 const struct rte_flow_action_phy_port *phy_port;
2339 enum bnxt_ulp_direction_type dir;
2341 phy_port = action_item->conf;
2344 "ParseErr: Invalid Argument\n");
2345 return BNXT_TF_RC_PARSE_ERR;
2348 if (phy_port->original) {
2350 "Parse Err:Port Original not supported\n");
2351 return BNXT_TF_RC_PARSE_ERR;
2353 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2354 if (dir != BNXT_ULP_DIR_EGRESS) {
2356 "Parse Err:Phy ports are valid only for egress\n");
2357 return BNXT_TF_RC_PARSE_ERR;
2359 /* Get the physical port details from port db */
2360 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2363 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2368 pid = rte_cpu_to_be_32(pid);
2369 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2370 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2372 /* Update the action port set bit */
2373 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2374 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2375 BNXT_ULP_INTF_TYPE_PHY_PORT);
2376 return BNXT_TF_RC_SUCCESS;
2379 /* Function to handle the parsing of RTE Flow action pop vlan. */
2381 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2382 struct ulp_rte_parser_params *params)
2384 /* Update the act_bitmap with pop */
2385 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2386 return BNXT_TF_RC_SUCCESS;
2389 /* Function to handle the parsing of RTE Flow action push vlan. */
2391 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2392 struct ulp_rte_parser_params *params)
2394 const struct rte_flow_action_of_push_vlan *push_vlan;
2396 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2398 push_vlan = action_item->conf;
2400 ethertype = push_vlan->ethertype;
2401 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2403 "Parse Err: Ethertype not supported\n");
2404 return BNXT_TF_RC_PARSE_ERR;
2406 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2407 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2408 /* Update the hdr_bitmap with push vlan */
2409 ULP_BITMAP_SET(params->act_bitmap.bits,
2410 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2411 return BNXT_TF_RC_SUCCESS;
2413 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2414 return BNXT_TF_RC_ERROR;
2417 /* Function to handle the parsing of RTE Flow action set vlan id. */
2419 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2420 struct ulp_rte_parser_params *params)
2422 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2424 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2426 vlan_vid = action_item->conf;
2427 if (vlan_vid && vlan_vid->vlan_vid) {
2428 vid = vlan_vid->vlan_vid;
2429 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2430 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2431 /* Update the hdr_bitmap with vlan vid */
2432 ULP_BITMAP_SET(params->act_bitmap.bits,
2433 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2434 return BNXT_TF_RC_SUCCESS;
2436 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2437 return BNXT_TF_RC_ERROR;
2440 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2442 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2443 struct ulp_rte_parser_params *params)
2445 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2447 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2449 vlan_pcp = action_item->conf;
2451 pcp = vlan_pcp->vlan_pcp;
2452 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2453 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2454 /* Update the hdr_bitmap with vlan vid */
2455 ULP_BITMAP_SET(params->act_bitmap.bits,
2456 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2457 return BNXT_TF_RC_SUCCESS;
2459 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2460 return BNXT_TF_RC_ERROR;
2463 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2465 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2466 struct ulp_rte_parser_params *params)
2468 const struct rte_flow_action_set_ipv4 *set_ipv4;
2469 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2471 set_ipv4 = action_item->conf;
2473 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2474 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2475 /* Update the hdr_bitmap with set ipv4 src */
2476 ULP_BITMAP_SET(params->act_bitmap.bits,
2477 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2478 return BNXT_TF_RC_SUCCESS;
2480 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2481 return BNXT_TF_RC_ERROR;
2484 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2486 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2487 struct ulp_rte_parser_params *params)
2489 const struct rte_flow_action_set_ipv4 *set_ipv4;
2490 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2492 set_ipv4 = action_item->conf;
2494 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2495 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2496 /* Update the hdr_bitmap with set ipv4 dst */
2497 ULP_BITMAP_SET(params->act_bitmap.bits,
2498 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2499 return BNXT_TF_RC_SUCCESS;
2501 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2502 return BNXT_TF_RC_ERROR;
2505 /* Function to handle the parsing of RTE Flow action set tp src.*/
2507 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2508 struct ulp_rte_parser_params *params)
2510 const struct rte_flow_action_set_tp *set_tp;
2511 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2513 set_tp = action_item->conf;
2515 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2516 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2517 /* Update the hdr_bitmap with set tp src */
2518 ULP_BITMAP_SET(params->act_bitmap.bits,
2519 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2520 return BNXT_TF_RC_SUCCESS;
2523 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2524 return BNXT_TF_RC_ERROR;
2527 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2529 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2530 struct ulp_rte_parser_params *params)
2532 const struct rte_flow_action_set_tp *set_tp;
2533 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2535 set_tp = action_item->conf;
2537 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2538 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2539 /* Update the hdr_bitmap with set tp dst */
2540 ULP_BITMAP_SET(params->act_bitmap.bits,
2541 BNXT_ULP_ACT_BIT_SET_TP_DST);
2542 return BNXT_TF_RC_SUCCESS;
2545 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2546 return BNXT_TF_RC_ERROR;
2549 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2551 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2552 struct ulp_rte_parser_params *params)
2554 /* Update the act_bitmap with dec ttl */
2555 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2556 return BNXT_TF_RC_SUCCESS;
2559 /* Function to handle the parsing of RTE Flow action JUMP */
2561 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2562 struct ulp_rte_parser_params *params)
2564 /* Update the act_bitmap with dec ttl */
2565 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2566 return BNXT_TF_RC_SUCCESS;
2570 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2571 struct ulp_rte_parser_params *params)
2573 const struct rte_flow_action_sample *sample;
2576 sample = action_item->conf;
2578 /* if SAMPLE bit is set it means this sample action is nested within the
2579 * actions of another sample action; this is not allowed
2581 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2582 BNXT_ULP_ACT_BIT_SAMPLE))
2583 return BNXT_TF_RC_ERROR;
2585 /* a sample action is only allowed as a shared action */
2586 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2587 BNXT_ULP_ACT_BIT_SHARED))
2588 return BNXT_TF_RC_ERROR;
2590 /* only a ratio of 1 i.e. 100% is supported */
2591 if (sample->ratio != 1)
2592 return BNXT_TF_RC_ERROR;
2594 if (!sample->actions)
2595 return BNXT_TF_RC_ERROR;
2597 /* parse the nested actions for a sample action */
2598 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2599 if (ret == BNXT_TF_RC_SUCCESS)
2600 /* Update the act_bitmap with sample */
2601 ULP_BITMAP_SET(params->act_bitmap.bits,
2602 BNXT_ULP_ACT_BIT_SAMPLE);
2607 /* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
2609 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
2610 struct ulp_rte_parser_params *params)
2612 /* Set the F1 flow header bit */
2613 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
2614 return ulp_rte_vxlan_decap_act_handler(action_item, params);
2617 /* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
2619 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
2620 struct ulp_rte_parser_params *params)
2623 /* Set the F2 flow header bit */
2624 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
2625 return ulp_rte_vxlan_decap_act_handler(NULL, params);