1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
19 #include "ulp_template_db_tbl.h"
21 /* Local defines for the parsing functions */
22 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
23 #define ULP_VLAN_PRIORITY_MASK 0x700 /* 3-bit PCP value after htons(); see vlan hdr handler */
24 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
25 #define ULP_UDP_PORT_VXLAN 4789 /* IANA-assigned VXLAN UDP destination port */
27 /* Utility function to skip the void items. */
/*
 * Advances *item past any RTE_FLOW_ITEM_TYPE_VOID entries so callers see
 * only meaningful pattern items; 'increment' is the step applied per
 * iteration (presumably 1 — the loop body is missing from this extract,
 * TODO confirm against the full source).
 */
29 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
35 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
42 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] as "in use" in the per-flow field bitmaps when its
 * mask is non-zero, or clears the bit when the mask is all-zero.
 *  - fld_bitmap:   every field with a non-zero mask.
 *  - fld_s_bitmap: same, except fields parsed with ULP_PRSR_ACT_MATCH_IGNORE
 *                  are excluded (ignored for exact-match signature purposes).
 * A non-zero mask that is not all-ones implies a wildcard (partial) match,
 * recorded in the WC_MATCH computed field.
 */
44 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
46 enum bnxt_ulp_prsr_action prsr_act)
48 struct ulp_rte_hdr_field *field;
50 field = ¶ms->hdr_field[idx];
51 if (ulp_bitmap_notzero(field->mask, field->size)) {
52 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
53 if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
54 ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
/* mask set but not all ones: partial match -> wildcard match flag */
56 if (!ulp_bitmap_is_ones(field->mask, field->size))
57 ULP_COMP_FLD_IDX_WR(params,
58 BNXT_ULP_CF_IDX_WC_MATCH, 1);
/* all-zero mask: field does not participate in the match */
60 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
/* NULL-safe member address: &x->y when x is non-NULL, else NULL. */
64 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
65 /* Utility function to copy field spec and masks items */
/*
 * Copies one protocol field's spec/mask buffers (each 'size' bytes) into
 * hdr_field[*idx] and updates the field bitmaps accordingly.  Either buffer
 * may be NULL (item with no spec or no mask).  *idx is advanced past the
 * field (increment lines are missing from this extract).
 */
67 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
70 const void *spec_buff,
71 const void *mask_buff,
72 enum bnxt_ulp_prsr_action prsr_act)
74 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
76 /* update the field size */
79 /* copy the mask specifications only if mask is not null */
80 if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
81 memcpy(field->mask, mask_buff, size);
82 ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
/* spec is only meaningful when a non-zero mask accompanies it */
85 /* copy the protocol specifications only if mask is not null*/
86 if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
87 memcpy(field->spec, spec_buff, size);
89 /* Increment the index */
93 /* Utility function to validate and reserve hdr_field index space.
 * Fails (OOB) if adding 'size' more fields would exceed
 * BNXT_ULP_PROTO_HDR_MAX; otherwise returns the current field index in
 * *idx and advances params->field_idx by 'size'.
 */
95 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
99 if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
100 BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx)
103 *idx = params->field_idx;
104 params->field_idx += size;
109 * Function to handle the parsing of RTE Flows and placing
110 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow pattern array until RTE_FLOW_ITEM_TYPE_END, dispatching
 * each item to its registered handler from the ulp_hdr_info table.
 * Returns BNXT_TF_RC_PARSE_ERR for unsupported item types, BNXT_TF_RC_ERROR
 * on handler failure, otherwise the result of the implicit SVIF match.
 */
113 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
114 struct ulp_rte_parser_params *params)
116 const struct rte_flow_item *item = pattern;
117 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* reserve the leading hdr_field slots used for SVIF before item fields */
119 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
121 /* Set the computed flags for no vlan tags before parsing */
122 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
123 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
125 /* Parse all the items in the pattern */
126 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
127 /* get the header information from the flow_hdr_info table */
128 hdr_info = &ulp_hdr_info[item->type];
129 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
131 "Truflow parser does not support type %d\n",
133 return BNXT_TF_RC_PARSE_ERR;
134 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
135 /* call the registered callback handler */
136 if (hdr_info->proto_hdr_func) {
137 if (hdr_info->proto_hdr_func(item, params) !=
138 BNXT_TF_RC_SUCCESS) {
139 return BNXT_TF_RC_ERROR;
145 /* update the implied SVIF */
146 return ulp_rte_parser_implicit_match_port_process(params);
150 * Function to handle the parsing of RTE Flows and placing
151 * the RTE flow actions into the ulp structures.
/*
 * Mirrors bnxt_ulp_rte_parser_hdr_parse for the action list: walks actions
 * until RTE_FLOW_ACTION_TYPE_END, dispatching each through ulp_act_info.
 * Unsupported or failing actions yield BNXT_TF_RC_ERROR; on success the
 * implicit action port is derived from the incoming port.
 */
154 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
155 struct ulp_rte_parser_params *params)
157 const struct rte_flow_action *action_item = actions;
158 struct bnxt_ulp_rte_act_info *hdr_info;
160 /* Parse all the items in the pattern */
161 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
162 /* get the header information from the flow_hdr_info table */
163 hdr_info = &ulp_act_info[action_item->type];
164 if (hdr_info->act_type ==
165 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
167 "Truflow parser does not support act %u\n",
169 return BNXT_TF_RC_ERROR;
170 } else if (hdr_info->act_type ==
171 BNXT_ULP_ACT_TYPE_SUPPORTED) {
172 /* call the registered callback handler */
173 if (hdr_info->proto_act_func) {
174 if (hdr_info->proto_act_func(action_item,
176 BNXT_TF_RC_SUCCESS) {
177 return BNXT_TF_RC_ERROR;
183 /* update the implied port details */
184 ulp_rte_parser_implicit_act_port_process(params);
185 return BNXT_TF_RC_SUCCESS;
189 * Function to handle the post processing of the computed
190 * fields for the interface.
/*
 * Resolves the ulp ifindex from the incoming dpdk port id, then fills in
 * the PARIF computed fields for the flow:
 *  - ingress: physical-port PARIF;
 *  - egress (else branch, partially missing in this extract): VF-func PARIF
 *    for VF-rep match ports plus DRV-func PARIF, and the IS_VFREP/IS_PF
 *    match-port flags.
 */
193 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
196 uint16_t port_id, parif;
198 enum bnxt_ulp_direction_type dir;
200 /* get the direction details */
201 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
203 /* read the port id details */
204 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
205 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
208 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
212 if (dir == BNXT_ULP_DIR_INGRESS) {
/* ingress flows key off the physical port's PARIF */
214 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
215 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
216 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
219 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
222 /* Get the match port type */
223 mtype = ULP_COMP_FLD_IDX_RD(params,
224 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
225 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
226 ULP_COMP_FLD_IDX_WR(params,
227 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
229 /* Set VF func PARIF */
230 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
231 BNXT_ULP_VF_FUNC_PARIF,
234 "ParseErr:ifindex is not valid\n");
237 ULP_COMP_FLD_IDX_WR(params,
238 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
242 /* Set DRV func PARIF */
243 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
244 BNXT_ULP_DRV_FUNC_PARIF,
247 "ParseErr:ifindex is not valid\n");
250 ULP_COMP_FLD_IDX_WR(params,
251 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
254 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
255 ULP_COMP_FLD_IDX_WR(params,
256 BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
/*
 * Post-parse fixups for a normal (non-tunnel-offload) flow:
 *  - stamps the direction bit in both header and action bitmaps,
 *  - flags VF-rep to VF-rep flows,
 *  - converts ACT_BIT_DEC_TTL into the tunnel/non-tunnel dec-ttl computed
 *    fields (tunnel variant only when VXLAN hdr present without a decap),
 *  - merges the field-derived hdr_fp_bit bits into hdr_bitmap,
 *  - records the flow id and updates interface PARIF computed fields.
 */
263 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
265 enum bnxt_ulp_intf_type match_port_type, act_port_type;
266 enum bnxt_ulp_direction_type dir;
267 uint32_t act_port_set;
269 /* Get the computed details */
270 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
271 match_port_type = ULP_COMP_FLD_IDX_RD(params,
272 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
273 act_port_type = ULP_COMP_FLD_IDX_RD(params,
274 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
275 act_port_set = ULP_COMP_FLD_IDX_RD(params,
276 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
278 /* set the flow direction in the proto and action header */
279 if (dir == BNXT_ULP_DIR_EGRESS) {
280 ULP_BITMAP_SET(params->hdr_bitmap.bits,
281 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
282 ULP_BITMAP_SET(params->act_bitmap.bits,
283 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
286 /* calculate the VF to VF flag */
287 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
288 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
289 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
291 /* Update the decrement ttl computational fields */
292 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
293 BNXT_ULP_ACT_BIT_DEC_TTL)) {
295 * Check that vxlan proto is included and vxlan decap
296 * action is not set then decrement tunnel ttl.
297 * Similarly add GRE and NVGRE in future.
299 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
300 BNXT_ULP_HDR_BIT_T_VXLAN) &&
301 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
302 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
303 ULP_COMP_FLD_IDX_WR(params,
304 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
306 ULP_COMP_FLD_IDX_WR(params,
307 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
311 /* Merge the hdr_fp_bit into the proto header bit */
312 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
314 /* Update the comp fld fid */
315 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
317 /* Update the computed interface parameters */
318 bnxt_ulp_comp_fld_intf_update(params);
320 /* TBD: Handle the flow rejection scenarios */
325 * Function to handle the post processing of the parsing details
/*
 * Runs normal-flow post processing unconditionally, then defers to the
 * tunnel-flow post processor, whose return code decides the final result.
 */
328 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
330 ulp_post_process_normal_flow(params);
331 return ulp_post_process_tun_flow(params);
335 * Function to compute the flow direction based on the match port details
/*
 * Writes BNXT_ULP_CF_IDX_DIRECTION based on the flow attribute direction,
 * with one override: an "ingress" flow whose match port is a VF
 * representor is really egress from the hardware's point of view.
 */
338 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
340 enum bnxt_ulp_intf_type match_port_type;
342 /* Get the match port type */
343 match_port_type = ULP_COMP_FLD_IDX_RD(params,
344 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
346 /* If ingress flow and matchport is vf rep then dir is egress*/
347 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
348 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
349 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
350 BNXT_ULP_DIR_EGRESS);
352 /* Assign the input direction */
353 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
354 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
355 BNXT_ULP_DIR_INGRESS);
357 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
358 BNXT_ULP_DIR_EGRESS);
362 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolves and records the source virtual interface (SVIF) for the flow.
 * Only one SVIF may be set per flow; a second attempt is an error.  Picks
 * the SVIF type by direction and port type (phy port for ingress; VF-func
 * or drv-func SVIF otherwise), stores the big-endian SVIF spec/mask into
 * the dedicated hdr_field slot, and latches the host-order SVIF value into
 * the SVIF_FLAG computed field as the "already set" marker.
 */
364 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
369 enum bnxt_ulp_direction_type dir;
370 struct ulp_rte_hdr_field *hdr_field;
371 enum bnxt_ulp_svif_type svif_type;
372 enum bnxt_ulp_intf_type port_type;
374 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
375 BNXT_ULP_INVALID_SVIF_VAL) {
377 "SVIF already set,multiple source not support'd\n");
378 return BNXT_TF_RC_ERROR;
381 /* Get port type details */
382 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
383 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
384 BNXT_TF_DBG(ERR, "Invalid port type\n");
385 return BNXT_TF_RC_ERROR;
388 /* Update the match port type */
389 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
391 /* compute the direction */
392 bnxt_ulp_rte_parser_direction_compute(params);
394 /* Get the computed direction */
395 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
396 if (dir == BNXT_ULP_DIR_INGRESS) {
397 svif_type = BNXT_ULP_PHY_PORT_SVIF;
399 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
400 svif_type = BNXT_ULP_VF_FUNC_SVIF;
402 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
404 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* hardware expects the SVIF in network byte order */
406 svif = rte_cpu_to_be_16(svif);
407 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
408 memcpy(hdr_field->spec, &svif, sizeof(svif));
409 memcpy(hdr_field->mask, &mask, sizeof(mask));
410 hdr_field->size = sizeof(svif);
411 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
412 rte_be_to_cpu_16(svif));
413 return BNXT_TF_RC_SUCCESS;
416 /* Function to handle the parsing of the RTE port id */
/*
 * Fallback invoked after pattern parsing: if no item explicitly set the
 * SVIF, derive it from the incoming dpdk port id with a full 0xFFFF mask.
 * Returns success immediately when the SVIF is already set.
 */
418 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
420 uint16_t port_id = 0;
421 uint16_t svif_mask = 0xFFFF;
423 int32_t rc = BNXT_TF_RC_ERROR;
425 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
426 BNXT_ULP_INVALID_SVIF_VAL)
427 return BNXT_TF_RC_SUCCESS;
429 /* SVIF not set. So get the port id */
430 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
432 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
435 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
439 /* Update the SVIF details */
440 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
444 /* Function to handle the implicit action port id */
/*
 * If no explicit port-id action was given, synthesize one from the
 * incoming interface and run it through the normal port-id action handler,
 * then clear ACT_PORT_IS_SET so the port remains "implicit" for later
 * processing.  No-op when an action port was already set.
 */
446 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
448 struct rte_flow_action action_item = {0};
449 struct rte_flow_action_port_id port_id = {0};
451 /* Read the action port set bit */
452 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
453 /* Already set, so just exit */
454 return BNXT_TF_RC_SUCCESS;
456 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
457 action_item.conf = &port_id;
459 /* Update the action port based on incoming port */
460 ulp_rte_port_id_act_handler(&action_item, params);
462 /* Reset the action port set bit */
463 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
464 return BNXT_TF_RC_SUCCESS;
467 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * RTE_FLOW_ITEM_TYPE_PF: match the PF itself.  The item carries no data,
 * so the SVIF comes from the incoming dpdk port with an exact mask.
 */
469 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
470 struct ulp_rte_parser_params *params)
472 uint16_t port_id = 0;
473 uint16_t svif_mask = 0xFFFF;
476 /* Get the implicit port id */
477 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
479 /* perform the conversion from dpdk port to bnxt ifindex */
480 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
483 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
484 return BNXT_TF_RC_ERROR;
487 /* Update the SVIF details */
488 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
491 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * RTE_FLOW_ITEM_TYPE_VF: match traffic from a given VF.  Validates the
 * item's spec/mask (validation bodies partially missing from this
 * extract), maps the VF function id to a bnxt ifindex via the port db,
 * and sets the SVIF from that ifindex.
 */
493 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
494 struct ulp_rte_parser_params *params)
496 const struct rte_flow_item_vf *vf_spec = item->spec;
497 const struct rte_flow_item_vf *vf_mask = item->mask;
500 int32_t rc = BNXT_TF_RC_PARSE_ERR;
502 /* Get VF rte_flow_item for Port details */
504 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
508 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
513 /* perform the conversion from VF Func id to bnxt ifindex */
514 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
517 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
520 /* Update the SVIF details */
521 return ulp_rte_parser_svif_set(params, ifindex, mask);
524 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * RTE_FLOW_ITEM_TYPE_PORT_ID: match on a dpdk port.  After spec/mask
 * validation (partially missing in this extract), the port id is mapped
 * to a bnxt ifindex and the SVIF is set using the item's mask.
 */
526 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
527 struct ulp_rte_parser_params *params)
529 const struct rte_flow_item_port_id *port_spec = item->spec;
530 const struct rte_flow_item_port_id *port_mask = item->mask;
532 int32_t rc = BNXT_TF_RC_PARSE_ERR;
536 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
540 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
543 mask = port_mask->id;
545 /* perform the conversion from dpdk port to bnxt ifindex */
546 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
549 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
552 /* Update the SVIF details */
553 return ulp_rte_parser_svif_set(params, ifindex, mask);
556 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * RTE_FLOW_ITEM_TYPE_PHY_PORT: match on a physical port.  Valid only for
 * ingress flows; looks up the physical port's SVIF directly (bypassing
 * ulp_rte_parser_svif_set) and writes the big-endian spec/mask into the
 * dedicated SVIF hdr_field slot.
 */
558 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
559 struct ulp_rte_parser_params *params)
561 const struct rte_flow_item_phy_port *port_spec = item->spec;
562 const struct rte_flow_item_phy_port *port_mask = item->mask;
564 int32_t rc = BNXT_TF_RC_ERROR;
566 enum bnxt_ulp_direction_type dir;
567 struct ulp_rte_hdr_field *hdr_field;
569 /* Copy the rte_flow_item for phy port into hdr_field */
571 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
575 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
578 mask = port_mask->index;
580 /* Update the match port type */
581 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
582 BNXT_ULP_INTF_TYPE_PHY_PORT);
584 /* Compute the Hw direction */
585 bnxt_ulp_rte_parser_direction_compute(params);
587 /* Direction validation */
588 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
589 if (dir == BNXT_ULP_DIR_EGRESS) {
591 "Parse Err:Phy ports are valid only for ingress\n");
592 return BNXT_TF_RC_PARSE_ERR;
595 /* Get the physical port details from port db */
596 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
599 BNXT_TF_DBG(ERR, "Failed to get port details\n");
600 return BNXT_TF_RC_PARSE_ERR;
603 /* Update the SVIF details */
604 svif = rte_cpu_to_be_16(svif);
605 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
606 memcpy(hdr_field->spec, &svif, sizeof(svif));
607 memcpy(hdr_field->mask, &mask, sizeof(mask));
608 hdr_field->size = sizeof(svif);
609 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
610 rte_be_to_cpu_16(svif));
611 return BNXT_TF_RC_SUCCESS;
614 /* Function to handle the update of proto header based on field values */
/*
 * Translates an ethertype (big-endian, as parsed from the wire) into the
 * corresponding IPv4/IPv6 fast-path header bit and L3 computed flag.
 * 'in_flag' selects inner (non-zero) vs outer header bits.
 */
616 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
617 uint16_t type, uint32_t in_flag)
619 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
621 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
622 BNXT_ULP_HDR_BIT_I_IPV4);
623 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
625 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
626 BNXT_ULP_HDR_BIT_O_IPV4);
627 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
629 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
631 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
632 BNXT_ULP_HDR_BIT_I_IPV6);
633 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
635 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
636 BNXT_ULP_HDR_BIT_O_IPV6);
637 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
642 /* Internal Function to identify broadcast or multicast packets */
/*
 * Returns non-zero (presumably true — return lines are missing from this
 * extract, TODO confirm) for multicast/broadcast MACs, which the parser
 * rejects since bcast/mcast offload is unsupported.
 */
644 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
646 if (rte_is_multicast_ether_addr(eth_addr) ||
647 rte_is_broadcast_ether_addr(eth_addr)) {
649 "No support for bcast or mcast addr offload\n");
655 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Parses an ETH item: rejects bcast/mcast dst/src MACs, reserves
 * hdr_field slots, copies dst MAC, src MAC and ethertype (ethertype with
 * MATCH_IGNORE so it does not join the exact-match signature), then sets
 * I_ETH if any outer L2/L3/L4 header bit is already present, else O_ETH.
 * Finally propagates the ethertype into the L3 header bits.
 */
657 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
658 struct ulp_rte_parser_params *params)
660 const struct rte_flow_item_eth *eth_spec = item->spec;
661 const struct rte_flow_item_eth *eth_mask = item->mask;
664 uint16_t eth_type = 0;
665 uint32_t inner_flag = 0;
667 /* Perform validations */
669 /* Todo: work around to avoid multicast and broadcast addr */
670 if (ulp_rte_parser_is_bcmc_addr(ð_spec->dst))
671 return BNXT_TF_RC_PARSE_ERR;
673 if (ulp_rte_parser_is_bcmc_addr(ð_spec->src))
674 return BNXT_TF_RC_PARSE_ERR;
676 eth_type = eth_spec->type;
679 if (ulp_rte_prsr_fld_size_validate(params, &idx,
680 BNXT_ULP_PROTO_HDR_ETH_NUM)) {
681 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
682 return BNXT_TF_RC_ERROR;
685 * Copy the rte_flow_item for eth into hdr_field using ethernet
688 size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
689 ulp_rte_prsr_fld_mask(params, &idx, size,
690 ulp_deference_struct(eth_spec, dst.addr_bytes),
691 ulp_deference_struct(eth_mask, dst.addr_bytes),
692 ULP_PRSR_ACT_DEFAULT);
694 size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
695 ulp_rte_prsr_fld_mask(params, &idx, size,
696 ulp_deference_struct(eth_spec, src.addr_bytes),
697 ulp_deference_struct(eth_mask, src.addr_bytes),
698 ULP_PRSR_ACT_DEFAULT);
700 size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
701 ulp_rte_prsr_fld_mask(params, &idx, size,
702 ulp_deference_struct(eth_spec, type),
703 ulp_deference_struct(eth_mask, type),
704 ULP_PRSR_ACT_MATCH_IGNORE);
706 /* Update the protocol hdr bitmap */
/* any prior outer header means this ETH belongs to the inner frame */
707 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
708 BNXT_ULP_HDR_BIT_O_ETH) ||
709 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
710 BNXT_ULP_HDR_BIT_O_IPV4) ||
711 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
712 BNXT_ULP_HDR_BIT_O_IPV6) ||
713 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
714 BNXT_ULP_HDR_BIT_O_UDP) ||
715 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
716 BNXT_ULP_HDR_BIT_O_TCP)) {
717 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
720 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
722 /* Update the field protocol hdr bitmap */
723 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
725 return BNXT_TF_RC_SUCCESS;
728 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Parses a VLAN item: splits the TCI into priority (3-bit PCP, ignored for
 * matching — OVS workaround) and 12-bit tag, widens all-ones 3/12-bit
 * masks to full 16-bit exact-match masks, copies priority/tag/inner-type
 * into hdr_field slots, then classifies the tag as OO/OI/IO/II VLAN based
 * on which ETH headers have been seen and the current tag counts.
 * Errors out if a VLAN appears with no preceding ETH header.
 */
730 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
731 struct ulp_rte_parser_params *params)
733 const struct rte_flow_item_vlan *vlan_spec = item->spec;
734 const struct rte_flow_item_vlan *vlan_mask = item->mask;
735 struct ulp_rte_hdr_bitmap *hdr_bit;
737 uint16_t vlan_tag = 0, priority = 0;
738 uint16_t vlan_tag_mask = 0, priority_mask = 0;
739 uint32_t outer_vtag_num;
740 uint32_t inner_vtag_num;
741 uint16_t eth_type = 0;
742 uint32_t inner_flag = 0;
/* TCI spec: extract PCP and tag, both kept in network byte order */
746 vlan_tag = ntohs(vlan_spec->tci);
747 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
748 vlan_tag &= ULP_VLAN_TAG_MASK;
749 vlan_tag = htons(vlan_tag);
750 eth_type = vlan_spec->inner_type;
/* TCI mask: same split as the spec */
754 vlan_tag_mask = ntohs(vlan_mask->tci);
755 priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
756 vlan_tag_mask &= 0xfff;
759 * the storage for priority and vlan tag is 2 bytes
760 * The mask of priority which is 3 bits if it is all 1's
761 * then make the rest bits 13 bits as 1's
762 * so that it is matched as exact match.
764 if (priority_mask == ULP_VLAN_PRIORITY_MASK)
765 priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
766 if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
767 vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
768 vlan_tag_mask = htons(vlan_tag_mask);
771 if (ulp_rte_prsr_fld_size_validate(params, &idx,
772 BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
773 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
774 return BNXT_TF_RC_ERROR;
778 * Copy the rte_flow_item for vlan into hdr_field using Vlan
781 size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
783 * The priority field is ignored since OVS is setting it as
784 * wild card match and it is not supported. This is a work
785 * around and shall be addressed in the future.
787 ulp_rte_prsr_fld_mask(params, &idx, size,
790 ULP_PRSR_ACT_MASK_IGNORE);
/* vlan tag field (spec/mask arg lines missing from this extract) */
792 ulp_rte_prsr_fld_mask(params, &idx, size,
795 ULP_PRSR_ACT_DEFAULT);
797 size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
798 ulp_rte_prsr_fld_mask(params, &idx, size,
799 ulp_deference_struct(vlan_spec, inner_type),
800 ulp_deference_struct(vlan_mask, inner_type),
801 ULP_PRSR_ACT_MATCH_IGNORE);
803 /* Get the outer tag and inner tag counts */
804 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
805 BNXT_ULP_CF_IDX_O_VTAG_NUM);
806 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
807 BNXT_ULP_CF_IDX_I_VTAG_NUM);
809 /* Update the hdr_bitmap of the vlans */
810 hdr_bit = ¶ms->hdr_bitmap;
/* outer ETH only, first tag -> outer-outer (OO) VLAN */
811 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
812 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
814 /* Update the vlan tag num */
816 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
818 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
819 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
820 ULP_BITMAP_SET(params->hdr_bitmap.bits,
821 BNXT_ULP_HDR_BIT_OO_VLAN);
/* outer ETH only, second tag -> outer-inner (OI) VLAN */
822 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
823 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
824 outer_vtag_num == 1) {
825 /* update the vlan tag num */
827 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
829 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
830 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
831 ULP_BITMAP_SET(params->hdr_bitmap.bits,
832 BNXT_ULP_HDR_BIT_OI_VLAN);
/* inner ETH present, first inner tag -> inner-outer (IO) VLAN */
833 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
834 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
836 /* update the vlan tag num */
838 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
840 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
841 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
842 ULP_BITMAP_SET(params->hdr_bitmap.bits,
843 BNXT_ULP_HDR_BIT_IO_VLAN);
/* inner ETH present, second inner tag -> inner-inner (II) VLAN */
845 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
846 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
847 inner_vtag_num == 1) {
848 /* update the vlan tag num */
850 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
852 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
853 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
854 ULP_BITMAP_SET(params->hdr_bitmap.bits,
855 BNXT_ULP_HDR_BIT_II_VLAN);
858 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
859 return BNXT_TF_RC_ERROR;
861 /* Update the field protocol hdr bitmap */
862 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
863 return BNXT_TF_RC_SUCCESS;
866 /* Function to handle the update of proto header based on field values */
/*
 * Maps an IP protocol number to L4/tunnel header bits and computed fields:
 * UDP/TCP set the inner or outer L4 bits (per 'in_flag'), GRE sets the
 * tunnel-GRE bit, ICMP picks inner vs outer by the L3_TUN flag.  The tail
 * (partially missing here) records the protocol id in the I/O L3 proto
 * computed fields.
 */
868 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
869 uint8_t proto, uint32_t in_flag)
871 if (proto == IPPROTO_UDP) {
873 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
874 BNXT_ULP_HDR_BIT_I_UDP);
875 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
877 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
878 BNXT_ULP_HDR_BIT_O_UDP);
879 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
881 } else if (proto == IPPROTO_TCP) {
883 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
884 BNXT_ULP_HDR_BIT_I_TCP);
885 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
887 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
888 BNXT_ULP_HDR_BIT_O_TCP);
889 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
891 } else if (proto == IPPROTO_GRE) {
892 ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
893 } else if (proto == IPPROTO_ICMP) {
/* inside an L3 tunnel the ICMP belongs to the inner frame */
894 if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
895 ULP_BITMAP_SET(param->hdr_bitmap.bits,
896 BNXT_ULP_HDR_BIT_I_ICMP);
898 ULP_BITMAP_SET(param->hdr_bitmap.bits,
899 BNXT_ULP_HDR_BIT_O_ICMP);
903 ULP_COMP_FLD_IDX_WR(param,
904 BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
906 ULP_COMP_FLD_IDX_WR(param,
907 BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
910 ULP_COMP_FLD_IDX_WR(param,
911 BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
913 ULP_COMP_FLD_IDX_WR(param,
914 BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
920 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * Parses an IPv4 item: rejects a third L3 header, handles the F2 (no-ETH)
 * flow case by skipping the would-be ETH/VLAN hdr_field slots, copies
 * every ipv4 header field into hdr_field (tos with MASK_IGNORE, proto with
 * MATCH_IGNORE — both OVS workarounds), sets the inner/outer IPv4 header
 * bit, derives L4/tunnel bits from next_proto_id (masked by the item's
 * mask), and bumps the L3 header count.
 */
922 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
923 struct ulp_rte_parser_params *params)
925 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
926 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
927 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
931 uint32_t inner_flag = 0;
934 /* validate there are no 3rd L3 header */
935 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
937 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
938 return BNXT_TF_RC_ERROR;
941 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
942 BNXT_ULP_HDR_BIT_O_ETH) &&
943 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
944 BNXT_ULP_HDR_BIT_I_ETH)) {
945 /* Since F2 flow does not include eth item, when parser detects
946 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
947 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
948 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
949 * This will allow the parser post processor to update the
950 * t_dmac in hdr_field[o_eth.dmac]
952 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
953 BNXT_ULP_PROTO_HDR_VLAN_NUM);
954 params->field_idx = idx;
957 if (ulp_rte_prsr_fld_size_validate(params, &idx,
958 BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
959 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
960 return BNXT_TF_RC_ERROR;
964 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
967 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
968 ulp_rte_prsr_fld_mask(params, &idx, size,
969 ulp_deference_struct(ipv4_spec, hdr.version_ihl),
970 ulp_deference_struct(ipv4_mask, hdr.version_ihl),
971 ULP_PRSR_ACT_DEFAULT);
974 * The tos field is ignored since OVS is setting it as wild card
975 * match and it is not supported. This is a work around and
976 * shall be addressed in the future.
978 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
979 ulp_rte_prsr_fld_mask(params, &idx, size,
980 ulp_deference_struct(ipv4_spec,
981 hdr.type_of_service),
982 ulp_deference_struct(ipv4_mask,
983 hdr.type_of_service),
984 ULP_PRSR_ACT_MASK_IGNORE);
986 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
987 ulp_rte_prsr_fld_mask(params, &idx, size,
988 ulp_deference_struct(ipv4_spec, hdr.total_length),
989 ulp_deference_struct(ipv4_mask, hdr.total_length),
990 ULP_PRSR_ACT_DEFAULT);
992 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
993 ulp_rte_prsr_fld_mask(params, &idx, size,
994 ulp_deference_struct(ipv4_spec, hdr.packet_id),
995 ulp_deference_struct(ipv4_mask, hdr.packet_id),
996 ULP_PRSR_ACT_DEFAULT);
998 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
999 ulp_rte_prsr_fld_mask(params, &idx, size,
1000 ulp_deference_struct(ipv4_spec,
1001 hdr.fragment_offset),
1002 ulp_deference_struct(ipv4_mask,
1003 hdr.fragment_offset),
1004 ULP_PRSR_ACT_DEFAULT);
1006 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
1007 ulp_rte_prsr_fld_mask(params, &idx, size,
1008 ulp_deference_struct(ipv4_spec, hdr.time_to_live),
1009 ulp_deference_struct(ipv4_mask, hdr.time_to_live),
1010 ULP_PRSR_ACT_DEFAULT);
1012 /* Ignore proto for matching templates */
1013 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
1014 ulp_rte_prsr_fld_mask(params, &idx, size,
1015 ulp_deference_struct(ipv4_spec,
1017 ulp_deference_struct(ipv4_mask,
1019 ULP_PRSR_ACT_MATCH_IGNORE);
1021 proto = ipv4_spec->hdr.next_proto_id;
1023 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
1024 ulp_rte_prsr_fld_mask(params, &idx, size,
1025 ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
1026 ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
1027 ULP_PRSR_ACT_DEFAULT);
1029 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
1030 ulp_rte_prsr_fld_mask(params, &idx, size,
1031 ulp_deference_struct(ipv4_spec, hdr.src_addr),
1032 ulp_deference_struct(ipv4_mask, hdr.src_addr),
1033 ULP_PRSR_ACT_DEFAULT);
1035 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
1036 ulp_rte_prsr_fld_mask(params, &idx, size,
1037 ulp_deference_struct(ipv4_spec, hdr.dst_addr),
1038 ulp_deference_struct(ipv4_mask, hdr.dst_addr),
1039 ULP_PRSR_ACT_DEFAULT);
1041 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1042 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1043 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1044 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1045 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1048 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1049 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1052 /* Some of the PMD applications may set the protocol field
1053 * in the IPv4 spec but don't set the mask. So, consider
1054 * the mask in the proto value calculation.
1057 proto &= ipv4_mask->hdr.next_proto_id;
1059 /* Update the field protocol hdr bitmap */
1060 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1061 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1062 return BNXT_TF_RC_SUCCESS;
1065 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * Splits the IPv6 vtc_flow word into version / traffic-class / flow-label
 * spec+mask values, copies each IPv6 header field into params->hdr_field,
 * and updates the header bitmaps (outer vs inner L3).
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L3 header
 * is seen or the field-count validation fails.
 * NOTE(review): this listing is elided; some guard lines (e.g. the cnt==2
 * check and the ipv6_spec/ipv6_mask NULL checks) are not visible here.
 */
1067 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1068 struct ulp_rte_parser_params *params)
1070 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1071 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1072 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1075 uint32_t ver_spec = 0, ver_mask = 0;
1076 uint32_t tc_spec = 0, tc_mask = 0;
1077 uint32_t lab_spec = 0, lab_mask = 0;
1079 uint32_t inner_flag = 0;
1082 /* validate there are no 3rd L3 header */
1083 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1085 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1086 return BNXT_TF_RC_ERROR;
1089 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1090 BNXT_ULP_HDR_BIT_O_ETH) &&
1091 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1092 BNXT_ULP_HDR_BIT_I_ETH)) {
1093 /* Since F2 flow does not include eth item, when parser detects
1094 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1095 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
1096 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1097 * This will allow the parser post processor to update the
1098 * t_dmac in hdr_field[o_eth.dmac]
1100 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1101 BNXT_ULP_PROTO_HDR_VLAN_NUM);
1102 params->field_idx = idx;
1105 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1106 BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
1107 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1108 return BNXT_TF_RC_ERROR;
1112 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* Extract version/TC/flow-label sub-fields from the 32-bit vtc_flow word */
1116 ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1117 tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1118 lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1119 proto = ipv6_spec->hdr.proto;
1123 ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1124 tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1125 lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1127 /* Some of the PMD applications may set the protocol field
1128 * in the IPv6 spec but don't set the mask. So, consider
1129 * the mask in proto value calculation.
1131 proto &= ipv6_mask->hdr.proto;
1134 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
1135 ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
1136 ULP_PRSR_ACT_DEFAULT);
1138 * The TC and flow label field are ignored since OVS is setting
1139 * it for match and it is not supported.
1140 * This is a work around and
1141 * shall be addressed in the future.
1143 ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
1144 ULP_PRSR_ACT_MASK_IGNORE);
1145 ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
1146 ULP_PRSR_ACT_MASK_IGNORE);
1148 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
1149 ulp_rte_prsr_fld_mask(params, &idx, size,
1150 ulp_deference_struct(ipv6_spec, hdr.payload_len),
1151 ulp_deference_struct(ipv6_mask, hdr.payload_len),
1152 ULP_PRSR_ACT_DEFAULT);
1154 /* Ignore proto for template matching */
1155 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
1156 ulp_rte_prsr_fld_mask(params, &idx, size,
1157 ulp_deference_struct(ipv6_spec, hdr.proto),
1158 ulp_deference_struct(ipv6_mask, hdr.proto),
1159 ULP_PRSR_ACT_MATCH_IGNORE);
1161 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
1162 ulp_rte_prsr_fld_mask(params, &idx, size,
1163 ulp_deference_struct(ipv6_spec, hdr.hop_limits),
1164 ulp_deference_struct(ipv6_mask, hdr.hop_limits),
1165 ULP_PRSR_ACT_DEFAULT);
1167 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
1168 ulp_rte_prsr_fld_mask(params, &idx, size,
1169 ulp_deference_struct(ipv6_spec, hdr.src_addr),
1170 ulp_deference_struct(ipv6_mask, hdr.src_addr),
1171 ULP_PRSR_ACT_DEFAULT);
1173 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
1174 ulp_rte_prsr_fld_mask(params, &idx, size,
1175 ulp_deference_struct(ipv6_spec, hdr.dst_addr),
1176 ulp_deference_struct(ipv6_mask, hdr.dst_addr),
1177 ULP_PRSR_ACT_DEFAULT);
1179 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
/* An outer L3 header was already parsed, so this one is the inner L3 */
1180 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1181 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1182 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1183 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1186 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1187 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1190 /* Update the field protocol hdr bitmap */
1191 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1192 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1194 return BNXT_TF_RC_SUCCESS;
1197 /* Function to handle the update of proto header based on field values */
/*
 * If the (big-endian) L4 destination port equals the VXLAN well-known port
 * (4789), set the T_VXLAN fast-path header bit; if a VXLAN or GRE tunnel
 * header bit is already present in hdr_bitmap, mark the flow as an L3
 * tunnel via the L3_TUN computed field.
 * NOTE(review): dst_port is expected in network byte order (compared
 * against tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)).
 */
1199 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1202 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1203 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1204 BNXT_ULP_HDR_BIT_T_VXLAN);
1206 if (ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1207 BNXT_ULP_HDR_BIT_T_VXLAN) ||
1208 ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1209 BNXT_ULP_HDR_BIT_T_GRE))
1210 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1213 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies UDP spec/mask fields (src/dst port, dgram_len, dgram_cksum) into
 * params->hdr_field, updates the outer/inner L4 bitmaps and the computed
 * port fields, and finally runs VXLAN-port detection on the dst port.
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR on a third L4 header or
 * field-count overflow.
 * NOTE(review): listing is elided; the cnt==2 guard and the udp_spec NULL
 * check around the sport/dport capture are not visible here.
 */
1215 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1216 struct ulp_rte_parser_params *params)
1218 const struct rte_flow_item_udp *udp_spec = item->spec;
1219 const struct rte_flow_item_udp *udp_mask = item->mask;
1220 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1223 uint16_t dport = 0, sport = 0;
1226 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1228 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1229 return BNXT_TF_RC_ERROR;
/* sport/dport are kept in network byte order here */
1233 sport = udp_spec->hdr.src_port;
1234 dport = udp_spec->hdr.dst_port;
1237 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1238 BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1239 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1240 return BNXT_TF_RC_ERROR;
1244 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1247 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1248 ulp_rte_prsr_fld_mask(params, &idx, size,
1249 ulp_deference_struct(udp_spec, hdr.src_port),
1250 ulp_deference_struct(udp_mask, hdr.src_port),
1251 ULP_PRSR_ACT_DEFAULT);
1253 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1254 ulp_rte_prsr_fld_mask(params, &idx, size,
1255 ulp_deference_struct(udp_spec, hdr.dst_port),
1256 ulp_deference_struct(udp_mask, hdr.dst_port),
1257 ULP_PRSR_ACT_DEFAULT);
1259 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1260 ulp_rte_prsr_fld_mask(params, &idx, size,
1261 ulp_deference_struct(udp_spec, hdr.dgram_len),
1262 ulp_deference_struct(udp_mask, hdr.dgram_len),
1263 ULP_PRSR_ACT_DEFAULT);
1265 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1266 ulp_rte_prsr_fld_mask(params, &idx, size,
1267 ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1268 ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1269 ULP_PRSR_ACT_DEFAULT);
1271 /* Set the udp header bitmap and computed l4 header bitmaps */
/* An outer L4 already parsed => this UDP is the inner L4 */
1272 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1273 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1274 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1275 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1276 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1277 (uint32_t)rte_be_to_cpu_16(sport));
1278 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1279 (uint32_t)rte_be_to_cpu_16(dport));
1280 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1282 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
/* "FB" fields are only set when the caller actually masked the port */
1284 if (udp_mask && udp_mask->hdr.src_port)
1285 ULP_COMP_FLD_IDX_WR(params,
1286 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1288 if (udp_mask && udp_mask->hdr.dst_port)
1289 ULP_COMP_FLD_IDX_WR(params,
1290 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1293 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1294 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1295 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1296 (uint32_t)rte_be_to_cpu_16(sport));
1297 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1298 (uint32_t)rte_be_to_cpu_16(dport));
1299 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1301 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1303 if (udp_mask && udp_mask->hdr.src_port)
1304 ULP_COMP_FLD_IDX_WR(params,
1305 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1307 if (udp_mask && udp_mask->hdr.dst_port)
1308 ULP_COMP_FLD_IDX_WR(params,
1309 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1312 /* Update the field protocol hdr bitmap */
/* dport drives VXLAN tunnel detection (port 4789) */
1313 ulp_rte_l4_proto_type_update(params, dport);
1315 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1316 return BNXT_TF_RC_SUCCESS;
1319 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies every TCP spec/mask field (ports, seq/ack, data_off, flags,
 * rx_win, cksum, urgent ptr) into params->hdr_field and updates the
 * outer/inner L4 bitmaps and computed port fields, mirroring the UDP
 * handler above.  Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 * NOTE(review): unlike the UDP handler, no ulp_rte_l4_proto_type_update()
 * call is visible here — TCP cannot carry VXLAN, so only the header
 * count is bumped at the end.
 */
1321 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1322 struct ulp_rte_parser_params *params)
1324 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1325 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1326 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1328 uint16_t dport = 0, sport = 0;
1332 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1334 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1335 return BNXT_TF_RC_ERROR;
/* ports kept in network byte order */
1339 sport = tcp_spec->hdr.src_port;
1340 dport = tcp_spec->hdr.dst_port;
1343 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1344 BNXT_ULP_PROTO_HDR_TCP_NUM)) {
1345 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1346 return BNXT_TF_RC_ERROR;
1350 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1353 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1354 ulp_rte_prsr_fld_mask(params, &idx, size,
1355 ulp_deference_struct(tcp_spec, hdr.src_port),
1356 ulp_deference_struct(tcp_mask, hdr.src_port),
1357 ULP_PRSR_ACT_DEFAULT);
1359 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1360 ulp_rte_prsr_fld_mask(params, &idx, size,
1361 ulp_deference_struct(tcp_spec, hdr.dst_port),
1362 ulp_deference_struct(tcp_mask, hdr.dst_port),
1363 ULP_PRSR_ACT_DEFAULT);
1365 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1366 ulp_rte_prsr_fld_mask(params, &idx, size,
1367 ulp_deference_struct(tcp_spec, hdr.sent_seq),
1368 ulp_deference_struct(tcp_mask, hdr.sent_seq),
1369 ULP_PRSR_ACT_DEFAULT);
1371 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1372 ulp_rte_prsr_fld_mask(params, &idx, size,
1373 ulp_deference_struct(tcp_spec, hdr.recv_ack),
1374 ulp_deference_struct(tcp_mask, hdr.recv_ack),
1375 ULP_PRSR_ACT_DEFAULT);
1377 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1378 ulp_rte_prsr_fld_mask(params, &idx, size,
1379 ulp_deference_struct(tcp_spec, hdr.data_off),
1380 ulp_deference_struct(tcp_mask, hdr.data_off),
1381 ULP_PRSR_ACT_DEFAULT);
1383 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1384 ulp_rte_prsr_fld_mask(params, &idx, size,
1385 ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1386 ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1387 ULP_PRSR_ACT_DEFAULT);
1389 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1390 ulp_rte_prsr_fld_mask(params, &idx, size,
1391 ulp_deference_struct(tcp_spec, hdr.rx_win),
1392 ulp_deference_struct(tcp_mask, hdr.rx_win),
1393 ULP_PRSR_ACT_DEFAULT);
1395 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1396 ulp_rte_prsr_fld_mask(params, &idx, size,
1397 ulp_deference_struct(tcp_spec, hdr.cksum),
1398 ulp_deference_struct(tcp_mask, hdr.cksum),
1399 ULP_PRSR_ACT_DEFAULT);
1401 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1402 ulp_rte_prsr_fld_mask(params, &idx, size,
1403 ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1404 ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1405 ULP_PRSR_ACT_DEFAULT);
1407 /* Set the udp header bitmap and computed l4 header bitmaps */
1408 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1409 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1410 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1411 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1412 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1413 (uint32_t)rte_be_to_cpu_16(sport));
1414 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1415 (uint32_t)rte_be_to_cpu_16(dport));
1416 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1418 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1420 if (tcp_mask && tcp_mask->hdr.src_port)
1421 ULP_COMP_FLD_IDX_WR(params,
1422 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1424 if (tcp_mask && tcp_mask->hdr.dst_port)
1425 ULP_COMP_FLD_IDX_WR(params,
1426 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1429 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1430 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1431 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1432 (uint32_t)rte_be_to_cpu_16(sport));
1433 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1434 (uint32_t)rte_be_to_cpu_16(dport));
1435 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1437 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1439 if (tcp_mask && tcp_mask->hdr.src_port)
1440 ULP_COMP_FLD_IDX_WR(params,
1441 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1443 if (tcp_mask && tcp_mask->hdr.dst_port)
1444 ULP_COMP_FLD_IDX_WR(params,
1445 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1448 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1449 return BNXT_TF_RC_SUCCESS;
1452 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies the VXLAN item fields (flags, rsvd0, vni, rsvd1) into
 * params->hdr_field, sets the T_VXLAN header bit, and marks the flow as
 * an L3 tunnel via ulp_rte_l4_proto_type_update(params, 0) — the port
 * argument 0 never matches the VXLAN port, so only the tunnel/L3_TUN
 * computed-field update takes effect.
 */
1454 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1455 struct ulp_rte_parser_params *params)
1457 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1458 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1459 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1463 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1464 BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1465 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1466 return BNXT_TF_RC_ERROR;
1470 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1473 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1474 ulp_rte_prsr_fld_mask(params, &idx, size,
1475 ulp_deference_struct(vxlan_spec, flags),
1476 ulp_deference_struct(vxlan_mask, flags),
1477 ULP_PRSR_ACT_DEFAULT);
1479 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1480 ulp_rte_prsr_fld_mask(params, &idx, size,
1481 ulp_deference_struct(vxlan_spec, rsvd0),
1482 ulp_deference_struct(vxlan_mask, rsvd0),
1483 ULP_PRSR_ACT_DEFAULT);
1485 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1486 ulp_rte_prsr_fld_mask(params, &idx, size,
1487 ulp_deference_struct(vxlan_spec, vni),
1488 ulp_deference_struct(vxlan_mask, vni),
1489 ULP_PRSR_ACT_DEFAULT);
1491 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1492 ulp_rte_prsr_fld_mask(params, &idx, size,
1493 ulp_deference_struct(vxlan_spec, rsvd1),
1494 ulp_deference_struct(vxlan_mask, rsvd1),
1495 ULP_PRSR_ACT_DEFAULT);
1497 /* Update the hdr_bitmap with vxlan */
1498 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1499 ulp_rte_l4_proto_type_update(params, 0);
1500 return BNXT_TF_RC_SUCCESS;
1503 /* Function to handle the parsing of RTE Flow item GRE Header. */
/*
 * Copies the GRE item fields (c_rsvd0_ver, protocol) into
 * params->hdr_field, sets the T_GRE header bit, and marks the flow as an
 * L3 tunnel via ulp_rte_l4_proto_type_update(params, 0) (port 0 only
 * triggers the tunnel-bit / L3_TUN path, never VXLAN detection).
 */
1505 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1506 struct ulp_rte_parser_params *params)
1508 const struct rte_flow_item_gre *gre_spec = item->spec;
1509 const struct rte_flow_item_gre *gre_mask = item->mask;
1510 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1514 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1515 BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1516 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1517 return BNXT_TF_RC_ERROR;
1520 size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1521 ulp_rte_prsr_fld_mask(params, &idx, size,
1522 ulp_deference_struct(gre_spec, c_rsvd0_ver),
1523 ulp_deference_struct(gre_mask, c_rsvd0_ver),
1524 ULP_PRSR_ACT_DEFAULT);
1526 size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1527 ulp_rte_prsr_fld_mask(params, &idx, size,
1528 ulp_deference_struct(gre_spec, protocol),
1529 ulp_deference_struct(gre_mask, protocol),
1530 ULP_PRSR_ACT_DEFAULT);
1532 /* Update the hdr_bitmap with GRE */
1533 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1534 ulp_rte_l4_proto_type_update(params, 0);
1535 return BNXT_TF_RC_SUCCESS;
1538 /* Function to handle the parsing of RTE Flow item ANY. */
/* No-op: the ANY item imposes no match constraints, so nothing is parsed. */
1540 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1541 struct ulp_rte_parser_params *params __rte_unused)
1543 return BNXT_TF_RC_SUCCESS;
1546 /* Function to handle the parsing of RTE Flow item ICMP Header. */
/*
 * Copies the ICMP spec/mask fields (type, code, cksum, ident, seq_nb)
 * into params->hdr_field.  The header bit is chosen by the L3_TUN
 * computed field: inner ICMP when the flow is tunneled, outer otherwise.
 */
1548 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1549 struct ulp_rte_parser_params *params)
1551 const struct rte_flow_item_icmp *icmp_spec = item->spec;
1552 const struct rte_flow_item_icmp *icmp_mask = item->mask;
1553 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1557 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1558 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1559 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1560 return BNXT_TF_RC_ERROR;
1563 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1564 ulp_rte_prsr_fld_mask(params, &idx, size,
1565 ulp_deference_struct(icmp_spec, hdr.icmp_type),
1566 ulp_deference_struct(icmp_mask, hdr.icmp_type),
1567 ULP_PRSR_ACT_DEFAULT);
1569 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1570 ulp_rte_prsr_fld_mask(params, &idx, size,
1571 ulp_deference_struct(icmp_spec, hdr.icmp_code),
1572 ulp_deference_struct(icmp_mask, hdr.icmp_code),
1573 ULP_PRSR_ACT_DEFAULT);
1575 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1576 ulp_rte_prsr_fld_mask(params, &idx, size,
1577 ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1578 ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1579 ULP_PRSR_ACT_DEFAULT);
1581 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1582 ulp_rte_prsr_fld_mask(params, &idx, size,
1583 ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1584 ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1585 ULP_PRSR_ACT_DEFAULT);
1587 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1588 ulp_rte_prsr_fld_mask(params, &idx, size,
1589 ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1590 ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1591 ULP_PRSR_ACT_DEFAULT);
1593 /* Update the hdr_bitmap with ICMP */
1594 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1595 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1597 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1598 return BNXT_TF_RC_SUCCESS;
1601 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
/*
 * Copies the ICMPv6 spec/mask fields (type, code, checksum) into
 * params->hdr_field.  Rejects the flow if an outer IPv4 header was
 * already parsed, since ICMPv6 is only valid over IPv6.  Like the ICMP
 * handler, the tunnel computed field selects inner vs outer ICMP bit.
 */
1603 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1604 struct ulp_rte_parser_params *params)
1606 const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1607 const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1608 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1612 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1613 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1614 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1615 return BNXT_TF_RC_ERROR;
1618 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1619 ulp_rte_prsr_fld_mask(params, &idx, size,
1620 ulp_deference_struct(icmp_spec, type),
1621 ulp_deference_struct(icmp_mask, type),
1622 ULP_PRSR_ACT_DEFAULT);
1624 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1625 ulp_rte_prsr_fld_mask(params, &idx, size,
1626 ulp_deference_struct(icmp_spec, code),
1627 ulp_deference_struct(icmp_mask, code),
1628 ULP_PRSR_ACT_DEFAULT);
1630 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
1631 ulp_rte_prsr_fld_mask(params, &idx, size,
1632 ulp_deference_struct(icmp_spec, checksum),
1633 ulp_deference_struct(icmp_mask, checksum),
1634 ULP_PRSR_ACT_DEFAULT);
/* ICMPv6 on top of an IPv4 outer header is an invalid combination */
1636 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
1637 BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
1638 return BNXT_TF_RC_ERROR;
1641 /* Update the hdr_bitmap with ICMP */
1642 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1643 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1645 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1646 return BNXT_TF_RC_SUCCESS;
1649 /* Function to handle the parsing of RTE Flow item void Header */
/* No-op: VOID items carry no match data and are simply accepted. */
1651 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1652 struct ulp_rte_parser_params *params __rte_unused)
1654 return BNXT_TF_RC_SUCCESS;
1657 /* Function to handle the parsing of RTE Flow action void Header. */
/* No-op: VOID actions have no effect and are simply accepted. */
1659 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1660 struct ulp_rte_parser_params *params __rte_unused)
1662 return BNXT_TF_RC_SUCCESS;
1665 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the MARK action's id (converted to big endian) into the action
 * property table and sets the MARK action bit.
 * NOTE(review): the error return at the bottom is presumably guarded by a
 * NULL-check on action_item->conf whose line is elided from this view.
 */
1667 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1668 struct ulp_rte_parser_params *param)
1670 const struct rte_flow_action_mark *mark;
1671 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1674 mark = action_item->conf;
1676 mark_id = tfp_cpu_to_be_32(mark->id);
1677 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1678 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1680 /* Update the hdr_bitmap with vxlan */
1681 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1682 return BNXT_TF_RC_SUCCESS;
1684 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1685 return BNXT_TF_RC_ERROR;
1688 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Validates the RSS action configuration, copies types/level/key_len and
 * the hash key into the action property table, then sets the RSS action
 * bit.  The key length is bounds-checked against the destination buffer
 * before copying.  Returns BNXT_TF_RC_ERROR on NULL conf or oversized key.
 */
1690 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1691 struct ulp_rte_parser_params *param)
1693 const struct rte_flow_action_rss *rss;
1694 struct ulp_rte_act_prop *ap = ¶m->act_prop;
1696 if (action_item == NULL || action_item->conf == NULL) {
1697 BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1698 return BNXT_TF_RC_ERROR;
1701 rss = action_item->conf;
1702 /* Copy the rss into the specific action properties */
1703 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1704 BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1705 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1706 BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1707 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1708 &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
/* Reject keys longer than the property slot to avoid overflowing it */
1710 if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1711 BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1712 return BNXT_TF_RC_ERROR;
1714 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1717 /* set the RSS action header bit */
1718 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1720 return BNXT_TF_RC_SUCCESS;
1723 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walks the vxlan_encap action's item list (ETH -> [VLAN [VLAN]] ->
 * IPv4|IPv6 -> UDP -> VXLAN) and builds the hardware encapsulation
 * record in params->act_prop: L2 MACs, optional VLAN tags, the L3
 * header (with defaults filled in when the spec's leading bytes are
 * empty), the UDP header and the VXLAN header, each via
 * ulp_encap_buffer_copy with the required byte alignment.  Sets the
 * VXLAN_ENCAP action bit on success.
 * NOTE(review): this listing is elided; the declarations of `buff` and
 * several NULL-check/else lines (e.g. for vxlan_encap and item) are not
 * visible here.
 */
1725 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1726 struct ulp_rte_parser_params *params)
1728 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1729 const struct rte_flow_item *item;
1730 const struct rte_flow_item_eth *eth_spec;
1731 const struct rte_flow_item_ipv4 *ipv4_spec;
1732 const struct rte_flow_item_ipv6 *ipv6_spec;
1733 struct rte_flow_item_vxlan vxlan_spec;
1734 uint32_t vlan_num = 0, vlan_size = 0;
1735 uint32_t ip_size = 0, ip_type = 0;
1736 uint32_t vxlan_size = 0;
1738 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1739 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1741 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1742 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1744 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1745 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1746 const uint8_t *tmp_buff;
1748 vxlan_encap = action_item->conf;
1750 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1751 return BNXT_TF_RC_ERROR;
1754 item = vxlan_encap->definition;
1756 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1757 return BNXT_TF_RC_ERROR;
1760 if (!ulp_rte_item_skip_void(&item, 0))
1761 return BNXT_TF_RC_ERROR;
1763 /* must have ethernet header */
1764 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1765 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1766 return BNXT_TF_RC_ERROR;
1768 eth_spec = item->spec;
1769 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1770 ulp_encap_buffer_copy(buff,
1771 eth_spec->dst.addr_bytes,
1772 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1773 ULP_BUFFER_ALIGN_8_BYTE);
1775 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1776 ulp_encap_buffer_copy(buff,
1777 eth_spec->src.addr_bytes,
1778 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1779 ULP_BUFFER_ALIGN_8_BYTE);
1781 /* Goto the next item */
1782 if (!ulp_rte_item_skip_void(&item, 1))
1783 return BNXT_TF_RC_ERROR;
1785 /* May have vlan header */
1786 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1788 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1789 ulp_encap_buffer_copy(buff,
1791 sizeof(struct rte_flow_item_vlan),
1792 ULP_BUFFER_ALIGN_8_BYTE);
1794 if (!ulp_rte_item_skip_void(&item, 1))
1795 return BNXT_TF_RC_ERROR;
1798 /* may have two vlan headers */
1799 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
/* Second tag is appended directly after the first in the VTAG buffer */
1801 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1802 sizeof(struct rte_flow_item_vlan)],
1804 sizeof(struct rte_flow_item_vlan));
1805 if (!ulp_rte_item_skip_void(&item, 1))
1806 return BNXT_TF_RC_ERROR;
1808 /* Update the vlan count and size of more than one */
1810 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1811 vlan_num = tfp_cpu_to_be_32(vlan_num);
1812 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1815 vlan_size = tfp_cpu_to_be_32(vlan_size);
1816 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1821 /* L3 must be IPv4, IPv6 */
1822 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1823 ipv4_spec = item->spec;
1824 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1826 /* copy the ipv4 details */
/* Empty leading bytes in the spec => fall back to the default header */
1827 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1828 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1829 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1830 ulp_encap_buffer_copy(buff,
1832 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1833 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1834 ULP_BUFFER_ALIGN_8_BYTE);
1836 /* Total length being ignored in the ip hdr. */
1837 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1838 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1839 ulp_encap_buffer_copy(buff,
1841 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1842 ULP_BUFFER_ALIGN_8_BYTE);
1843 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1844 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1845 ulp_encap_buffer_copy(buff,
1846 &ipv4_spec->hdr.version_ihl,
1847 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1848 ULP_BUFFER_ALIGN_8_BYTE);
1851 /* Update the dst ip address in ip encap buffer */
1852 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1853 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1854 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1855 ulp_encap_buffer_copy(buff,
1856 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1857 sizeof(ipv4_spec->hdr.dst_addr),
1858 ULP_BUFFER_ALIGN_8_BYTE);
1860 /* Update the src ip address */
1861 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1862 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1863 sizeof(ipv4_spec->hdr.src_addr)];
1864 ulp_encap_buffer_copy(buff,
1865 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1866 sizeof(ipv4_spec->hdr.src_addr),
1867 ULP_BUFFER_ALIGN_8_BYTE);
1869 /* Update the ip size details */
1870 ip_size = tfp_cpu_to_be_32(ip_size);
1871 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1872 &ip_size, sizeof(uint32_t));
1874 /* update the ip type */
1875 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1876 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1877 &ip_type, sizeof(uint32_t));
1879 /* update the computed field to notify it is ipv4 header */
1880 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1883 if (!ulp_rte_item_skip_void(&item, 1))
1884 return BNXT_TF_RC_ERROR;
1885 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1886 ipv6_spec = item->spec;
1887 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1889 /* copy the ipv6 details */
1890 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1891 if (ulp_buffer_is_empty(tmp_buff,
1892 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1893 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1894 ulp_encap_buffer_copy(buff,
1896 sizeof(def_ipv6_hdr),
1897 ULP_BUFFER_ALIGN_8_BYTE);
1899 /* The payload length being ignored in the ip hdr. */
1900 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1901 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1902 ulp_encap_buffer_copy(buff,
1904 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1905 ULP_BUFFER_ALIGN_8_BYTE);
1906 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1907 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1908 BNXT_ULP_ENCAP_IPV6_DO];
1909 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1910 ulp_encap_buffer_copy(buff,
1912 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1913 ULP_BUFFER_ALIGN_8_BYTE);
1915 /* Update the dst ip address in ip encap buffer */
1916 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1917 sizeof(def_ipv6_hdr)];
1918 ulp_encap_buffer_copy(buff,
1919 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1920 sizeof(ipv6_spec->hdr.dst_addr),
1921 ULP_BUFFER_ALIGN_8_BYTE);
1923 /* Update the src ip address */
1924 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1925 ulp_encap_buffer_copy(buff,
1926 (const uint8_t *)ipv6_spec->hdr.src_addr,
1927 sizeof(ipv6_spec->hdr.src_addr),
1928 ULP_BUFFER_ALIGN_16_BYTE);
1930 /* Update the ip size details */
1931 ip_size = tfp_cpu_to_be_32(ip_size);
1932 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1933 &ip_size, sizeof(uint32_t));
1935 /* update the ip type */
1936 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1937 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1938 &ip_type, sizeof(uint32_t));
1940 /* update the computed field to notify it is ipv6 header */
1941 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1944 if (!ulp_rte_item_skip_void(&item, 1))
1945 return BNXT_TF_RC_ERROR;
1947 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1948 return BNXT_TF_RC_ERROR;
1952 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1953 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1954 return BNXT_TF_RC_ERROR;
1956 /* copy the udp details */
1957 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1958 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1959 ULP_BUFFER_ALIGN_8_BYTE);
1961 if (!ulp_rte_item_skip_void(&item, 1))
1962 return BNXT_TF_RC_ERROR;
1965 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1966 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1967 return BNXT_TF_RC_ERROR;
1969 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1970 /* copy the vxlan details */
1971 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* Force the VXLAN "I" flag (valid VNI) per the header definition */
1972 vxlan_spec.flags = 0x08;
1973 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
/* IPv6 path copies the VXLAN header in two halves with separate alignment */
1974 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1975 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1976 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1978 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1979 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1980 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1981 (const uint8_t *)&vxlan_spec.vni,
1982 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1984 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1985 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1986 &vxlan_size, sizeof(uint32_t));
1988 /* update the hdr_bitmap with vxlan */
1989 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
1990 return BNXT_TF_RC_SUCCESS;
1993 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
/*
 * Handles the vxlan_decap action: sets the VXLAN_DECAP action bit and
 * records via computed fields that the flow is an L3 tunnel being
 * decapsulated.  Always succeeds.
 */
1995 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1997 struct ulp_rte_parser_params *params)
1999 /* update the hdr_bitmap with vxlan */
2000 ULP_BITMAP_SET(params->act_bitmap.bits,
2001 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2002 /* Update computational field with tunnel decap info */
2003 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2004 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
2005 return BNXT_TF_RC_SUCCESS;
2008 /* Function to handle the parsing of RTE Flow action drop Header. */
/* Sets the DROP action bit; no configuration is carried by this action. */
2010 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2011 struct ulp_rte_parser_params *params)
2013 /* Update the hdr_bitmap with drop */
2014 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2015 return BNXT_TF_RC_SUCCESS;
2018 /* Function to handle the parsing of RTE Flow action count. */
/*
 * Copies the COUNT action configuration into the action property table
 * and sets the COUNT action bit.  Shared counters are rejected with
 * BNXT_TF_RC_PARSE_ERR.
 * NOTE(review): the NULL-check guarding act_count before the ->shared
 * dereference is presumably on an elided line — confirm in the full file.
 */
2020 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2021 struct ulp_rte_parser_params *params)
2023 const struct rte_flow_action_count *act_count;
2024 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
2026 act_count = action_item->conf;
2028 if (act_count->shared) {
2030 "Parse Error:Shared count not supported\n");
2031 return BNXT_TF_RC_PARSE_ERR;
2033 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2035 BNXT_ULP_ACT_PROP_SZ_COUNT);
2038 /* Update the hdr_bitmap with count */
2039 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2040 return BNXT_TF_RC_SUCCESS;
2043 /* Function to handle the parsing of action ports. */
2045 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2048 enum bnxt_ulp_direction_type dir;
2051 struct ulp_rte_act_prop *act = ¶m->act_prop;
2052 enum bnxt_ulp_intf_type port_type;
2055 /* Get the direction */
2056 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
2057 if (dir == BNXT_ULP_DIR_EGRESS) {
2058 /* For egress direction, fill vport */
2059 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2060 return BNXT_TF_RC_ERROR;
2063 pid = rte_cpu_to_be_32(pid);
2064 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2065 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2067 /* For ingress direction, fill vnic */
2068 port_type = ULP_COMP_FLD_IDX_RD(param,
2069 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2070 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2071 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2073 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2075 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2077 return BNXT_TF_RC_ERROR;
2080 pid = rte_cpu_to_be_32(pid);
2081 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2082 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2085 /* Update the action port set bit */
2086 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2087 return BNXT_TF_RC_SUCCESS;
2090 /* Function to handle the parsing of RTE Flow action PF. */
2092 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2093 struct ulp_rte_parser_params *params)
2097 enum bnxt_ulp_intf_type intf_type;
2099 /* Get the port id of the current device */
2100 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2102 /* Get the port db ifindex */
2103 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2105 BNXT_TF_DBG(ERR, "Invalid port id\n");
2106 return BNXT_TF_RC_ERROR;
2109 /* Check the port is PF port */
2110 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2111 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2112 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2113 return BNXT_TF_RC_ERROR;
2115 /* Update the action properties */
2116 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2117 return ulp_rte_parser_act_port_set(params, ifindex);
2120 /* Function to handle the parsing of RTE Flow action VF. */
2122 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2123 struct ulp_rte_parser_params *params)
2125 const struct rte_flow_action_vf *vf_action;
2126 enum bnxt_ulp_intf_type intf_type;
2130 vf_action = action_item->conf;
2132 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2133 return BNXT_TF_RC_PARSE_ERR;
2136 if (vf_action->original) {
2137 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2138 return BNXT_TF_RC_PARSE_ERR;
2141 bp = bnxt_get_bp(params->port_id);
2143 BNXT_TF_DBG(ERR, "Invalid bp\n");
2144 return BNXT_TF_RC_ERROR;
2147 /* vf_action->id is a logical number which in this case is an
2148 * offset from the first VF. So, to get the absolute VF id, the
2149 * offset must be added to the absolute first vf id of that port.
2151 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2155 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2156 return BNXT_TF_RC_ERROR;
2158 /* Check the port is VF port */
2159 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2160 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2161 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2162 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2163 return BNXT_TF_RC_ERROR;
2166 /* Update the action properties */
2167 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2168 return ulp_rte_parser_act_port_set(params, ifindex);
2171 /* Function to handle the parsing of RTE Flow action port_id. */
2173 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
2174 struct ulp_rte_parser_params *param)
2176 const struct rte_flow_action_port_id *port_id = act_item->conf;
2178 enum bnxt_ulp_intf_type intf_type;
2182 "ParseErr: Invalid Argument\n");
2183 return BNXT_TF_RC_PARSE_ERR;
2185 if (port_id->original) {
2187 "ParseErr:Portid Original not supported\n");
2188 return BNXT_TF_RC_PARSE_ERR;
2191 /* Get the port db ifindex */
2192 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2194 BNXT_TF_DBG(ERR, "Invalid port id\n");
2195 return BNXT_TF_RC_ERROR;
2198 /* Get the intf type */
2199 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2201 BNXT_TF_DBG(ERR, "Invalid port type\n");
2202 return BNXT_TF_RC_ERROR;
2205 /* Set the action port */
2206 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2207 return ulp_rte_parser_act_port_set(param, ifindex);
2210 /* Function to handle the parsing of RTE Flow action phy_port. */
2212 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2213 struct ulp_rte_parser_params *prm)
2215 const struct rte_flow_action_phy_port *phy_port;
2219 enum bnxt_ulp_direction_type dir;
2221 phy_port = action_item->conf;
2224 "ParseErr: Invalid Argument\n");
2225 return BNXT_TF_RC_PARSE_ERR;
2228 if (phy_port->original) {
2230 "Parse Err:Port Original not supported\n");
2231 return BNXT_TF_RC_PARSE_ERR;
2233 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2234 if (dir != BNXT_ULP_DIR_EGRESS) {
2236 "Parse Err:Phy ports are valid only for egress\n");
2237 return BNXT_TF_RC_PARSE_ERR;
2239 /* Get the physical port details from port db */
2240 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2243 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2248 pid = rte_cpu_to_be_32(pid);
2249 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2250 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2252 /* Update the action port set bit */
2253 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2254 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2255 BNXT_ULP_INTF_TYPE_PHY_PORT);
2256 return BNXT_TF_RC_SUCCESS;
2259 /* Function to handle the parsing of RTE Flow action pop vlan. */
2261 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2262 struct ulp_rte_parser_params *params)
2264 /* Update the act_bitmap with pop */
2265 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2266 return BNXT_TF_RC_SUCCESS;
2269 /* Function to handle the parsing of RTE Flow action push vlan. */
2271 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2272 struct ulp_rte_parser_params *params)
2274 const struct rte_flow_action_of_push_vlan *push_vlan;
2276 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2278 push_vlan = action_item->conf;
2280 ethertype = push_vlan->ethertype;
2281 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2283 "Parse Err: Ethertype not supported\n");
2284 return BNXT_TF_RC_PARSE_ERR;
2286 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2287 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2288 /* Update the hdr_bitmap with push vlan */
2289 ULP_BITMAP_SET(params->act_bitmap.bits,
2290 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2291 return BNXT_TF_RC_SUCCESS;
2293 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2294 return BNXT_TF_RC_ERROR;
2297 /* Function to handle the parsing of RTE Flow action set vlan id. */
2299 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2300 struct ulp_rte_parser_params *params)
2302 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2304 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2306 vlan_vid = action_item->conf;
2307 if (vlan_vid && vlan_vid->vlan_vid) {
2308 vid = vlan_vid->vlan_vid;
2309 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2310 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2311 /* Update the hdr_bitmap with vlan vid */
2312 ULP_BITMAP_SET(params->act_bitmap.bits,
2313 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2314 return BNXT_TF_RC_SUCCESS;
2316 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2317 return BNXT_TF_RC_ERROR;
2320 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2322 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2323 struct ulp_rte_parser_params *params)
2325 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2327 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2329 vlan_pcp = action_item->conf;
2331 pcp = vlan_pcp->vlan_pcp;
2332 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2333 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2334 /* Update the hdr_bitmap with vlan vid */
2335 ULP_BITMAP_SET(params->act_bitmap.bits,
2336 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2337 return BNXT_TF_RC_SUCCESS;
2339 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2340 return BNXT_TF_RC_ERROR;
2343 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2345 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2346 struct ulp_rte_parser_params *params)
2348 const struct rte_flow_action_set_ipv4 *set_ipv4;
2349 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2351 set_ipv4 = action_item->conf;
2353 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2354 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2355 /* Update the hdr_bitmap with set ipv4 src */
2356 ULP_BITMAP_SET(params->act_bitmap.bits,
2357 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2358 return BNXT_TF_RC_SUCCESS;
2360 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2361 return BNXT_TF_RC_ERROR;
2364 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2366 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2367 struct ulp_rte_parser_params *params)
2369 const struct rte_flow_action_set_ipv4 *set_ipv4;
2370 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2372 set_ipv4 = action_item->conf;
2374 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2375 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2376 /* Update the hdr_bitmap with set ipv4 dst */
2377 ULP_BITMAP_SET(params->act_bitmap.bits,
2378 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2379 return BNXT_TF_RC_SUCCESS;
2381 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2382 return BNXT_TF_RC_ERROR;
2385 /* Function to handle the parsing of RTE Flow action set tp src.*/
2387 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2388 struct ulp_rte_parser_params *params)
2390 const struct rte_flow_action_set_tp *set_tp;
2391 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2393 set_tp = action_item->conf;
2395 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2396 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2397 /* Update the hdr_bitmap with set tp src */
2398 ULP_BITMAP_SET(params->act_bitmap.bits,
2399 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2400 return BNXT_TF_RC_SUCCESS;
2403 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2404 return BNXT_TF_RC_ERROR;
2407 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2409 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2410 struct ulp_rte_parser_params *params)
2412 const struct rte_flow_action_set_tp *set_tp;
2413 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2415 set_tp = action_item->conf;
2417 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2418 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2419 /* Update the hdr_bitmap with set tp dst */
2420 ULP_BITMAP_SET(params->act_bitmap.bits,
2421 BNXT_ULP_ACT_BIT_SET_TP_DST);
2422 return BNXT_TF_RC_SUCCESS;
2425 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2426 return BNXT_TF_RC_ERROR;
2429 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2431 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2432 struct ulp_rte_parser_params *params)
2434 /* Update the act_bitmap with dec ttl */
2435 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2436 return BNXT_TF_RC_SUCCESS;
2439 /* Function to handle the parsing of RTE Flow action JUMP */
2441 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2442 struct ulp_rte_parser_params *params)
2444 /* Update the act_bitmap with dec ttl */
2445 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2446 return BNXT_TF_RC_SUCCESS;
2450 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2451 struct ulp_rte_parser_params *params)
2453 const struct rte_flow_action_sample *sample;
2456 sample = action_item->conf;
2458 /* if SAMPLE bit is set it means this sample action is nested within the
2459 * actions of another sample action; this is not allowed
2461 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2462 BNXT_ULP_ACT_BIT_SAMPLE))
2463 return BNXT_TF_RC_ERROR;
2465 /* a sample action is only allowed as a shared action */
2466 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2467 BNXT_ULP_ACT_BIT_SHARED))
2468 return BNXT_TF_RC_ERROR;
2470 /* only a ratio of 1 i.e. 100% is supported */
2471 if (sample->ratio != 1)
2472 return BNXT_TF_RC_ERROR;
2474 if (!sample->actions)
2475 return BNXT_TF_RC_ERROR;
2477 /* parse the nested actions for a sample action */
2478 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2479 if (ret == BNXT_TF_RC_SUCCESS)
2480 /* Update the act_bitmap with sample */
2481 ULP_BITMAP_SET(params->act_bitmap.bits,
2482 BNXT_ULP_ACT_BIT_SAMPLE);