1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Local defines for the parsing functions */
16 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
17 #define ULP_VLAN_PRIORITY_MASK 0x700
18 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
19 #define ULP_UDP_PORT_VXLAN 4789
21 /* Utility function to skip the void items. */
/*
 * Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries so the
 * caller always lands on a meaningful pattern item.
 * NOTE(review): interior lines are elided in this view; `increment` is
 * presumably the step applied to the cursor — confirm against full source.
 */
23 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
29 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
36 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] as "in use" in the parser field bitmap when its mask
 * has any bits set; a partially-set mask additionally flags the flow as a
 * wildcard match (BNXT_ULP_MATCH_TYPE_BITMASK_WM). An all-zero mask clears
 * the index bit instead.
 */
38 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
41 struct ulp_rte_hdr_field *field;
43 field = &params->hdr_field[idx];
44 if (ulp_bitmap_notzero(field->mask, field->size)) {
45 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Mask with some zero bits => not an exact match: request wildcard match */
47 if (!ulp_bitmap_is_ones(field->mask, field->size))
48 ULP_BITMAP_SET(params->fld_bitmap.bits,
49 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
51 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
55 /* Utility function to copy field spec items */
/*
 * Copies `buffer` into field->spec (field->size bytes) and — per the
 * return type — yields a pointer used by callers to walk to the next
 * hdr_field entry. NOTE(review): the size-assignment/advance lines are
 * elided in this view; confirm against full source.
 */
56 static struct ulp_rte_hdr_field *
57 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
62 memcpy(field->spec, buffer, field->size);
67 /* Utility function to copy field masks items */
/*
 * Copies `size` bytes of `buffer` into hdr_field[*idx].mask and refreshes
 * the field bitmap for that index (exact vs wildcard classification).
 */
69 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
74 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
76 memcpy(field->mask, buffer, size);
77 ulp_rte_parser_field_bitmap_update(params, *idx);
82 * Function to handle the parsing of RTE Flows and placing
83 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow item array until RTE_FLOW_ITEM_TYPE_END, dispatching
 * each supported item to its registered proto_hdr_func callback from the
 * ulp_hdr_info table. Returns BNXT_TF_RC_PARSE_ERR for unsupported items,
 * BNXT_TF_RC_ERROR on a callback failure, otherwise falls through to the
 * implicit match-port SVIF processing.
 */
86 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
87 struct ulp_rte_parser_params *params)
89 const struct rte_flow_item *item = pattern;
90 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots for the SVIF fields */
92 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
94 /* Set the computed flags for no vlan tags before parsing */
95 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
96 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
98 /* Parse all the items in the pattern */
99 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
100 /* get the header information from the flow_hdr_info table */
101 hdr_info = &ulp_hdr_info[item->type];
102 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
104 "Truflow parser does not support type %d\n",
106 return BNXT_TF_RC_PARSE_ERR;
107 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
108 /* call the registered callback handler */
109 if (hdr_info->proto_hdr_func) {
110 if (hdr_info->proto_hdr_func(item, params) !=
111 BNXT_TF_RC_SUCCESS) {
112 return BNXT_TF_RC_ERROR;
118 /* update the implied SVIF */
119 return ulp_rte_parser_implicit_match_port_process(params);
123 * Function to handle the parsing of RTE Flows and placing
124 * the RTE flow actions into the ulp structures.
/*
 * Mirror of the header parser for the action array: walks actions until
 * RTE_FLOW_ACTION_TYPE_END, dispatching each supported action through the
 * ulp_act_info table's proto_act_func callback. Ends by resolving the
 * implicit action port, then reports success.
 */
127 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
128 struct ulp_rte_parser_params *params)
130 const struct rte_flow_action *action_item = actions;
131 struct bnxt_ulp_rte_act_info *hdr_info;
133 /* Parse all the items in the pattern */
134 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
135 /* get the header information from the flow_hdr_info table */
136 hdr_info = &ulp_act_info[action_item->type];
137 if (hdr_info->act_type ==
138 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
140 "Truflow parser does not support act %u\n",
142 return BNXT_TF_RC_ERROR;
143 } else if (hdr_info->act_type ==
144 BNXT_ULP_ACT_TYPE_SUPPORTED) {
145 /* call the registered callback handler */
146 if (hdr_info->proto_act_func) {
147 if (hdr_info->proto_act_func(action_item,
149 BNXT_TF_RC_SUCCESS) {
150 return BNXT_TF_RC_ERROR;
156 /* update the implied port details */
157 ulp_rte_parser_implicit_act_port_process(params);
158 return BNXT_TF_RC_SUCCESS;
162 * Function to handle the post processing of the computed
163 * fields for the interface.
/*
 * Resolves the incoming dpdk port id to a port-db ifindex, then fills in
 * the PARIF computed fields. Ingress: physical-port PARIF; for a VF-rep
 * match port additionally the VF-function PARIF and a loopback PARIF.
 * Otherwise: the driver-function PARIF. Errors are logged; this view does
 * not show the error return paths (lines elided).
 */
166 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
169 uint16_t port_id, parif;
171 enum bnxt_ulp_direction_type dir;
173 /* get the direction details */
174 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
176 /* read the port id details */
177 port_id = ULP_COMP_FLD_IDX_RD(params,
178 BNXT_ULP_CF_IDX_INCOMING_IF);
179 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
182 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
186 if (dir == BNXT_ULP_DIR_INGRESS) {
/* Ingress flows match on the physical port's PARIF */
188 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
189 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
190 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
193 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
196 /* Get the match port type */
197 mtype = ULP_COMP_FLD_IDX_RD(params,
198 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
199 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
200 ULP_COMP_FLD_IDX_WR(params,
201 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
203 /* Set VF func PARIF */
204 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
205 BNXT_ULP_VF_FUNC_PARIF,
208 "ParseErr:ifindex is not valid\n");
211 ULP_COMP_FLD_IDX_WR(params,
212 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
215 /* populate the loopback parif */
216 ULP_COMP_FLD_IDX_WR(params,
217 BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
218 BNXT_ULP_SYM_VF_FUNC_PARIF);
221 /* Set DRV func PARIF */
222 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
223 BNXT_ULP_DRV_FUNC_PARIF,
226 "ParseErr:ifindex is not valid\n");
229 ULP_COMP_FLD_IDX_WR(params,
230 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
237 * Function to handle the post processing of the parsing details
/*
 * After both pattern and action parsing, derives cross-cutting state:
 * stamps flow direction into both the header and action bitmaps, computes
 * the VF-to-VF flag, maps ACTION_BIT_DEC_TTL onto the tunnel vs non-tunnel
 * decrement-TTL computed fields, folds hdr_fp_bit into hdr_bitmap, and
 * updates the interface PARIF fields.
 */
240 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
242 enum bnxt_ulp_direction_type dir;
243 enum bnxt_ulp_intf_type match_port_type, act_port_type;
244 uint32_t act_port_set;
246 /* Get the computed details */
247 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
248 match_port_type = ULP_COMP_FLD_IDX_RD(params,
249 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
250 act_port_type = ULP_COMP_FLD_IDX_RD(params,
251 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
252 act_port_set = ULP_COMP_FLD_IDX_RD(params,
253 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
255 /* set the flow direction in the proto and action header */
256 if (dir == BNXT_ULP_DIR_EGRESS) {
257 ULP_BITMAP_SET(params->hdr_bitmap.bits,
258 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
259 ULP_BITMAP_SET(params->act_bitmap.bits,
260 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
263 /* calculate the VF to VF flag */
264 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
265 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
266 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
268 /* Update the decrement ttl computational fields */
269 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
270 BNXT_ULP_ACTION_BIT_DEC_TTL)) {
272 * Check that vxlan proto is included and vxlan decap
273 * action is not set then decrement tunnel ttl.
274 * Similarly add GRE and NVGRE in future.
276 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
277 BNXT_ULP_HDR_BIT_T_VXLAN) &&
278 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
279 BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
280 ULP_COMP_FLD_IDX_WR(params,
281 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
283 ULP_COMP_FLD_IDX_WR(params,
284 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
288 /* Merge the hdr_fp_bit into the proto header bit */
289 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
291 /* Update the computed interface parameters */
292 bnxt_ulp_comp_fld_intf_update(params);
294 /* TBD: Handle the flow rejection scenarios */
299 * Function to compute the flow direction based on the match port details
/*
 * VF-rep special case: a flow created with the ingress attribute whose
 * match port is a VF representor is actually programmed as egress (traffic
 * leaves toward the VF). Otherwise the direction simply follows the flow's
 * ingress/egress attribute.
 */
302 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
304 enum bnxt_ulp_intf_type match_port_type;
306 /* Get the match port type */
307 match_port_type = ULP_COMP_FLD_IDX_RD(params,
308 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
310 /* If ingress flow and matchport is vf rep then dir is egress*/
311 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
312 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
313 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
314 BNXT_ULP_DIR_EGRESS);
316 /* Assign the input direction */
317 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
318 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
319 BNXT_ULP_DIR_INGRESS);
321 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
322 BNXT_ULP_DIR_EGRESS);
326 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolves and stores the source-virtual-interface (SVIF) for the match
 * port identified by `ifindex`. Rejects a second source item (SVIF may be
 * set only once per flow), records the match port type, computes direction,
 * picks the SVIF flavor (phy port for ingress; VF-func or drv-func for
 * egress), and writes big-endian spec/mask into the dedicated SVIF
 * hdr_field slot plus the SVIF_FLAG computed field.
 */
328 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
333 enum bnxt_ulp_direction_type dir;
334 struct ulp_rte_hdr_field *hdr_field;
335 enum bnxt_ulp_svif_type svif_type;
336 enum bnxt_ulp_intf_type port_type;
338 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
339 BNXT_ULP_INVALID_SVIF_VAL) {
341 "SVIF already set,multiple source not support'd\n");
342 return BNXT_TF_RC_ERROR;
345 /* Get port type details */
346 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
347 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
348 BNXT_TF_DBG(ERR, "Invalid port type\n");
349 return BNXT_TF_RC_ERROR;
352 /* Update the match port type */
353 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
355 /* compute the direction */
356 bnxt_ulp_rte_parser_direction_compute(params);
358 /* Get the computed direction */
359 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
360 if (dir == BNXT_ULP_DIR_INGRESS) {
361 svif_type = BNXT_ULP_PHY_PORT_SVIF;
/* Egress: VF reps use the VF function SVIF, others the driver function */
363 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
364 svif_type = BNXT_ULP_VF_FUNC_SVIF;
366 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
368 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
370 svif = rte_cpu_to_be_16(svif);
371 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
372 memcpy(hdr_field->spec, &svif, sizeof(svif));
373 memcpy(hdr_field->mask, &mask, sizeof(mask));
374 hdr_field->size = sizeof(svif);
/* SVIF_FLAG doubles as the "already set" marker checked on entry */
375 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
376 rte_be_to_cpu_16(svif));
377 return BNXT_TF_RC_SUCCESS;
380 /* Function to handle the parsing of the RTE port id */
/*
 * Fallback invoked after pattern parsing: if no item set the SVIF
 * explicitly, derives it from the incoming interface with a full
 * (0xFFFF) mask.
 */
382 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
384 uint16_t port_id = 0;
385 uint16_t svif_mask = 0xFFFF;
387 int32_t rc = BNXT_TF_RC_ERROR;
/* Nothing to do when an explicit source item already set the SVIF */
389 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
390 BNXT_ULP_INVALID_SVIF_VAL)
391 return BNXT_TF_RC_SUCCESS;
393 /* SVIF not set. So get the port id */
394 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
396 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
399 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
403 /* Update the SVIF details */
404 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
408 /* Function to handle the implicit action port id */
/*
 * Fallback invoked after action parsing: if no explicit port action was
 * given, synthesizes a port_id action from the incoming interface and runs
 * it through the regular port-id action handler, then clears the
 * ACT_PORT_IS_SET marker since the port was implied rather than requested.
 */
410 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
412 struct rte_flow_action action_item = {0};
413 struct rte_flow_action_port_id port_id = {0};
415 /* Read the action port set bit */
416 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
417 /* Already set, so just exit */
418 return BNXT_TF_RC_SUCCESS;
420 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
421 action_item.conf = &port_id;
423 /* Update the action port based on incoming port */
424 ulp_rte_port_id_act_handler(&action_item, params);
426 /* Reset the action port set bit */
427 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
428 return BNXT_TF_RC_SUCCESS;
431 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * RTE_FLOW_ITEM_TYPE_PF handler: the item carries no spec, so the match
 * port is the flow's incoming interface; converts it to a port-db ifindex
 * and sets the SVIF with a full mask.
 */
433 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
434 struct ulp_rte_parser_params *params)
436 uint16_t port_id = 0;
437 uint16_t svif_mask = 0xFFFF;
440 /* Get the implicit port id */
441 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
443 /* perform the conversion from dpdk port to bnxt ifindex */
444 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
447 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
448 return BNXT_TF_RC_ERROR;
451 /* Update the SVIF details */
452 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
455 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * RTE_FLOW_ITEM_TYPE_VF handler: validates the VF spec/mask (validation
 * branches partially elided in this view), maps the VF function id to a
 * port-db ifindex, and sets the SVIF from it.
 */
457 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
458 struct ulp_rte_parser_params *params)
460 const struct rte_flow_item_vf *vf_spec = item->spec;
461 const struct rte_flow_item_vf *vf_mask = item->mask;
464 int32_t rc = BNXT_TF_RC_PARSE_ERR;
466 /* Get VF rte_flow_item for Port details */
468 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
472 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
477 /* perform the conversion from VF Func id to bnxt ifindex */
478 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
481 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
484 /* Update the SVIF details */
485 return ulp_rte_parser_svif_set(params, ifindex, mask);
488 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * RTE_FLOW_ITEM_TYPE_PORT_ID handler: validates spec/mask (branches
 * partially elided in this view), converts the dpdk port id to a port-db
 * ifindex, and sets the SVIF using the item's mask.
 */
490 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
491 struct ulp_rte_parser_params *params)
493 const struct rte_flow_item_port_id *port_spec = item->spec;
494 const struct rte_flow_item_port_id *port_mask = item->mask;
496 int32_t rc = BNXT_TF_RC_PARSE_ERR;
500 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
504 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
507 mask = port_mask->id;
509 /* perform the conversion from dpdk port to bnxt ifindex */
510 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
513 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
516 /* Update the SVIF details */
517 return ulp_rte_parser_svif_set(params, ifindex, mask);
520 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * RTE_FLOW_ITEM_TYPE_PHY_PORT handler: validates spec/mask, forces the
 * match port type to PHY_PORT, computes direction and rejects egress
 * (physical ports only match on ingress), looks the SVIF up directly in
 * the port db by physical port index, and writes big-endian spec/mask
 * into the SVIF hdr_field slot — bypassing ulp_rte_parser_svif_set.
 */
522 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
523 struct ulp_rte_parser_params *params)
525 const struct rte_flow_item_phy_port *port_spec = item->spec;
526 const struct rte_flow_item_phy_port *port_mask = item->mask;
528 int32_t rc = BNXT_TF_RC_ERROR;
530 enum bnxt_ulp_direction_type dir;
531 struct ulp_rte_hdr_field *hdr_field;
533 /* Copy the rte_flow_item for phy port into hdr_field */
535 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
539 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
542 mask = port_mask->index;
544 /* Update the match port type */
545 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
546 BNXT_ULP_INTF_TYPE_PHY_PORT);
548 /* Compute the Hw direction */
549 bnxt_ulp_rte_parser_direction_compute(params);
551 /* Direction validation */
552 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
553 if (dir == BNXT_ULP_DIR_EGRESS) {
555 "Parse Err:Phy ports are valid only for ingress\n");
556 return BNXT_TF_RC_PARSE_ERR;
559 /* Get the physical port details from port db */
560 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
563 BNXT_TF_DBG(ERR, "Failed to get port details\n");
564 return BNXT_TF_RC_PARSE_ERR;
567 /* Update the SVIF details */
568 svif = rte_cpu_to_be_16(svif);
569 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
570 memcpy(hdr_field->spec, &svif, sizeof(svif));
571 memcpy(hdr_field->mask, &mask, sizeof(mask));
572 hdr_field->size = sizeof(svif);
573 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
574 rte_be_to_cpu_16(svif));
575 return BNXT_TF_RC_SUCCESS;
578 /* Function to handle the update of proto header based on field values */
/*
 * From the L2 ethertype, sets the fast-path IPv4/IPv6 header bit and the
 * L3-present computed field, choosing inner (I_) vs outer (O_) variants
 * based on in_flag. Ethertype is compared in big-endian (network) order.
 */
580 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
581 uint16_t type, uint32_t in_flag)
583 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
585 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
586 BNXT_ULP_HDR_BIT_I_IPV4);
587 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
589 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
590 BNXT_ULP_HDR_BIT_O_IPV4);
591 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
593 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
595 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
596 BNXT_ULP_HDR_BIT_I_IPV6);
597 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
599 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
600 BNXT_ULP_HDR_BIT_O_IPV6);
601 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
606 /* Internal Function to identify broadcast or multicast packets */
/*
 * Returns nonzero (per caller usage) when the MAC is multicast or
 * broadcast; such addresses cannot be offloaded, so callers reject the
 * flow. The log makes the rejection visible to the user.
 */
608 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
610 if (rte_is_multicast_ether_addr(eth_addr) ||
611 rte_is_broadcast_ether_addr(eth_addr)) {
613 "No support for bcast or mcast addr offload\n");
619 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * RTE_FLOW_ITEM_TYPE_ETH handler: copies dst MAC, src MAC and ethertype
 * spec/mask into consecutive hdr_field slots, rejecting bcast/mcast MACs
 * (workaround, see inline Todo). Reserves the vlan hdr_field slots right
 * after the eth fields, marks the header bitmap inner vs outer based on
 * whether an outer eth was already seen, and propagates the ethertype to
 * the L3 proto-type update.
 */
621 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
622 struct ulp_rte_parser_params *params)
624 const struct rte_flow_item_eth *eth_spec = item->spec;
625 const struct rte_flow_item_eth *eth_mask = item->mask;
626 struct ulp_rte_hdr_field *field;
627 uint32_t idx = params->field_idx;
629 uint16_t eth_type = 0;
630 uint32_t inner_flag = 0;
633 * Copy the rte_flow_item for eth into hdr_field using ethernet
637 size = sizeof(eth_spec->dst.addr_bytes);
638 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
639 eth_spec->dst.addr_bytes,
641 /* Todo: work around to avoid multicast and broadcast addr */
642 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
643 return BNXT_TF_RC_PARSE_ERR;
645 size = sizeof(eth_spec->src.addr_bytes);
646 field = ulp_rte_parser_fld_copy(field,
647 eth_spec->src.addr_bytes,
649 /* Todo: work around to avoid multicast and broadcast addr */
650 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
651 return BNXT_TF_RC_PARSE_ERR;
653 field = ulp_rte_parser_fld_copy(field,
655 sizeof(eth_spec->type));
/* Saved (big-endian) for the L2 proto-type bitmap update below */
656 eth_type = eth_spec->type;
659 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
660 sizeof(eth_mask->dst.addr_bytes));
661 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
662 sizeof(eth_mask->src.addr_bytes));
663 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
664 sizeof(eth_mask->type));
666 /* Add number of vlan header elements */
667 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
668 params->vlan_idx = params->field_idx;
669 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
671 /* Update the protocol hdr bitmap */
672 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
673 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
676 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
678 /* Update the field protocol hdr bitmap */
679 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
681 return BNXT_TF_RC_SUCCESS;
684 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * RTE_FLOW_ITEM_TYPE_VLAN handler: splits the TCI spec into priority and
 * 12-bit vlan tag (both kept big-endian in hdr_field), widens a fully-set
 * priority/tag mask to an exact 16-bit match, and classifies the tag as
 * outer-outer / outer-inner / inner-outer / inner-inner based on which
 * eth headers were seen and the current vtag counts; updates the NO/ONE/
 * TWO vtag computed fields accordingly. A vlan without a preceding eth
 * header is a parse error.
 *
 * Fix: corrected misspelled error log "withtout" -> "without".
 */
686 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
687 struct ulp_rte_parser_params *params)
689 const struct rte_flow_item_vlan *vlan_spec = item->spec;
690 const struct rte_flow_item_vlan *vlan_mask = item->mask;
691 struct ulp_rte_hdr_field *field;
692 struct ulp_rte_hdr_bitmap *hdr_bit;
693 uint32_t idx = params->vlan_idx;
694 uint16_t vlan_tag, priority;
695 uint32_t outer_vtag_num;
696 uint32_t inner_vtag_num;
697 uint16_t eth_type = 0;
698 uint32_t inner_flag = 0;
701 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* Split host-order TCI: top 3 bits are PCP priority, low 12 the tag */
705 vlan_tag = ntohs(vlan_spec->tci);
706 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
707 vlan_tag &= ULP_VLAN_TAG_MASK;
708 vlan_tag = htons(vlan_tag);
710 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
713 field = ulp_rte_parser_fld_copy(field,
716 field = ulp_rte_parser_fld_copy(field,
717 &vlan_spec->inner_type,
718 sizeof(vlan_spec->inner_type));
719 eth_type = vlan_spec->inner_type;
723 vlan_tag = ntohs(vlan_mask->tci);
724 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
728 * the storage for priority and vlan tag is 2 bytes
729 * The mask of priority which is 3 bits if it is all 1's
730 * then make the rest bits 13 bits as 1's
731 * so that it is matched as exact match.
733 if (priority == ULP_VLAN_PRIORITY_MASK)
734 priority |= ~ULP_VLAN_PRIORITY_MASK;
735 if (vlan_tag == ULP_VLAN_TAG_MASK)
736 vlan_tag |= ~ULP_VLAN_TAG_MASK;
737 vlan_tag = htons(vlan_tag);
740 * The priority field is ignored since OVS is setting it as
741 * wild card match and it is not supported. This is a work
742 * around and shall be addressed in the future.
746 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
748 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
749 sizeof(vlan_mask->inner_type));
751 /* Set the vlan index to new incremented value */
752 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
754 /* Get the outer tag and inner tag counts */
755 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
756 BNXT_ULP_CF_IDX_O_VTAG_NUM);
757 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
758 BNXT_ULP_CF_IDX_I_VTAG_NUM);
760 /* Update the hdr_bitmap of the vlans */
761 hdr_bit = &params->hdr_bitmap;
/* Outer eth only, first tag seen: outer-outer vlan */
762 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
763 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
765 /* Update the vlan tag num */
767 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
769 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
770 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
771 ULP_BITMAP_SET(params->hdr_bitmap.bits,
772 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Outer eth only, second tag: outer-inner vlan (QinQ outer pair) */
773 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
774 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
775 outer_vtag_num == 1) {
776 /* update the vlan tag num */
778 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
780 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
781 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
782 ULP_BITMAP_SET(params->hdr_bitmap.bits,
783 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Inner eth present, first inner tag: inner-outer vlan */
784 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
785 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
787 /* update the vlan tag num */
789 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
791 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
792 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
793 ULP_BITMAP_SET(params->hdr_bitmap.bits,
794 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Inner eth present, second inner tag: inner-inner vlan */
796 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
797 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
798 inner_vtag_num == 1) {
799 /* update the vlan tag num */
801 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
803 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
804 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
805 ULP_BITMAP_SET(params->hdr_bitmap.bits,
806 BNXT_ULP_HDR_BIT_II_VLAN);
809 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
810 return BNXT_TF_RC_ERROR;
812 /* Update the field protocol hdr bitmap */
813 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
814 return BNXT_TF_RC_SUCCESS;
817 /* Function to handle the update of proto header based on field values */
/*
 * From the IP next-protocol value, sets the fast-path UDP/TCP header bit
 * and the L4-present computed field, choosing inner (I_) vs outer (O_)
 * variants based on in_flag.
 */
819 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
820 uint8_t proto, uint32_t in_flag)
822 if (proto == IPPROTO_UDP) {
824 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
825 BNXT_ULP_HDR_BIT_I_UDP);
826 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
828 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
829 BNXT_ULP_HDR_BIT_O_UDP);
830 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
832 } else if (proto == IPPROTO_TCP) {
834 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
835 BNXT_ULP_HDR_BIT_I_TCP);
836 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
838 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
839 BNXT_ULP_HDR_BIT_O_TCP);
840 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
845 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * RTE_FLOW_ITEM_TYPE_IPV4 handler: rejects a third L3 header, then copies
 * every ipv4 header field's spec and mask into consecutive hdr_field
 * slots (tos mask intentionally skipped, see inline comment), classifies
 * the header as inner vs outer from the existing bitmap, propagates
 * next_proto_id to the L4 proto-type update, and bumps the L3 header
 * count.
 */
847 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
848 struct ulp_rte_parser_params *params)
850 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
851 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
852 struct ulp_rte_hdr_field *field;
853 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
854 uint32_t idx = params->field_idx;
857 uint32_t inner_flag = 0;
860 /* validate there are no 3rd L3 header */
861 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
863 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
864 return BNXT_TF_RC_ERROR;
868 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
872 size = sizeof(ipv4_spec->hdr.version_ihl);
873 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
874 &ipv4_spec->hdr.version_ihl,
876 size = sizeof(ipv4_spec->hdr.type_of_service);
877 field = ulp_rte_parser_fld_copy(field,
878 &ipv4_spec->hdr.type_of_service,
880 size = sizeof(ipv4_spec->hdr.total_length);
881 field = ulp_rte_parser_fld_copy(field,
882 &ipv4_spec->hdr.total_length,
884 size = sizeof(ipv4_spec->hdr.packet_id);
885 field = ulp_rte_parser_fld_copy(field,
886 &ipv4_spec->hdr.packet_id,
888 size = sizeof(ipv4_spec->hdr.fragment_offset);
889 field = ulp_rte_parser_fld_copy(field,
890 &ipv4_spec->hdr.fragment_offset,
892 size = sizeof(ipv4_spec->hdr.time_to_live);
893 field = ulp_rte_parser_fld_copy(field,
894 &ipv4_spec->hdr.time_to_live,
896 size = sizeof(ipv4_spec->hdr.next_proto_id);
897 field = ulp_rte_parser_fld_copy(field,
898 &ipv4_spec->hdr.next_proto_id,
/* Remember the L4 protocol for the proto-type bitmap update below */
900 proto = ipv4_spec->hdr.next_proto_id;
901 size = sizeof(ipv4_spec->hdr.hdr_checksum);
902 field = ulp_rte_parser_fld_copy(field,
903 &ipv4_spec->hdr.hdr_checksum,
905 size = sizeof(ipv4_spec->hdr.src_addr);
906 field = ulp_rte_parser_fld_copy(field,
907 &ipv4_spec->hdr.src_addr,
909 size = sizeof(ipv4_spec->hdr.dst_addr);
910 field = ulp_rte_parser_fld_copy(field,
911 &ipv4_spec->hdr.dst_addr,
915 ulp_rte_prsr_mask_copy(params, &idx,
916 &ipv4_mask->hdr.version_ihl,
917 sizeof(ipv4_mask->hdr.version_ihl));
919 * The tos field is ignored since OVS is setting it as wild card
920 * match and it is not supported. This is a work around and
921 * shall be addressed in the future.
925 ulp_rte_prsr_mask_copy(params, &idx,
926 &ipv4_mask->hdr.total_length,
927 sizeof(ipv4_mask->hdr.total_length));
928 ulp_rte_prsr_mask_copy(params, &idx,
929 &ipv4_mask->hdr.packet_id,
930 sizeof(ipv4_mask->hdr.packet_id));
931 ulp_rte_prsr_mask_copy(params, &idx,
932 &ipv4_mask->hdr.fragment_offset,
933 sizeof(ipv4_mask->hdr.fragment_offset));
934 ulp_rte_prsr_mask_copy(params, &idx,
935 &ipv4_mask->hdr.time_to_live,
936 sizeof(ipv4_mask->hdr.time_to_live));
937 ulp_rte_prsr_mask_copy(params, &idx,
938 &ipv4_mask->hdr.next_proto_id,
939 sizeof(ipv4_mask->hdr.next_proto_id));
940 ulp_rte_prsr_mask_copy(params, &idx,
941 &ipv4_mask->hdr.hdr_checksum,
942 sizeof(ipv4_mask->hdr.hdr_checksum));
943 ulp_rte_prsr_mask_copy(params, &idx,
944 &ipv4_mask->hdr.src_addr,
945 sizeof(ipv4_mask->hdr.src_addr));
946 ulp_rte_prsr_mask_copy(params, &idx,
947 &ipv4_mask->hdr.dst_addr,
948 sizeof(ipv4_mask->hdr.dst_addr));
950 /* Add the number of ipv4 header elements */
951 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
953 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
954 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
955 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
956 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
957 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
960 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
961 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
964 /* Update the field protocol hdr bitmap */
965 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
966 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
967 return BNXT_TF_RC_SUCCESS;
970 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * RTE_FLOW_ITEM_TYPE_IPV6 handler: rejects a third L3 header, decomposes
 * vtc_flow into version / traffic-class / flow-label sub-fields via the
 * BNXT_ULP_GET_IPV6_* extractors for both spec and mask, copies the
 * remaining ipv6 header fields, classifies inner vs outer, propagates the
 * proto byte to the L4 proto-type update, and bumps the L3 header count.
 */
972 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
973 struct ulp_rte_parser_params *params)
975 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
976 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
977 struct ulp_rte_hdr_field *field;
978 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
979 uint32_t idx = params->field_idx;
981 uint32_t vtcf, vtcf_mask;
983 uint32_t inner_flag = 0;
986 /* validate there are no 3rd L3 header */
987 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
989 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
990 return BNXT_TF_RC_ERROR;
994 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
998 size = sizeof(ipv6_spec->hdr.vtc_flow);
/* vtc_flow carries three match fields; extract each separately */
1000 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1001 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1005 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1006 field = ulp_rte_parser_fld_copy(field,
1010 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1011 field = ulp_rte_parser_fld_copy(field,
1015 size = sizeof(ipv6_spec->hdr.payload_len);
1016 field = ulp_rte_parser_fld_copy(field,
1017 &ipv6_spec->hdr.payload_len,
1019 size = sizeof(ipv6_spec->hdr.proto);
1020 field = ulp_rte_parser_fld_copy(field,
1021 &ipv6_spec->hdr.proto,
/* Remember the L4 protocol for the proto-type bitmap update below */
1023 proto = ipv6_spec->hdr.proto;
1024 size = sizeof(ipv6_spec->hdr.hop_limits);
1025 field = ulp_rte_parser_fld_copy(field,
1026 &ipv6_spec->hdr.hop_limits,
1028 size = sizeof(ipv6_spec->hdr.src_addr);
1029 field = ulp_rte_parser_fld_copy(field,
1030 &ipv6_spec->hdr.src_addr,
1032 size = sizeof(ipv6_spec->hdr.dst_addr);
1033 field = ulp_rte_parser_fld_copy(field,
1034 &ipv6_spec->hdr.dst_addr,
1038 size = sizeof(ipv6_mask->hdr.vtc_flow);
1040 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1041 ulp_rte_prsr_mask_copy(params, &idx,
1045 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1046 ulp_rte_prsr_mask_copy(params, &idx,
1051 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1052 ulp_rte_prsr_mask_copy(params, &idx,
1056 ulp_rte_prsr_mask_copy(params, &idx,
1057 &ipv6_mask->hdr.payload_len,
1058 sizeof(ipv6_mask->hdr.payload_len));
1059 ulp_rte_prsr_mask_copy(params, &idx,
1060 &ipv6_mask->hdr.proto,
1061 sizeof(ipv6_mask->hdr.proto));
1062 ulp_rte_prsr_mask_copy(params, &idx,
1063 &ipv6_mask->hdr.hop_limits,
1064 sizeof(ipv6_mask->hdr.hop_limits));
1065 ulp_rte_prsr_mask_copy(params, &idx,
1066 &ipv6_mask->hdr.src_addr,
1067 sizeof(ipv6_mask->hdr.src_addr));
1068 ulp_rte_prsr_mask_copy(params, &idx,
1069 &ipv6_mask->hdr.dst_addr,
1070 sizeof(ipv6_mask->hdr.dst_addr));
1072 /* add number of ipv6 header elements */
1073 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1075 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1076 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1077 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1078 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1079 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1082 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1083 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1086 /* Update the field protocol hdr bitmap */
1087 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1088 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1090 return BNXT_TF_RC_SUCCESS;
1093 /* Function to handle the update of proto header based on field values */
/*
 * Marks the flow as a VXLAN tunnel when the (big-endian) UDP destination
 * port equals the IANA VXLAN port (4789).
 */
1095 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1098 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1099 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1100 BNXT_ULP_HDR_BIT_T_VXLAN);
1103 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * RTE_FLOW_ITEM_TYPE_UDP handler: rejects a third L4 header, copies
 * src/dst port, length and checksum spec/mask into hdr_field slots,
 * classifies inner vs outer from the existing L4 bitmap, and — for an
 * outer UDP only — checks the dst port for VXLAN tunnel detection.
 * Bumps the L4 header count.
 */
1105 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1106 struct ulp_rte_parser_params *params)
1108 const struct rte_flow_item_udp *udp_spec = item->spec;
1109 const struct rte_flow_item_udp *udp_mask = item->mask;
1110 struct ulp_rte_hdr_field *field;
1111 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1112 uint32_t idx = params->field_idx;
1114 uint16_t dst_port = 0;
1117 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1119 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1120 return BNXT_TF_RC_ERROR;
1124 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1128 size = sizeof(udp_spec->hdr.src_port);
1129 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1130 &udp_spec->hdr.src_port,
1132 size = sizeof(udp_spec->hdr.dst_port);
1133 field = ulp_rte_parser_fld_copy(field,
1134 &udp_spec->hdr.dst_port,
/* Saved (big-endian) for VXLAN tunnel-port detection below */
1136 dst_port = udp_spec->hdr.dst_port;
1137 size = sizeof(udp_spec->hdr.dgram_len);
1138 field = ulp_rte_parser_fld_copy(field,
1139 &udp_spec->hdr.dgram_len,
1141 size = sizeof(udp_spec->hdr.dgram_cksum);
1142 field = ulp_rte_parser_fld_copy(field,
1143 &udp_spec->hdr.dgram_cksum,
1147 ulp_rte_prsr_mask_copy(params, &idx,
1148 &udp_mask->hdr.src_port,
1149 sizeof(udp_mask->hdr.src_port));
1150 ulp_rte_prsr_mask_copy(params, &idx,
1151 &udp_mask->hdr.dst_port,
1152 sizeof(udp_mask->hdr.dst_port));
1153 ulp_rte_prsr_mask_copy(params, &idx,
1154 &udp_mask->hdr.dgram_len,
1155 sizeof(udp_mask->hdr.dgram_len));
1156 ulp_rte_prsr_mask_copy(params, &idx,
1157 &udp_mask->hdr.dgram_cksum,
1158 sizeof(udp_mask->hdr.dgram_cksum));
1161 /* Add number of UDP header elements */
1162 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1164 /* Set the udp header bitmap and computed l4 header bitmaps */
1165 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1166 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1167 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1168 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1170 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1171 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1172 /* Update the field protocol hdr bitmap */
1173 ulp_rte_l4_proto_type_update(params, dst_port);
1175 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1176 return BNXT_TF_RC_SUCCESS;
1179 /* Function to handle the parsing of RTE Flow item TCP Header. */
1181 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1182 struct ulp_rte_parser_params *params)
1184 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1185 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1186 struct ulp_rte_hdr_field *field;
1187 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1188 uint32_t idx = params->field_idx;
1192 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1194 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1195 return BNXT_TF_RC_ERROR;
1199 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1203 size = sizeof(tcp_spec->hdr.src_port);
1204 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1205 &tcp_spec->hdr.src_port,
1207 size = sizeof(tcp_spec->hdr.dst_port);
1208 field = ulp_rte_parser_fld_copy(field,
1209 &tcp_spec->hdr.dst_port,
1211 size = sizeof(tcp_spec->hdr.sent_seq);
1212 field = ulp_rte_parser_fld_copy(field,
1213 &tcp_spec->hdr.sent_seq,
1215 size = sizeof(tcp_spec->hdr.recv_ack);
1216 field = ulp_rte_parser_fld_copy(field,
1217 &tcp_spec->hdr.recv_ack,
1219 size = sizeof(tcp_spec->hdr.data_off);
1220 field = ulp_rte_parser_fld_copy(field,
1221 &tcp_spec->hdr.data_off,
1223 size = sizeof(tcp_spec->hdr.tcp_flags);
1224 field = ulp_rte_parser_fld_copy(field,
1225 &tcp_spec->hdr.tcp_flags,
1227 size = sizeof(tcp_spec->hdr.rx_win);
1228 field = ulp_rte_parser_fld_copy(field,
1229 &tcp_spec->hdr.rx_win,
1231 size = sizeof(tcp_spec->hdr.cksum);
1232 field = ulp_rte_parser_fld_copy(field,
1233 &tcp_spec->hdr.cksum,
1235 size = sizeof(tcp_spec->hdr.tcp_urp);
1236 field = ulp_rte_parser_fld_copy(field,
1237 &tcp_spec->hdr.tcp_urp,
1240 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1244 ulp_rte_prsr_mask_copy(params, &idx,
1245 &tcp_mask->hdr.src_port,
1246 sizeof(tcp_mask->hdr.src_port));
1247 ulp_rte_prsr_mask_copy(params, &idx,
1248 &tcp_mask->hdr.dst_port,
1249 sizeof(tcp_mask->hdr.dst_port));
1250 ulp_rte_prsr_mask_copy(params, &idx,
1251 &tcp_mask->hdr.sent_seq,
1252 sizeof(tcp_mask->hdr.sent_seq));
1253 ulp_rte_prsr_mask_copy(params, &idx,
1254 &tcp_mask->hdr.recv_ack,
1255 sizeof(tcp_mask->hdr.recv_ack));
1256 ulp_rte_prsr_mask_copy(params, &idx,
1257 &tcp_mask->hdr.data_off,
1258 sizeof(tcp_mask->hdr.data_off));
1259 ulp_rte_prsr_mask_copy(params, &idx,
1260 &tcp_mask->hdr.tcp_flags,
1261 sizeof(tcp_mask->hdr.tcp_flags));
1262 ulp_rte_prsr_mask_copy(params, &idx,
1263 &tcp_mask->hdr.rx_win,
1264 sizeof(tcp_mask->hdr.rx_win));
1265 ulp_rte_prsr_mask_copy(params, &idx,
1266 &tcp_mask->hdr.cksum,
1267 sizeof(tcp_mask->hdr.cksum));
1268 ulp_rte_prsr_mask_copy(params, &idx,
1269 &tcp_mask->hdr.tcp_urp,
1270 sizeof(tcp_mask->hdr.tcp_urp));
1272 /* add number of TCP header elements */
1273 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1275 /* Set the udp header bitmap and computed l4 header bitmaps */
1276 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1277 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1278 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1279 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1281 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1282 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1284 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1285 return BNXT_TF_RC_SUCCESS;
1288 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1290 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1291 struct ulp_rte_parser_params *params)
1293 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1294 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1295 struct ulp_rte_hdr_field *field;
1296 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1297 uint32_t idx = params->field_idx;
1301 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1305 size = sizeof(vxlan_spec->flags);
1306 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1309 size = sizeof(vxlan_spec->rsvd0);
1310 field = ulp_rte_parser_fld_copy(field,
1313 size = sizeof(vxlan_spec->vni);
1314 field = ulp_rte_parser_fld_copy(field,
1317 size = sizeof(vxlan_spec->rsvd1);
1318 field = ulp_rte_parser_fld_copy(field,
1323 ulp_rte_prsr_mask_copy(params, &idx,
1325 sizeof(vxlan_mask->flags));
1326 ulp_rte_prsr_mask_copy(params, &idx,
1328 sizeof(vxlan_mask->rsvd0));
1329 ulp_rte_prsr_mask_copy(params, &idx,
1331 sizeof(vxlan_mask->vni));
1332 ulp_rte_prsr_mask_copy(params, &idx,
1334 sizeof(vxlan_mask->rsvd1));
1336 /* Add number of vxlan header elements */
1337 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1339 /* Update the hdr_bitmap with vxlan */
1340 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1341 return BNXT_TF_RC_SUCCESS;
1344 /* Function to handle the parsing of RTE Flow item void Header */
1346 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1347 struct ulp_rte_parser_params *params __rte_unused)
1349 return BNXT_TF_RC_SUCCESS;
1352 /* Function to handle the parsing of RTE Flow action void Header. */
1354 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1355 struct ulp_rte_parser_params *params __rte_unused)
1357 return BNXT_TF_RC_SUCCESS;
1360 /* Function to handle the parsing of RTE Flow action Mark Header. */
1362 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1363 struct ulp_rte_parser_params *param)
1365 const struct rte_flow_action_mark *mark;
1366 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1369 mark = action_item->conf;
1371 mark_id = tfp_cpu_to_be_32(mark->id);
1372 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1373 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1375 /* Update the hdr_bitmap with vxlan */
1376 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1377 return BNXT_TF_RC_SUCCESS;
1379 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1380 return BNXT_TF_RC_ERROR;
1383 /* Function to handle the parsing of RTE Flow action RSS Header. */
1385 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1386 struct ulp_rte_parser_params *param)
1388 const struct rte_flow_action_rss *rss = action_item->conf;
1391 /* Update the hdr_bitmap with vxlan */
1392 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1393 return BNXT_TF_RC_SUCCESS;
1395 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1396 return BNXT_TF_RC_ERROR;
1399 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1401 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1402 struct ulp_rte_parser_params *params)
1404 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1405 const struct rte_flow_item *item;
1406 const struct rte_flow_item_eth *eth_spec;
1407 const struct rte_flow_item_ipv4 *ipv4_spec;
1408 const struct rte_flow_item_ipv6 *ipv6_spec;
1409 struct rte_flow_item_vxlan vxlan_spec;
1410 uint32_t vlan_num = 0, vlan_size = 0;
1411 uint32_t ip_size = 0, ip_type = 0;
1412 uint32_t vxlan_size = 0;
1414 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1415 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1417 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1418 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1420 vxlan_encap = action_item->conf;
1422 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1423 return BNXT_TF_RC_ERROR;
1426 item = vxlan_encap->definition;
1428 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1429 return BNXT_TF_RC_ERROR;
1432 if (!ulp_rte_item_skip_void(&item, 0))
1433 return BNXT_TF_RC_ERROR;
1435 /* must have ethernet header */
1436 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1437 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1438 return BNXT_TF_RC_ERROR;
1440 eth_spec = item->spec;
1441 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1442 ulp_encap_buffer_copy(buff,
1443 eth_spec->dst.addr_bytes,
1444 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1446 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1447 ulp_encap_buffer_copy(buff,
1448 eth_spec->src.addr_bytes,
1449 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1451 /* Goto the next item */
1452 if (!ulp_rte_item_skip_void(&item, 1))
1453 return BNXT_TF_RC_ERROR;
1455 /* May have vlan header */
1456 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1458 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1459 ulp_encap_buffer_copy(buff,
1461 sizeof(struct rte_flow_item_vlan));
1463 if (!ulp_rte_item_skip_void(&item, 1))
1464 return BNXT_TF_RC_ERROR;
1467 /* may have two vlan headers */
1468 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1470 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1471 sizeof(struct rte_flow_item_vlan)],
1473 sizeof(struct rte_flow_item_vlan));
1474 if (!ulp_rte_item_skip_void(&item, 1))
1475 return BNXT_TF_RC_ERROR;
1477 /* Update the vlan count and size of more than one */
1479 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1480 vlan_num = tfp_cpu_to_be_32(vlan_num);
1481 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1484 vlan_size = tfp_cpu_to_be_32(vlan_size);
1485 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1490 /* L3 must be IPv4, IPv6 */
1491 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1492 ipv4_spec = item->spec;
1493 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1495 /* copy the ipv4 details */
1496 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1497 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1498 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1499 ulp_encap_buffer_copy(buff,
1501 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1502 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1504 const uint8_t *tmp_buff;
1506 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1507 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1508 ulp_encap_buffer_copy(buff,
1510 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1511 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1512 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1513 ulp_encap_buffer_copy(buff,
1514 &ipv4_spec->hdr.version_ihl,
1515 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1517 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1518 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1519 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1520 ulp_encap_buffer_copy(buff,
1521 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1522 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1524 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1525 ulp_encap_buffer_copy(buff,
1526 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1527 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1529 /* Update the ip size details */
1530 ip_size = tfp_cpu_to_be_32(ip_size);
1531 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1532 &ip_size, sizeof(uint32_t));
1534 /* update the ip type */
1535 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1536 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1537 &ip_type, sizeof(uint32_t));
1539 /* update the computed field to notify it is ipv4 header */
1540 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1543 if (!ulp_rte_item_skip_void(&item, 1))
1544 return BNXT_TF_RC_ERROR;
1545 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1546 ipv6_spec = item->spec;
1547 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1549 /* copy the ipv4 details */
1550 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1551 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1553 /* Update the ip size details */
1554 ip_size = tfp_cpu_to_be_32(ip_size);
1555 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1556 &ip_size, sizeof(uint32_t));
1558 /* update the ip type */
1559 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1560 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1561 &ip_type, sizeof(uint32_t));
1563 /* update the computed field to notify it is ipv6 header */
1564 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1567 if (!ulp_rte_item_skip_void(&item, 1))
1568 return BNXT_TF_RC_ERROR;
1570 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1571 return BNXT_TF_RC_ERROR;
1575 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1576 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1577 return BNXT_TF_RC_ERROR;
1579 /* copy the udp details */
1580 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1581 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1583 if (!ulp_rte_item_skip_void(&item, 1))
1584 return BNXT_TF_RC_ERROR;
1587 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1588 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1589 return BNXT_TF_RC_ERROR;
1591 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1592 /* copy the vxlan details */
1593 memcpy(&vxlan_spec, item->spec, vxlan_size);
1594 vxlan_spec.flags = 0x08;
1595 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1596 (const uint8_t *)&vxlan_spec,
1598 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1599 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1600 &vxlan_size, sizeof(uint32_t));
1602 /* update the hdr_bitmap with vxlan */
1603 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1604 return BNXT_TF_RC_SUCCESS;
1607 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1609 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1611 struct ulp_rte_parser_params *params)
1613 /* update the hdr_bitmap with vxlan */
1614 ULP_BITMAP_SET(params->act_bitmap.bits,
1615 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1616 return BNXT_TF_RC_SUCCESS;
1619 /* Function to handle the parsing of RTE Flow action drop Header. */
1621 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1622 struct ulp_rte_parser_params *params)
1624 /* Update the hdr_bitmap with drop */
1625 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1626 return BNXT_TF_RC_SUCCESS;
1629 /* Function to handle the parsing of RTE Flow action count. */
1631 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1632 struct ulp_rte_parser_params *params)
1635 const struct rte_flow_action_count *act_count;
1636 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1638 act_count = action_item->conf;
1640 if (act_count->shared) {
1642 "Parse Error:Shared count not supported\n");
1643 return BNXT_TF_RC_PARSE_ERR;
1645 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1647 BNXT_ULP_ACT_PROP_SZ_COUNT);
1650 /* Update the hdr_bitmap with count */
1651 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1652 return BNXT_TF_RC_SUCCESS;
1655 /* Function to handle the parsing of action ports. */
1657 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1660 enum bnxt_ulp_direction_type dir;
1663 struct ulp_rte_act_prop *act = ¶m->act_prop;
1664 enum bnxt_ulp_intf_type port_type;
1667 /* Get the direction */
1668 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1669 if (dir == BNXT_ULP_DIR_EGRESS) {
1670 /* For egress direction, fill vport */
1671 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1672 return BNXT_TF_RC_ERROR;
1675 pid = rte_cpu_to_be_32(pid);
1676 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1677 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1679 /* For ingress direction, fill vnic */
1680 port_type = ULP_COMP_FLD_IDX_RD(param,
1681 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1682 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1683 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1685 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1687 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1689 return BNXT_TF_RC_ERROR;
1692 pid = rte_cpu_to_be_32(pid);
1693 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1694 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1697 /* Update the action port set bit */
1698 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1699 return BNXT_TF_RC_SUCCESS;
1702 /* Function to handle the parsing of RTE Flow action PF. */
1704 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1705 struct ulp_rte_parser_params *params)
1709 enum bnxt_ulp_intf_type intf_type;
1711 /* Get the port id of the current device */
1712 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1714 /* Get the port db ifindex */
1715 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1717 BNXT_TF_DBG(ERR, "Invalid port id\n");
1718 return BNXT_TF_RC_ERROR;
1721 /* Check the port is PF port */
1722 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1723 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1724 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1725 return BNXT_TF_RC_ERROR;
1727 /* Update the action properties */
1728 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1729 return ulp_rte_parser_act_port_set(params, ifindex);
1732 /* Function to handle the parsing of RTE Flow action VF. */
1734 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1735 struct ulp_rte_parser_params *params)
1737 const struct rte_flow_action_vf *vf_action;
1739 enum bnxt_ulp_intf_type intf_type;
1741 vf_action = action_item->conf;
1743 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1744 return BNXT_TF_RC_PARSE_ERR;
1747 if (vf_action->original) {
1748 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1749 return BNXT_TF_RC_PARSE_ERR;
1752 /* Check the port is VF port */
1753 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1755 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1756 return BNXT_TF_RC_ERROR;
1758 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1759 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1760 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1761 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1762 return BNXT_TF_RC_ERROR;
1765 /* Update the action properties */
1766 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1767 return ulp_rte_parser_act_port_set(params, ifindex);
1770 /* Function to handle the parsing of RTE Flow action port_id. */
1772 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1773 struct ulp_rte_parser_params *param)
1775 const struct rte_flow_action_port_id *port_id = act_item->conf;
1777 enum bnxt_ulp_intf_type intf_type;
1781 "ParseErr: Invalid Argument\n");
1782 return BNXT_TF_RC_PARSE_ERR;
1784 if (port_id->original) {
1786 "ParseErr:Portid Original not supported\n");
1787 return BNXT_TF_RC_PARSE_ERR;
1790 /* Get the port db ifindex */
1791 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1793 BNXT_TF_DBG(ERR, "Invalid port id\n");
1794 return BNXT_TF_RC_ERROR;
1797 /* Get the intf type */
1798 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1800 BNXT_TF_DBG(ERR, "Invalid port type\n");
1801 return BNXT_TF_RC_ERROR;
1804 /* Set the action port */
1805 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1806 return ulp_rte_parser_act_port_set(param, ifindex);
1809 /* Function to handle the parsing of RTE Flow action phy_port. */
1811 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1812 struct ulp_rte_parser_params *prm)
1814 const struct rte_flow_action_phy_port *phy_port;
1818 enum bnxt_ulp_direction_type dir;
1820 phy_port = action_item->conf;
1823 "ParseErr: Invalid Argument\n");
1824 return BNXT_TF_RC_PARSE_ERR;
1827 if (phy_port->original) {
1829 "Parse Err:Port Original not supported\n");
1830 return BNXT_TF_RC_PARSE_ERR;
1832 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1833 if (dir != BNXT_ULP_DIR_EGRESS) {
1835 "Parse Err:Phy ports are valid only for egress\n");
1836 return BNXT_TF_RC_PARSE_ERR;
1838 /* Get the physical port details from port db */
1839 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1842 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1847 pid = rte_cpu_to_be_32(pid);
1848 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1849 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1851 /* Update the action port set bit */
1852 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1853 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1854 BNXT_ULP_INTF_TYPE_PHY_PORT);
1855 return BNXT_TF_RC_SUCCESS;
1858 /* Function to handle the parsing of RTE Flow action pop vlan. */
1860 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1861 struct ulp_rte_parser_params *params)
1863 /* Update the act_bitmap with pop */
1864 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1865 return BNXT_TF_RC_SUCCESS;
1868 /* Function to handle the parsing of RTE Flow action push vlan. */
1870 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1871 struct ulp_rte_parser_params *params)
1873 const struct rte_flow_action_of_push_vlan *push_vlan;
1875 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1877 push_vlan = action_item->conf;
1879 ethertype = push_vlan->ethertype;
1880 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1882 "Parse Err: Ethertype not supported\n");
1883 return BNXT_TF_RC_PARSE_ERR;
1885 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1886 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1887 /* Update the hdr_bitmap with push vlan */
1888 ULP_BITMAP_SET(params->act_bitmap.bits,
1889 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1890 return BNXT_TF_RC_SUCCESS;
1892 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1893 return BNXT_TF_RC_ERROR;
1896 /* Function to handle the parsing of RTE Flow action set vlan id. */
1898 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1899 struct ulp_rte_parser_params *params)
1901 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1903 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1905 vlan_vid = action_item->conf;
1906 if (vlan_vid && vlan_vid->vlan_vid) {
1907 vid = vlan_vid->vlan_vid;
1908 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1909 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1910 /* Update the hdr_bitmap with vlan vid */
1911 ULP_BITMAP_SET(params->act_bitmap.bits,
1912 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1913 return BNXT_TF_RC_SUCCESS;
1915 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1916 return BNXT_TF_RC_ERROR;
1919 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1921 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1922 struct ulp_rte_parser_params *params)
1924 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1926 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1928 vlan_pcp = action_item->conf;
1930 pcp = vlan_pcp->vlan_pcp;
1931 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1932 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1933 /* Update the hdr_bitmap with vlan vid */
1934 ULP_BITMAP_SET(params->act_bitmap.bits,
1935 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1936 return BNXT_TF_RC_SUCCESS;
1938 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1939 return BNXT_TF_RC_ERROR;
1942 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
1944 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
1945 struct ulp_rte_parser_params *params)
1947 const struct rte_flow_action_set_ipv4 *set_ipv4;
1948 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1950 set_ipv4 = action_item->conf;
1952 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
1953 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
1954 /* Update the hdr_bitmap with set ipv4 src */
1955 ULP_BITMAP_SET(params->act_bitmap.bits,
1956 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
1957 return BNXT_TF_RC_SUCCESS;
1959 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
1960 return BNXT_TF_RC_ERROR;
1963 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
1965 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
1966 struct ulp_rte_parser_params *params)
1968 const struct rte_flow_action_set_ipv4 *set_ipv4;
1969 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1971 set_ipv4 = action_item->conf;
1973 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
1974 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
1975 /* Update the hdr_bitmap with set ipv4 dst */
1976 ULP_BITMAP_SET(params->act_bitmap.bits,
1977 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
1978 return BNXT_TF_RC_SUCCESS;
1980 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
1981 return BNXT_TF_RC_ERROR;
1984 /* Function to handle the parsing of RTE Flow action set tp src.*/
1986 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
1987 struct ulp_rte_parser_params *params)
1989 const struct rte_flow_action_set_tp *set_tp;
1990 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1992 set_tp = action_item->conf;
1994 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
1995 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
1996 /* Update the hdr_bitmap with set tp src */
1997 ULP_BITMAP_SET(params->act_bitmap.bits,
1998 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
1999 return BNXT_TF_RC_SUCCESS;
2002 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2003 return BNXT_TF_RC_ERROR;
2006 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2008 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2009 struct ulp_rte_parser_params *params)
2011 const struct rte_flow_action_set_tp *set_tp;
2012 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2014 set_tp = action_item->conf;
2016 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2017 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2018 /* Update the hdr_bitmap with set tp dst */
2019 ULP_BITMAP_SET(params->act_bitmap.bits,
2020 BNXT_ULP_ACTION_BIT_SET_TP_DST);
2021 return BNXT_TF_RC_SUCCESS;
2024 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2025 return BNXT_TF_RC_ERROR;
2028 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2030 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2031 struct ulp_rte_parser_params *params)
2033 /* Update the act_bitmap with dec ttl */
2034 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2035 return BNXT_TF_RC_SUCCESS;