1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
20 /* Local defines for the parsing functions */
21 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
22 #define ULP_VLAN_PRIORITY_MASK 0x700
23 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
24 #define ULP_UDP_PORT_VXLAN 4789
26 /* Utility function to skip the void items. */
/*
 * NOTE(review): this copy of the file is truncated (the file's own line
 * numbers jump), so the return type, braces and loop body are not visible.
 * Visible intent: advance *item past RTE_FLOW_ITEM_TYPE_VOID entries,
 * presumably stepping by 'increment' each pass -- confirm in full source.
 */
28 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
34 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
41 /* Utility function to update the field_bitmap */
/*
 * Sets bit 'idx' in params->fld_bitmap when hdr_field[idx].mask is
 * non-zero; if that mask is not all-ones it additionally sets the
 * wildcard-match bit (BNXT_ULP_MATCH_TYPE_BITMASK_WM). Otherwise the
 * index bit is cleared.
 * NOTE(review): truncated view -- return type, braces and the else
 * keyword are missing here; "¶ms" below is a mojibake of "&params"
 * introduced by a bad encoding pass and must be restored in the real file.
 */
43 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
46 struct ulp_rte_hdr_field *field;
48 field = ¶ms->hdr_field[idx];
49 if (ulp_bitmap_notzero(field->mask, field->size)) {
50 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
52 if (!ulp_bitmap_is_ones(field->mask, field->size))
53 ULP_BITMAP_SET(params->fld_bitmap.bits,
54 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
56 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
60 /* Utility function to copy field spec items */
/*
 * Copies the caller's buffer into field->spec (field->size bytes);
 * presumably returns the next hdr_field slot so callers can chain
 * copies -- the truncated view hides the remaining parameters, the
 * size assignment and the return statement; confirm in full source.
 */
61 static struct ulp_rte_hdr_field *
62 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
67 memcpy(field->spec, buffer, field->size);
72 /* Utility function to copy field masks items */
/*
 * Copies 'size' bytes into hdr_field[*idx].mask and refreshes the field
 * bitmap for that index via ulp_rte_parser_field_bitmap_update().
 * NOTE(review): truncated view -- the 'idx'/'buffer'/'size' parameter
 * lines and the *idx increment (if any) are not visible; "¶ms" is
 * mojibake for "&params".
 */
74 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
79 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
81 memcpy(field->mask, buffer, size);
82 ulp_rte_parser_field_bitmap_update(params, *idx);
86 /* Utility function to ignore field masks items */
/*
 * Deliberate no-op counterpart of ulp_rte_prsr_mask_copy(): every
 * visible parameter is __rte_unused. Presumably its only effect in the
 * full source is advancing the field index -- TODO confirm; the body is
 * not visible in this truncated view.
 */
88 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
90 const void *buffer __rte_unused,
91 uint32_t size __rte_unused)
97 * Function to handle the parsing of RTE Flows and placing
98 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow item array until RTE_FLOW_ITEM_TYPE_END, dispatching
 * each item to its registered handler from ulp_hdr_info[]. Returns
 * BNXT_TF_RC_PARSE_ERR for unsupported item types, BNXT_TF_RC_ERROR when
 * a handler fails, otherwise falls through to the implicit SVIF match.
 * NOTE(review): truncated view -- braces, the BNXT_TF_DBG(...) opening
 * and the item-advance statement are missing between visible lines.
 */
101 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
102 struct ulp_rte_parser_params *params)
104 const struct rte_flow_item *item = pattern;
105 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots for the SVIF before item parsing. */
107 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
109 /* Set the computed flags for no vlan tags before parsing */
110 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
111 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
113 /* Parse all the items in the pattern */
114 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
115 /* get the header information from the flow_hdr_info table */
116 hdr_info = &ulp_hdr_info[item->type];
117 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
119 "Truflow parser does not support type %d\n",
121 return BNXT_TF_RC_PARSE_ERR;
122 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
123 /* call the registered callback handler */
124 if (hdr_info->proto_hdr_func) {
125 if (hdr_info->proto_hdr_func(item, params) !=
126 BNXT_TF_RC_SUCCESS) {
127 return BNXT_TF_RC_ERROR;
133 /* update the implied SVIF */
134 return ulp_rte_parser_implicit_match_port_process(params);
138 * Function to handle the parsing of RTE Flows and placing
139 * the RTE flow actions into the ulp structures.
/*
 * Action-side twin of bnxt_ulp_rte_parser_hdr_parse(): walks the action
 * array until RTE_FLOW_ACTION_TYPE_END, dispatching via ulp_act_info[].
 * On success applies the implicit action-port update and returns
 * BNXT_TF_RC_SUCCESS; unsupported or failing actions return
 * BNXT_TF_RC_ERROR.
 * NOTE(review): truncated view -- braces, the BNXT_TF_DBG(...) opening
 * and the action_item-advance statement are not visible.
 */
142 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
143 struct ulp_rte_parser_params *params)
145 const struct rte_flow_action *action_item = actions;
146 struct bnxt_ulp_rte_act_info *hdr_info;
148 /* Parse all the items in the pattern */
149 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
150 /* get the header information from the flow_hdr_info table */
151 hdr_info = &ulp_act_info[action_item->type];
152 if (hdr_info->act_type ==
153 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
155 "Truflow parser does not support act %u\n",
157 return BNXT_TF_RC_ERROR;
158 } else if (hdr_info->act_type ==
159 BNXT_ULP_ACT_TYPE_SUPPORTED) {
160 /* call the registered callback handler */
161 if (hdr_info->proto_act_func) {
162 if (hdr_info->proto_act_func(action_item,
164 BNXT_TF_RC_SUCCESS) {
165 return BNXT_TF_RC_ERROR;
171 /* update the implied port details */
172 ulp_rte_parser_implicit_act_port_process(params);
173 return BNXT_TF_RC_SUCCESS;
177 * Function to handle the post processing of the computed
178 * fields for the interface.
/*
 * Resolves the incoming dpdk port to a port-db ifindex and, per the
 * computed flow direction, writes the PARIF computed fields:
 * ingress -> PHY_PORT_PARIF; VF-rep match port -> VF_FUNC_PARIF (and the
 * MATCH_PORT_IS_VFREP flag); otherwise DRV_FUNC_PARIF.
 * NOTE(review): truncated view -- 'ifindex'/'mtype' declarations, early
 * returns on the error paths and closing braces are not visible.
 */
181 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
184 uint16_t port_id, parif;
186 enum bnxt_ulp_direction_type dir;
188 /* get the direction details */
189 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
191 /* read the port id details */
192 port_id = ULP_COMP_FLD_IDX_RD(params,
193 BNXT_ULP_CF_IDX_INCOMING_IF);
194 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
197 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
201 if (dir == BNXT_ULP_DIR_INGRESS) {
203 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
204 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
205 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
208 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
211 /* Get the match port type */
212 mtype = ULP_COMP_FLD_IDX_RD(params,
213 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
214 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
215 ULP_COMP_FLD_IDX_WR(params,
216 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
218 /* Set VF func PARIF */
219 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
220 BNXT_ULP_VF_FUNC_PARIF,
223 "ParseErr:ifindex is not valid\n");
226 ULP_COMP_FLD_IDX_WR(params,
227 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
231 /* Set DRV func PARIF */
232 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
233 BNXT_ULP_DRV_FUNC_PARIF,
236 "ParseErr:ifindex is not valid\n");
239 ULP_COMP_FLD_IDX_WR(params,
240 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
/*
 * Post-processing for a non-tunnel flow: stamps the flow direction into
 * both header and action bitmaps, derives the VF-to-VF flag, converts a
 * DEC_TTL action into the tunnel/non-tunnel TTL computed fields, merges
 * the fast-path header bits into hdr_bitmap, records the flow id, and
 * finally refreshes the computed interface parameters.
 * NOTE(review): truncated view -- return type, braces and the function's
 * return statement are not visible.
 */
247 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
249 enum bnxt_ulp_intf_type match_port_type, act_port_type;
250 enum bnxt_ulp_direction_type dir;
251 uint32_t act_port_set;
253 /* Get the computed details */
254 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
255 match_port_type = ULP_COMP_FLD_IDX_RD(params,
256 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
257 act_port_type = ULP_COMP_FLD_IDX_RD(params,
258 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
259 act_port_set = ULP_COMP_FLD_IDX_RD(params,
260 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
262 /* set the flow direction in the proto and action header */
263 if (dir == BNXT_ULP_DIR_EGRESS) {
264 ULP_BITMAP_SET(params->hdr_bitmap.bits,
265 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
266 ULP_BITMAP_SET(params->act_bitmap.bits,
267 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
270 /* calculate the VF to VF flag */
271 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
272 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
273 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
275 /* Update the decrement ttl computational fields */
276 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
277 BNXT_ULP_ACT_BIT_DEC_TTL)) {
279 * Check that vxlan proto is included and vxlan decap
280 * action is not set then decrement tunnel ttl.
281 * Similarly add GRE and NVGRE in future.
283 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
284 BNXT_ULP_HDR_BIT_T_VXLAN) &&
285 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
286 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
287 ULP_COMP_FLD_IDX_WR(params,
288 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
290 ULP_COMP_FLD_IDX_WR(params,
291 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
295 /* Merge the hdr_fp_bit into the proto header bit */
296 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
298 /* Update the comp fld fid */
299 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
301 /* Update the computed interface parameters */
302 bnxt_ulp_comp_fld_intf_update(params);
304 /* TBD: Handle the flow rejection scenarios */
309 * Function to handle the post processing of the parsing details
/*
 * Runs normal-flow post processing unconditionally, then returns the
 * result of the tunnel-flow post processing (defined elsewhere).
 * NOTE(review): truncated view -- return type and braces are missing.
 */
312 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
314 ulp_post_process_normal_flow(params);
315 return ulp_post_process_tun_flow(params);
319 * Function to compute the flow direction based on the match port details
/*
 * Writes BNXT_ULP_CF_IDX_DIRECTION: an ingress-attributed flow whose
 * match port is a VF representor is flipped to egress; otherwise the
 * direction follows the flow attribute (ingress flag -> ingress, else
 * egress).
 * NOTE(review): truncated view -- braces, the early return after the
 * VF-rep case and the else keyword are not visible.
 */
322 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
324 enum bnxt_ulp_intf_type match_port_type;
326 /* Get the match port type */
327 match_port_type = ULP_COMP_FLD_IDX_RD(params,
328 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
330 /* If ingress flow and matchport is vf rep then dir is egress*/
331 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
332 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
333 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
334 BNXT_ULP_DIR_EGRESS);
336 /* Assign the input direction */
337 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
338 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
339 BNXT_ULP_DIR_INGRESS);
341 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
342 BNXT_ULP_DIR_EGRESS);
346 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Writes the source-VIF (SVIF) match into the dedicated hdr_field slot:
 * rejects a second SVIF source, resolves the port type from the port db,
 * computes the flow direction, then selects the SVIF flavor
 * (phy-port for ingress, VF-func for a VF-rep, else drv-func), fetches
 * it, and stores big-endian spec/mask plus the SVIF_FLAG computed field.
 * NOTE(review): truncated view -- 'svif'/'mask'/'ifindex' parameter and
 * local declarations, braces and the debug-log opening are missing;
 * "¶ms" below is mojibake for "&params".
 */
348 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
353 enum bnxt_ulp_direction_type dir;
354 struct ulp_rte_hdr_field *hdr_field;
355 enum bnxt_ulp_svif_type svif_type;
356 enum bnxt_ulp_intf_type port_type;
358 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
359 BNXT_ULP_INVALID_SVIF_VAL) {
361 "SVIF already set,multiple source not support'd\n");
362 return BNXT_TF_RC_ERROR;
365 /* Get port type details */
366 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
367 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
368 BNXT_TF_DBG(ERR, "Invalid port type\n");
369 return BNXT_TF_RC_ERROR;
372 /* Update the match port type */
373 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
375 /* compute the direction */
376 bnxt_ulp_rte_parser_direction_compute(params);
378 /* Get the computed direction */
379 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
380 if (dir == BNXT_ULP_DIR_INGRESS) {
381 svif_type = BNXT_ULP_PHY_PORT_SVIF;
383 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
384 svif_type = BNXT_ULP_VF_FUNC_SVIF;
386 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
388 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
390 svif = rte_cpu_to_be_16(svif);
391 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
392 memcpy(hdr_field->spec, &svif, sizeof(svif));
393 memcpy(hdr_field->mask, &mask, sizeof(mask));
394 hdr_field->size = sizeof(svif);
395 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
396 rte_be_to_cpu_16(svif));
397 return BNXT_TF_RC_SUCCESS;
400 /* Function to handle the parsing of the RTE port id */
/*
 * If no explicit SVIF match was supplied, derives one from the incoming
 * interface: resolves the dpdk port to an ifindex and installs an
 * exact-match (0xFFFF mask) SVIF via ulp_rte_parser_svif_set().
 * NOTE(review): truncated view -- 'ifindex' declaration, braces and the
 * final 'return rc;' are not visible.
 */
402 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
404 uint16_t port_id = 0;
405 uint16_t svif_mask = 0xFFFF;
407 int32_t rc = BNXT_TF_RC_ERROR;
409 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
410 BNXT_ULP_INVALID_SVIF_VAL)
411 return BNXT_TF_RC_SUCCESS;
413 /* SVIF not set. So get the port id */
414 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
416 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
419 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
423 /* Update the SVIF details */
424 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
428 /* Function to handle the implicit action port id */
/*
 * When no explicit destination-port action was parsed, synthesizes a
 * port-id action from the incoming interface and feeds it through
 * ulp_rte_port_id_act_handler(), then clears ACT_PORT_IS_SET so the
 * implicit port is not mistaken for a user-supplied one.
 */
430 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
432 struct rte_flow_action action_item = {0};
433 struct rte_flow_action_port_id port_id = {0};
435 /* Read the action port set bit */
436 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
437 /* Already set, so just exit */
438 return BNXT_TF_RC_SUCCESS;
440 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
441 action_item.conf = &port_id;
443 /* Update the action port based on incoming port */
444 ulp_rte_port_id_act_handler(&action_item, params);
446 /* Reset the action port set bit */
447 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
448 return BNXT_TF_RC_SUCCESS;
451 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item handler: the PF item carries no spec, so the implicit incoming
 * interface is resolved to an ifindex and installed as an exact-match
 * SVIF (mask 0xFFFF).
 * NOTE(review): truncated view -- 'ifindex' declaration and some braces
 * are missing between visible lines.
 */
453 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
454 struct ulp_rte_parser_params *params)
456 uint16_t port_id = 0;
457 uint16_t svif_mask = 0xFFFF;
460 /* Get the implicit port id */
461 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
463 /* perform the conversion from dpdk port to bnxt ifindex */
464 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
467 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
468 return BNXT_TF_RC_ERROR;
471 /* Update the SVIF details */
472 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
475 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF item handler: validates the VF spec/mask, converts the VF function
 * id to a bnxt ifindex via the port db, and installs the SVIF match.
 * NOTE(review): truncated view -- the NULL checks around the two error
 * logs, the 'mask'/'ifindex' locals and several braces are missing, so
 * the exact validation conditions must be confirmed in the full source.
 */
477 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
478 struct ulp_rte_parser_params *params)
480 const struct rte_flow_item_vf *vf_spec = item->spec;
481 const struct rte_flow_item_vf *vf_mask = item->mask;
484 int32_t rc = BNXT_TF_RC_PARSE_ERR;
486 /* Get VF rte_flow_item for Port details */
488 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
492 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
497 /* perform the conversion from VF Func id to bnxt ifindex */
498 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
501 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
504 /* Update the SVIF details */
505 return ulp_rte_parser_svif_set(params, ifindex, mask);
508 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * PORT_ID item handler: validates spec/mask, maps the dpdk port id to a
 * bnxt ifindex, then installs the SVIF match with the caller's mask.
 * NOTE(review): truncated view -- the spec/mask NULL checks and the
 * 'mask'/'ifindex' locals are missing between visible lines.
 */
510 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
511 struct ulp_rte_parser_params *params)
513 const struct rte_flow_item_port_id *port_spec = item->spec;
514 const struct rte_flow_item_port_id *port_mask = item->mask;
516 int32_t rc = BNXT_TF_RC_PARSE_ERR;
520 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
524 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
527 mask = port_mask->id;
529 /* perform the conversion from dpdk port to bnxt ifindex */
530 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
533 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
536 /* Update the SVIF details */
537 return ulp_rte_parser_svif_set(params, ifindex, mask);
540 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * PHY_PORT item handler: validates spec/mask, forces the match port type
 * to PHY_PORT, recomputes direction (phy ports are only legal on
 * ingress), looks up the physical port SVIF and writes big-endian
 * spec/mask into the SVIF hdr_field slot plus the SVIF_FLAG field.
 * NOTE(review): truncated view -- spec/mask NULL checks, 'svif'/'mask'
 * locals, the rc check after the port-db call and braces are missing;
 * "¶ms" below is mojibake for "&params".
 */
542 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
543 struct ulp_rte_parser_params *params)
545 const struct rte_flow_item_phy_port *port_spec = item->spec;
546 const struct rte_flow_item_phy_port *port_mask = item->mask;
548 int32_t rc = BNXT_TF_RC_ERROR;
550 enum bnxt_ulp_direction_type dir;
551 struct ulp_rte_hdr_field *hdr_field;
553 /* Copy the rte_flow_item for phy port into hdr_field */
555 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
559 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
562 mask = port_mask->index;
564 /* Update the match port type */
565 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
566 BNXT_ULP_INTF_TYPE_PHY_PORT);
568 /* Compute the Hw direction */
569 bnxt_ulp_rte_parser_direction_compute(params);
571 /* Direction validation */
572 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
573 if (dir == BNXT_ULP_DIR_EGRESS) {
575 "Parse Err:Phy ports are valid only for ingress\n");
576 return BNXT_TF_RC_PARSE_ERR;
579 /* Get the physical port details from port db */
580 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
583 BNXT_TF_DBG(ERR, "Failed to get port details\n");
584 return BNXT_TF_RC_PARSE_ERR;
587 /* Update the SVIF details */
588 svif = rte_cpu_to_be_16(svif);
589 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
590 memcpy(hdr_field->spec, &svif, sizeof(svif));
591 memcpy(hdr_field->mask, &mask, sizeof(mask));
592 hdr_field->size = sizeof(svif);
593 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
594 rte_be_to_cpu_16(svif));
595 return BNXT_TF_RC_SUCCESS;
598 /* Function to handle the update of proto header based on field values */
/*
 * From a big-endian ethertype, sets the inner or outer (per 'in_flag')
 * IPv4/IPv6 fast-path header bit and the matching L3 computed field.
 * Ethertypes other than IPv4/IPv6 are ignored.
 * NOTE(review): truncated view -- return type, braces and the inner
 * if/else structure around 'in_flag' are not visible.
 */
600 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
601 uint16_t type, uint32_t in_flag)
603 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
605 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
606 BNXT_ULP_HDR_BIT_I_IPV4);
607 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
609 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
610 BNXT_ULP_HDR_BIT_O_IPV4);
611 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
613 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
615 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
616 BNXT_ULP_HDR_BIT_I_IPV6);
617 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
619 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
620 BNXT_ULP_HDR_BIT_O_IPV6);
621 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
626 /* Internal Function to identify broadcast or multicast packets */
/*
 * Returns truthy for a multicast or broadcast MAC (both are rejected by
 * the parser, which cannot offload them); the success/failure return
 * values themselves are on lines not visible in this truncated view.
 */
628 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
630 if (rte_is_multicast_ether_addr(eth_addr) ||
631 rte_is_broadcast_ether_addr(eth_addr)) {
633 "No support for bcast or mcast addr offload\n");
639 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * ETH item handler: copies dst MAC, src MAC and ethertype specs into
 * consecutive hdr_field slots (rejecting bcast/mcast addresses), copies
 * the corresponding masks, advances field_idx by the ETH slot count,
 * classifies the header as inner (I_ETH) when any outer L2/L3/L4 bit is
 * already present, else outer (O_ETH), and finally propagates the
 * ethertype into the L3 fast-path bits.
 * NOTE(review): truncated view -- NULL guards around eth_spec/eth_mask,
 * the inner_flag assignment and several braces are missing; "ð" on
 * the bcmc checks and type-mask copy is mojibake for "&eth_spec"/
 * "&eth_mask".
 */
641 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
642 struct ulp_rte_parser_params *params)
644 const struct rte_flow_item_eth *eth_spec = item->spec;
645 const struct rte_flow_item_eth *eth_mask = item->mask;
646 struct ulp_rte_hdr_field *field;
647 uint32_t idx = params->field_idx;
649 uint16_t eth_type = 0;
650 uint32_t inner_flag = 0;
653 * Copy the rte_flow_item for eth into hdr_field using ethernet
657 size = sizeof(eth_spec->dst.addr_bytes);
658 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
659 eth_spec->dst.addr_bytes,
661 /* Todo: work around to avoid multicast and broadcast addr */
662 if (ulp_rte_parser_is_bcmc_addr(ð_spec->dst))
663 return BNXT_TF_RC_PARSE_ERR;
665 size = sizeof(eth_spec->src.addr_bytes);
666 field = ulp_rte_parser_fld_copy(field,
667 eth_spec->src.addr_bytes,
669 /* Todo: work around to avoid multicast and broadcast addr */
670 if (ulp_rte_parser_is_bcmc_addr(ð_spec->src))
671 return BNXT_TF_RC_PARSE_ERR;
673 field = ulp_rte_parser_fld_copy(field,
675 sizeof(eth_spec->type));
676 eth_type = eth_spec->type;
679 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
680 sizeof(eth_mask->dst.addr_bytes));
681 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
682 sizeof(eth_mask->src.addr_bytes));
683 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
684 sizeof(eth_mask->type));
686 /* Add number of Eth header elements */
687 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
689 /* Update the protocol hdr bitmap */
690 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
691 BNXT_ULP_HDR_BIT_O_ETH) ||
692 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
693 BNXT_ULP_HDR_BIT_O_IPV4) ||
694 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
695 BNXT_ULP_HDR_BIT_O_IPV6) ||
696 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697 BNXT_ULP_HDR_BIT_O_UDP) ||
698 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699 BNXT_ULP_HDR_BIT_O_TCP)) {
700 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
703 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
705 /* Update the field protocol hdr bitmap */
706 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
708 return BNXT_TF_RC_SUCCESS;
711 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * VLAN item handler: splits the TCI into priority (top 3 bits) and tag
 * (low 12 bits), copies priority/tag/inner-type specs into hdr_field,
 * widens an all-ones priority or tag mask to a full 16-bit exact match,
 * deliberately ignores the priority mask (OVS wildcard workaround),
 * then classifies the tag as OO/OI/IO/II VLAN based on which ETH headers
 * were seen and the running outer/inner vtag counts, updating the
 * corresponding NO/ONE/TWO_VTAG computed fields. Errors out when a VLAN
 * appears with no preceding ETH header.
 * NOTE(review): truncated view -- NULL guards on vlan_spec/vlan_mask,
 * the vtag-count increments, inner_flag assignment and several braces
 * are missing; "¶ms" is mojibake for "&params"; ntohs/htons here
 * presumably mirror rte_be_to_cpu_16/rte_cpu_to_be_16 -- confirm.
 */
713 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
714 struct ulp_rte_parser_params *params)
716 const struct rte_flow_item_vlan *vlan_spec = item->spec;
717 const struct rte_flow_item_vlan *vlan_mask = item->mask;
718 struct ulp_rte_hdr_field *field;
719 struct ulp_rte_hdr_bitmap *hdr_bit;
720 uint32_t idx = params->field_idx;
721 uint16_t vlan_tag, priority;
722 uint32_t outer_vtag_num;
723 uint32_t inner_vtag_num;
724 uint16_t eth_type = 0;
725 uint32_t inner_flag = 0;
728 * Copy the rte_flow_item for vlan into hdr_field using Vlan
732 vlan_tag = ntohs(vlan_spec->tci);
733 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
734 vlan_tag &= ULP_VLAN_TAG_MASK;
735 vlan_tag = htons(vlan_tag);
737 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
740 field = ulp_rte_parser_fld_copy(field,
743 field = ulp_rte_parser_fld_copy(field,
744 &vlan_spec->inner_type,
745 sizeof(vlan_spec->inner_type));
746 eth_type = vlan_spec->inner_type;
750 vlan_tag = ntohs(vlan_mask->tci);
751 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
755 * the storage for priority and vlan tag is 2 bytes
756 * The mask of priority which is 3 bits if it is all 1's
757 * then make the rest bits 13 bits as 1's
758 * so that it is matched as exact match.
760 if (priority == ULP_VLAN_PRIORITY_MASK)
761 priority |= ~ULP_VLAN_PRIORITY_MASK;
762 if (vlan_tag == ULP_VLAN_TAG_MASK)
763 vlan_tag |= ~ULP_VLAN_TAG_MASK;
764 vlan_tag = htons(vlan_tag);
767 * The priority field is ignored since OVS is setting it as
768 * wild card match and it is not supported. This is a work
769 * around and shall be addressed in the future.
771 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
774 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
776 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
777 sizeof(vlan_mask->inner_type));
779 /* Set the field index to new incremented value */
780 params->field_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
782 /* Get the outer tag and inner tag counts */
783 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
784 BNXT_ULP_CF_IDX_O_VTAG_NUM);
785 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
786 BNXT_ULP_CF_IDX_I_VTAG_NUM);
788 /* Update the hdr_bitmap of the vlans */
789 hdr_bit = ¶ms->hdr_bitmap;
790 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
791 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
793 /* Update the vlan tag num */
795 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
797 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
798 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
799 ULP_BITMAP_SET(params->hdr_bitmap.bits,
800 BNXT_ULP_HDR_BIT_OO_VLAN);
801 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
802 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
803 outer_vtag_num == 1) {
804 /* update the vlan tag num */
806 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
808 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
809 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
810 ULP_BITMAP_SET(params->hdr_bitmap.bits,
811 BNXT_ULP_HDR_BIT_OI_VLAN);
812 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
813 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
815 /* update the vlan tag num */
817 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
819 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
820 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
821 ULP_BITMAP_SET(params->hdr_bitmap.bits,
822 BNXT_ULP_HDR_BIT_IO_VLAN);
824 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
825 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
826 inner_vtag_num == 1) {
827 /* update the vlan tag num */
829 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
831 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
832 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
833 ULP_BITMAP_SET(params->hdr_bitmap.bits,
834 BNXT_ULP_HDR_BIT_II_VLAN);
837 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
838 return BNXT_TF_RC_ERROR;
840 /* Update the field protocol hdr bitmap */
841 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
842 return BNXT_TF_RC_SUCCESS;
845 /* Function to handle the update of proto header based on field values */
/*
 * From an IP protocol number, sets the inner or outer (per 'in_flag')
 * UDP/TCP fast-path header bit and the matching L4 computed field.
 * Protocols other than UDP/TCP are ignored.
 * NOTE(review): truncated view -- return type, braces and the in_flag
 * if/else structure are not visible.
 */
847 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
848 uint8_t proto, uint32_t in_flag)
850 if (proto == IPPROTO_UDP) {
852 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
853 BNXT_ULP_HDR_BIT_I_UDP);
854 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
856 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
857 BNXT_ULP_HDR_BIT_O_UDP);
858 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
860 } else if (proto == IPPROTO_TCP) {
862 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
863 BNXT_ULP_HDR_BIT_I_TCP);
864 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
866 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
867 BNXT_ULP_HDR_BIT_O_TCP);
868 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
873 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * IPv4 item handler: rejects a third L3 header, shifts field_idx past
 * the ETH/VLAN slots for header-less (F2) flows, copies each ipv4 hdr
 * spec field and mask into hdr_field (tos mask deliberately ignored --
 * OVS wildcard workaround), advances field_idx by the IPv4 slot count,
 * marks the header inner (I_IPV4) when an outer L3 bit already exists
 * else outer (O_IPV4), propagates next_proto_id into the L4 fast-path
 * bits, and bumps the L3 header count.
 * NOTE(review): truncated view -- NULL guards on ipv4_spec/ipv4_mask,
 * 'size'/'proto'/'cnt' declarations, the cnt limit comparison and many
 * braces are missing; "¶ms" is mojibake for "&params".
 */
875 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
876 struct ulp_rte_parser_params *params)
878 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
879 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
880 struct ulp_rte_hdr_field *field;
881 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
882 uint32_t idx = params->field_idx;
885 uint32_t inner_flag = 0;
888 /* validate there are no 3rd L3 header */
889 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
891 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
892 return BNXT_TF_RC_ERROR;
895 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
896 BNXT_ULP_HDR_BIT_O_ETH) &&
897 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
898 BNXT_ULP_HDR_BIT_I_ETH)) {
899 /* Since F2 flow does not include eth item, when parser detects
900 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
901 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
902 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
903 * This will allow the parser post processor to update the
904 * t_dmac in hdr_field[o_eth.dmac]
906 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
907 BNXT_ULP_PROTO_HDR_VLAN_NUM);
908 params->field_idx = idx;
912 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
916 size = sizeof(ipv4_spec->hdr.version_ihl);
917 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
918 &ipv4_spec->hdr.version_ihl,
920 size = sizeof(ipv4_spec->hdr.type_of_service);
921 field = ulp_rte_parser_fld_copy(field,
922 &ipv4_spec->hdr.type_of_service,
924 size = sizeof(ipv4_spec->hdr.total_length);
925 field = ulp_rte_parser_fld_copy(field,
926 &ipv4_spec->hdr.total_length,
928 size = sizeof(ipv4_spec->hdr.packet_id);
929 field = ulp_rte_parser_fld_copy(field,
930 &ipv4_spec->hdr.packet_id,
932 size = sizeof(ipv4_spec->hdr.fragment_offset);
933 field = ulp_rte_parser_fld_copy(field,
934 &ipv4_spec->hdr.fragment_offset,
936 size = sizeof(ipv4_spec->hdr.time_to_live);
937 field = ulp_rte_parser_fld_copy(field,
938 &ipv4_spec->hdr.time_to_live,
940 size = sizeof(ipv4_spec->hdr.next_proto_id);
941 field = ulp_rte_parser_fld_copy(field,
942 &ipv4_spec->hdr.next_proto_id,
944 proto = ipv4_spec->hdr.next_proto_id;
945 size = sizeof(ipv4_spec->hdr.hdr_checksum);
946 field = ulp_rte_parser_fld_copy(field,
947 &ipv4_spec->hdr.hdr_checksum,
949 size = sizeof(ipv4_spec->hdr.src_addr);
950 field = ulp_rte_parser_fld_copy(field,
951 &ipv4_spec->hdr.src_addr,
953 size = sizeof(ipv4_spec->hdr.dst_addr);
954 field = ulp_rte_parser_fld_copy(field,
955 &ipv4_spec->hdr.dst_addr,
959 ulp_rte_prsr_mask_copy(params, &idx,
960 &ipv4_mask->hdr.version_ihl,
961 sizeof(ipv4_mask->hdr.version_ihl));
963 * The tos field is ignored since OVS is setting it as wild card
964 * match and it is not supported. This is a work around and
965 * shall be addressed in the future.
967 ulp_rte_prsr_mask_ignore(params, &idx,
968 &ipv4_mask->hdr.type_of_service,
969 sizeof(ipv4_mask->hdr.type_of_service)
972 ulp_rte_prsr_mask_copy(params, &idx,
973 &ipv4_mask->hdr.total_length,
974 sizeof(ipv4_mask->hdr.total_length));
975 ulp_rte_prsr_mask_copy(params, &idx,
976 &ipv4_mask->hdr.packet_id,
977 sizeof(ipv4_mask->hdr.packet_id));
978 ulp_rte_prsr_mask_copy(params, &idx,
979 &ipv4_mask->hdr.fragment_offset,
980 sizeof(ipv4_mask->hdr.fragment_offset));
981 ulp_rte_prsr_mask_copy(params, &idx,
982 &ipv4_mask->hdr.time_to_live,
983 sizeof(ipv4_mask->hdr.time_to_live));
984 ulp_rte_prsr_mask_copy(params, &idx,
985 &ipv4_mask->hdr.next_proto_id,
986 sizeof(ipv4_mask->hdr.next_proto_id));
987 ulp_rte_prsr_mask_copy(params, &idx,
988 &ipv4_mask->hdr.hdr_checksum,
989 sizeof(ipv4_mask->hdr.hdr_checksum));
990 ulp_rte_prsr_mask_copy(params, &idx,
991 &ipv4_mask->hdr.src_addr,
992 sizeof(ipv4_mask->hdr.src_addr));
993 ulp_rte_prsr_mask_copy(params, &idx,
994 &ipv4_mask->hdr.dst_addr,
995 sizeof(ipv4_mask->hdr.dst_addr));
997 /* Add the number of ipv4 header elements */
998 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
1000 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1001 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1002 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1003 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1004 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1007 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1008 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1011 /* Update the field protocol hdr bitmap */
1012 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1013 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1014 return BNXT_TF_RC_SUCCESS;
1017 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPv6 twin of ulp_rte_ipv4_hdr_handler(): rejects a third L3 header,
 * applies the F2 ETH/VLAN slot offset, decomposes vtc_flow into
 * version / traffic-class / flow-label fields (TC and flow-label masks
 * deliberately ignored -- OVS wildcard workaround), copies the
 * remaining spec/mask fields, advances field_idx by the IPv6 slot
 * count, marks the header inner or outer, propagates hdr.proto into
 * the L4 fast-path bits and bumps the L3 header count.
 * NOTE(review): truncated view -- NULL guards, 'size'/'proto'/'cnt'
 * declarations, the cnt limit comparison and many braces are missing;
 * "¶ms" is mojibake for "&params".
 */
1019 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1020 struct ulp_rte_parser_params *params)
1022 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1023 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1024 struct ulp_rte_hdr_field *field;
1025 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1026 uint32_t idx = params->field_idx;
1028 uint32_t vtcf, vtcf_mask;
1030 uint32_t inner_flag = 0;
1033 /* validate there are no 3rd L3 header */
1034 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1036 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1037 return BNXT_TF_RC_ERROR;
1040 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1041 BNXT_ULP_HDR_BIT_O_ETH) &&
1042 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1043 BNXT_ULP_HDR_BIT_I_ETH)) {
1044 /* Since F2 flow does not include eth item, when parser detects
1045 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1046 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
1047 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1048 * This will allow the parser post processor to update the
1049 * t_dmac in hdr_field[o_eth.dmac]
1051 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1052 BNXT_ULP_PROTO_HDR_VLAN_NUM);
1053 params->field_idx = idx;
1057 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1061 size = sizeof(ipv6_spec->hdr.vtc_flow);
1063 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1064 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1068 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1069 field = ulp_rte_parser_fld_copy(field,
1073 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1074 field = ulp_rte_parser_fld_copy(field,
1078 size = sizeof(ipv6_spec->hdr.payload_len);
1079 field = ulp_rte_parser_fld_copy(field,
1080 &ipv6_spec->hdr.payload_len,
1082 size = sizeof(ipv6_spec->hdr.proto);
1083 field = ulp_rte_parser_fld_copy(field,
1084 &ipv6_spec->hdr.proto,
1086 proto = ipv6_spec->hdr.proto;
1087 size = sizeof(ipv6_spec->hdr.hop_limits);
1088 field = ulp_rte_parser_fld_copy(field,
1089 &ipv6_spec->hdr.hop_limits,
1091 size = sizeof(ipv6_spec->hdr.src_addr);
1092 field = ulp_rte_parser_fld_copy(field,
1093 &ipv6_spec->hdr.src_addr,
1095 size = sizeof(ipv6_spec->hdr.dst_addr);
1096 field = ulp_rte_parser_fld_copy(field,
1097 &ipv6_spec->hdr.dst_addr,
1101 size = sizeof(ipv6_mask->hdr.vtc_flow);
1103 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1104 ulp_rte_prsr_mask_copy(params, &idx,
1108 * The TC and flow label field are ignored since OVS is setting
1109 * it for match and it is not supported.
1110 * This is a work around and
1111 * shall be addressed in the future.
1113 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1114 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1116 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1117 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1119 ulp_rte_prsr_mask_copy(params, &idx,
1120 &ipv6_mask->hdr.payload_len,
1121 sizeof(ipv6_mask->hdr.payload_len));
1122 ulp_rte_prsr_mask_copy(params, &idx,
1123 &ipv6_mask->hdr.proto,
1124 sizeof(ipv6_mask->hdr.proto));
1125 ulp_rte_prsr_mask_copy(params, &idx,
1126 &ipv6_mask->hdr.hop_limits,
1127 sizeof(ipv6_mask->hdr.hop_limits));
1128 ulp_rte_prsr_mask_copy(params, &idx,
1129 &ipv6_mask->hdr.src_addr,
1130 sizeof(ipv6_mask->hdr.src_addr));
1131 ulp_rte_prsr_mask_copy(params, &idx,
1132 &ipv6_mask->hdr.dst_addr,
1133 sizeof(ipv6_mask->hdr.dst_addr));
1135 /* add number of ipv6 header elements */
1136 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1138 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1139 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1140 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1141 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1142 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1145 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1146 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1149 /* Update the field protocol hdr bitmap */
1150 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1151 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1153 return BNXT_TF_RC_SUCCESS;
1156 /* Function to handle the update of proto header based on field values */
1158 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1161 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1162 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1163 BNXT_ULP_HDR_BIT_T_VXLAN);
1164 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1168 /* Function to handle the parsing of RTE Flow item UDP Header. */
1170 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1171 struct ulp_rte_parser_params *params)
1173 const struct rte_flow_item_udp *udp_spec = item->spec;
1174 const struct rte_flow_item_udp *udp_mask = item->mask;
1175 struct ulp_rte_hdr_field *field;
1176 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1177 uint32_t idx = params->field_idx;
1179 uint16_t dst_port = 0;
1182 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1184 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1185 return BNXT_TF_RC_ERROR;
1189 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1193 size = sizeof(udp_spec->hdr.src_port);
1194 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1195 &udp_spec->hdr.src_port,
1198 size = sizeof(udp_spec->hdr.dst_port);
1199 field = ulp_rte_parser_fld_copy(field,
1200 &udp_spec->hdr.dst_port,
1202 dst_port = udp_spec->hdr.dst_port;
1203 size = sizeof(udp_spec->hdr.dgram_len);
1204 field = ulp_rte_parser_fld_copy(field,
1205 &udp_spec->hdr.dgram_len,
1207 size = sizeof(udp_spec->hdr.dgram_cksum);
1208 field = ulp_rte_parser_fld_copy(field,
1209 &udp_spec->hdr.dgram_cksum,
1213 ulp_rte_prsr_mask_copy(params, &idx,
1214 &udp_mask->hdr.src_port,
1215 sizeof(udp_mask->hdr.src_port));
1216 ulp_rte_prsr_mask_copy(params, &idx,
1217 &udp_mask->hdr.dst_port,
1218 sizeof(udp_mask->hdr.dst_port));
1219 ulp_rte_prsr_mask_copy(params, &idx,
1220 &udp_mask->hdr.dgram_len,
1221 sizeof(udp_mask->hdr.dgram_len));
1222 ulp_rte_prsr_mask_copy(params, &idx,
1223 &udp_mask->hdr.dgram_cksum,
1224 sizeof(udp_mask->hdr.dgram_cksum));
1227 /* Add number of UDP header elements */
1228 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1230 /* Set the udp header bitmap and computed l4 header bitmaps */
1231 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1232 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1233 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1234 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1236 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1237 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1238 /* Update the field protocol hdr bitmap */
1239 ulp_rte_l4_proto_type_update(params, dst_port);
1241 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1242 return BNXT_TF_RC_SUCCESS;
1245 /* Function to handle the parsing of RTE Flow item TCP Header. */
1247 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1248 struct ulp_rte_parser_params *params)
1250 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1251 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1252 struct ulp_rte_hdr_field *field;
1253 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1254 uint32_t idx = params->field_idx;
1258 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1260 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1261 return BNXT_TF_RC_ERROR;
1265 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1269 size = sizeof(tcp_spec->hdr.src_port);
1270 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1271 &tcp_spec->hdr.src_port,
1273 size = sizeof(tcp_spec->hdr.dst_port);
1274 field = ulp_rte_parser_fld_copy(field,
1275 &tcp_spec->hdr.dst_port,
1277 size = sizeof(tcp_spec->hdr.sent_seq);
1278 field = ulp_rte_parser_fld_copy(field,
1279 &tcp_spec->hdr.sent_seq,
1281 size = sizeof(tcp_spec->hdr.recv_ack);
1282 field = ulp_rte_parser_fld_copy(field,
1283 &tcp_spec->hdr.recv_ack,
1285 size = sizeof(tcp_spec->hdr.data_off);
1286 field = ulp_rte_parser_fld_copy(field,
1287 &tcp_spec->hdr.data_off,
1289 size = sizeof(tcp_spec->hdr.tcp_flags);
1290 field = ulp_rte_parser_fld_copy(field,
1291 &tcp_spec->hdr.tcp_flags,
1293 size = sizeof(tcp_spec->hdr.rx_win);
1294 field = ulp_rte_parser_fld_copy(field,
1295 &tcp_spec->hdr.rx_win,
1297 size = sizeof(tcp_spec->hdr.cksum);
1298 field = ulp_rte_parser_fld_copy(field,
1299 &tcp_spec->hdr.cksum,
1301 size = sizeof(tcp_spec->hdr.tcp_urp);
1302 field = ulp_rte_parser_fld_copy(field,
1303 &tcp_spec->hdr.tcp_urp,
1306 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1310 ulp_rte_prsr_mask_copy(params, &idx,
1311 &tcp_mask->hdr.src_port,
1312 sizeof(tcp_mask->hdr.src_port));
1313 ulp_rte_prsr_mask_copy(params, &idx,
1314 &tcp_mask->hdr.dst_port,
1315 sizeof(tcp_mask->hdr.dst_port));
1316 ulp_rte_prsr_mask_copy(params, &idx,
1317 &tcp_mask->hdr.sent_seq,
1318 sizeof(tcp_mask->hdr.sent_seq));
1319 ulp_rte_prsr_mask_copy(params, &idx,
1320 &tcp_mask->hdr.recv_ack,
1321 sizeof(tcp_mask->hdr.recv_ack));
1322 ulp_rte_prsr_mask_copy(params, &idx,
1323 &tcp_mask->hdr.data_off,
1324 sizeof(tcp_mask->hdr.data_off));
1325 ulp_rte_prsr_mask_copy(params, &idx,
1326 &tcp_mask->hdr.tcp_flags,
1327 sizeof(tcp_mask->hdr.tcp_flags));
1328 ulp_rte_prsr_mask_copy(params, &idx,
1329 &tcp_mask->hdr.rx_win,
1330 sizeof(tcp_mask->hdr.rx_win));
1331 ulp_rte_prsr_mask_copy(params, &idx,
1332 &tcp_mask->hdr.cksum,
1333 sizeof(tcp_mask->hdr.cksum));
1334 ulp_rte_prsr_mask_copy(params, &idx,
1335 &tcp_mask->hdr.tcp_urp,
1336 sizeof(tcp_mask->hdr.tcp_urp));
1338 /* add number of TCP header elements */
1339 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1341 /* Set the udp header bitmap and computed l4 header bitmaps */
1342 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1343 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1344 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1345 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1347 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1348 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1350 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1351 return BNXT_TF_RC_SUCCESS;
1354 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1356 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1357 struct ulp_rte_parser_params *params)
1359 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1360 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1361 struct ulp_rte_hdr_field *field;
1362 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1363 uint32_t idx = params->field_idx;
1367 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1371 size = sizeof(vxlan_spec->flags);
1372 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1375 size = sizeof(vxlan_spec->rsvd0);
1376 field = ulp_rte_parser_fld_copy(field,
1379 size = sizeof(vxlan_spec->vni);
1380 field = ulp_rte_parser_fld_copy(field,
1383 size = sizeof(vxlan_spec->rsvd1);
1384 field = ulp_rte_parser_fld_copy(field,
1389 ulp_rte_prsr_mask_copy(params, &idx,
1391 sizeof(vxlan_mask->flags));
1392 ulp_rte_prsr_mask_copy(params, &idx,
1394 sizeof(vxlan_mask->rsvd0));
1395 ulp_rte_prsr_mask_copy(params, &idx,
1397 sizeof(vxlan_mask->vni));
1398 ulp_rte_prsr_mask_copy(params, &idx,
1400 sizeof(vxlan_mask->rsvd1));
1402 /* Add number of vxlan header elements */
1403 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1405 /* Update the hdr_bitmap with vxlan */
1406 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1407 return BNXT_TF_RC_SUCCESS;
1410 /* Function to handle the parsing of RTE Flow item void Header */
1412 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1413 struct ulp_rte_parser_params *params __rte_unused)
1415 return BNXT_TF_RC_SUCCESS;
1418 /* Function to handle the parsing of RTE Flow action void Header. */
1420 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1421 struct ulp_rte_parser_params *params __rte_unused)
1423 return BNXT_TF_RC_SUCCESS;
1426 /* Function to handle the parsing of RTE Flow action Mark Header. */
1428 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1429 struct ulp_rte_parser_params *param)
1431 const struct rte_flow_action_mark *mark;
1432 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1435 mark = action_item->conf;
1437 mark_id = tfp_cpu_to_be_32(mark->id);
1438 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1439 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1441 /* Update the hdr_bitmap with vxlan */
1442 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1443 return BNXT_TF_RC_SUCCESS;
1445 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1446 return BNXT_TF_RC_ERROR;
1449 /* Function to handle the parsing of RTE Flow action RSS Header. */
1451 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1452 struct ulp_rte_parser_params *param)
1454 const struct rte_flow_action_rss *rss = action_item->conf;
1457 /* Update the hdr_bitmap with vxlan */
1458 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1459 return BNXT_TF_RC_SUCCESS;
1461 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1462 return BNXT_TF_RC_ERROR;
1465 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1467 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1468 struct ulp_rte_parser_params *params)
1470 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1471 const struct rte_flow_item *item;
1472 const struct rte_flow_item_eth *eth_spec;
1473 const struct rte_flow_item_ipv4 *ipv4_spec;
1474 const struct rte_flow_item_ipv6 *ipv6_spec;
1475 struct rte_flow_item_vxlan vxlan_spec;
1476 uint32_t vlan_num = 0, vlan_size = 0;
1477 uint32_t ip_size = 0, ip_type = 0;
1478 uint32_t vxlan_size = 0;
1480 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1481 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1483 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1484 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1486 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1487 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1488 const uint8_t *tmp_buff;
1490 vxlan_encap = action_item->conf;
1492 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1493 return BNXT_TF_RC_ERROR;
1496 item = vxlan_encap->definition;
1498 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1499 return BNXT_TF_RC_ERROR;
1502 if (!ulp_rte_item_skip_void(&item, 0))
1503 return BNXT_TF_RC_ERROR;
1505 /* must have ethernet header */
1506 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1507 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1508 return BNXT_TF_RC_ERROR;
1510 eth_spec = item->spec;
1511 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1512 ulp_encap_buffer_copy(buff,
1513 eth_spec->dst.addr_bytes,
1514 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1515 ULP_BUFFER_ALIGN_8_BYTE);
1517 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1518 ulp_encap_buffer_copy(buff,
1519 eth_spec->src.addr_bytes,
1520 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1521 ULP_BUFFER_ALIGN_8_BYTE);
1523 /* Goto the next item */
1524 if (!ulp_rte_item_skip_void(&item, 1))
1525 return BNXT_TF_RC_ERROR;
1527 /* May have vlan header */
1528 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1530 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1531 ulp_encap_buffer_copy(buff,
1533 sizeof(struct rte_flow_item_vlan),
1534 ULP_BUFFER_ALIGN_8_BYTE);
1536 if (!ulp_rte_item_skip_void(&item, 1))
1537 return BNXT_TF_RC_ERROR;
1540 /* may have two vlan headers */
1541 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1543 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1544 sizeof(struct rte_flow_item_vlan)],
1546 sizeof(struct rte_flow_item_vlan));
1547 if (!ulp_rte_item_skip_void(&item, 1))
1548 return BNXT_TF_RC_ERROR;
1550 /* Update the vlan count and size of more than one */
1552 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1553 vlan_num = tfp_cpu_to_be_32(vlan_num);
1554 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1557 vlan_size = tfp_cpu_to_be_32(vlan_size);
1558 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1563 /* L3 must be IPv4, IPv6 */
1564 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1565 ipv4_spec = item->spec;
1566 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1568 /* copy the ipv4 details */
1569 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1570 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1571 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1572 ulp_encap_buffer_copy(buff,
1574 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1575 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1576 ULP_BUFFER_ALIGN_8_BYTE);
1578 /* Total length being ignored in the ip hdr. */
1579 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1580 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1581 ulp_encap_buffer_copy(buff,
1583 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1584 ULP_BUFFER_ALIGN_8_BYTE);
1585 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1586 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1587 ulp_encap_buffer_copy(buff,
1588 &ipv4_spec->hdr.version_ihl,
1589 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1590 ULP_BUFFER_ALIGN_8_BYTE);
1593 /* Update the dst ip address in ip encap buffer */
1594 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1595 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1596 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1597 ulp_encap_buffer_copy(buff,
1598 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1599 sizeof(ipv4_spec->hdr.dst_addr),
1600 ULP_BUFFER_ALIGN_8_BYTE);
1602 /* Update the src ip address */
1603 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1604 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1605 sizeof(ipv4_spec->hdr.src_addr)];
1606 ulp_encap_buffer_copy(buff,
1607 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1608 sizeof(ipv4_spec->hdr.src_addr),
1609 ULP_BUFFER_ALIGN_8_BYTE);
1611 /* Update the ip size details */
1612 ip_size = tfp_cpu_to_be_32(ip_size);
1613 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1614 &ip_size, sizeof(uint32_t));
1616 /* update the ip type */
1617 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1618 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1619 &ip_type, sizeof(uint32_t));
1621 /* update the computed field to notify it is ipv4 header */
1622 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1625 if (!ulp_rte_item_skip_void(&item, 1))
1626 return BNXT_TF_RC_ERROR;
1627 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1628 ipv6_spec = item->spec;
1629 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1631 /* copy the ipv6 details */
1632 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1633 if (ulp_buffer_is_empty(tmp_buff,
1634 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1635 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1636 ulp_encap_buffer_copy(buff,
1638 sizeof(def_ipv6_hdr),
1639 ULP_BUFFER_ALIGN_8_BYTE);
1641 /* The payload length being ignored in the ip hdr. */
1642 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1643 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1644 ulp_encap_buffer_copy(buff,
1646 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1647 ULP_BUFFER_ALIGN_8_BYTE);
1648 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1649 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1650 BNXT_ULP_ENCAP_IPV6_DO];
1651 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1652 ulp_encap_buffer_copy(buff,
1654 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1655 ULP_BUFFER_ALIGN_8_BYTE);
1657 /* Update the dst ip address in ip encap buffer */
1658 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1659 sizeof(def_ipv6_hdr)];
1660 ulp_encap_buffer_copy(buff,
1661 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1662 sizeof(ipv6_spec->hdr.dst_addr),
1663 ULP_BUFFER_ALIGN_8_BYTE);
1665 /* Update the src ip address */
1666 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1667 ulp_encap_buffer_copy(buff,
1668 (const uint8_t *)ipv6_spec->hdr.src_addr,
1669 sizeof(ipv6_spec->hdr.src_addr),
1670 ULP_BUFFER_ALIGN_16_BYTE);
1672 /* Update the ip size details */
1673 ip_size = tfp_cpu_to_be_32(ip_size);
1674 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1675 &ip_size, sizeof(uint32_t));
1677 /* update the ip type */
1678 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1679 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1680 &ip_type, sizeof(uint32_t));
1682 /* update the computed field to notify it is ipv6 header */
1683 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1686 if (!ulp_rte_item_skip_void(&item, 1))
1687 return BNXT_TF_RC_ERROR;
1689 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1690 return BNXT_TF_RC_ERROR;
1694 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1695 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1696 return BNXT_TF_RC_ERROR;
1698 /* copy the udp details */
1699 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1700 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1701 ULP_BUFFER_ALIGN_8_BYTE);
1703 if (!ulp_rte_item_skip_void(&item, 1))
1704 return BNXT_TF_RC_ERROR;
1707 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1708 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1709 return BNXT_TF_RC_ERROR;
1711 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1712 /* copy the vxlan details */
1713 memcpy(&vxlan_spec, item->spec, vxlan_size);
1714 vxlan_spec.flags = 0x08;
1715 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
1716 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1717 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1718 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1720 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1721 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1722 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1723 (const uint8_t *)&vxlan_spec.vni,
1724 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1726 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1727 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1728 &vxlan_size, sizeof(uint32_t));
1730 /* update the hdr_bitmap with vxlan */
1731 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
1732 return BNXT_TF_RC_SUCCESS;
1735 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1737 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1739 struct ulp_rte_parser_params *params)
1741 /* update the hdr_bitmap with vxlan */
1742 ULP_BITMAP_SET(params->act_bitmap.bits,
1743 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
1744 /* Update computational field with tunnel decap info */
1745 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1746 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1747 return BNXT_TF_RC_SUCCESS;
1750 /* Function to handle the parsing of RTE Flow action drop Header. */
1752 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1753 struct ulp_rte_parser_params *params)
1755 /* Update the hdr_bitmap with drop */
1756 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
1757 return BNXT_TF_RC_SUCCESS;
1760 /* Function to handle the parsing of RTE Flow action count. */
1762 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1763 struct ulp_rte_parser_params *params)
1766 const struct rte_flow_action_count *act_count;
1767 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1769 act_count = action_item->conf;
1771 if (act_count->shared) {
1773 "Parse Error:Shared count not supported\n");
1774 return BNXT_TF_RC_PARSE_ERR;
1776 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1778 BNXT_ULP_ACT_PROP_SZ_COUNT);
1781 /* Update the hdr_bitmap with count */
1782 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
1783 return BNXT_TF_RC_SUCCESS;
1786 /* Function to handle the parsing of action ports. */
1788 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1791 enum bnxt_ulp_direction_type dir;
1794 struct ulp_rte_act_prop *act = ¶m->act_prop;
1795 enum bnxt_ulp_intf_type port_type;
1798 /* Get the direction */
1799 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1800 if (dir == BNXT_ULP_DIR_EGRESS) {
1801 /* For egress direction, fill vport */
1802 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1803 return BNXT_TF_RC_ERROR;
1806 pid = rte_cpu_to_be_32(pid);
1807 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1808 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1810 /* For ingress direction, fill vnic */
1811 port_type = ULP_COMP_FLD_IDX_RD(param,
1812 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1813 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1814 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1816 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1818 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1820 return BNXT_TF_RC_ERROR;
1823 pid = rte_cpu_to_be_32(pid);
1824 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1825 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1828 /* Update the action port set bit */
1829 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1830 return BNXT_TF_RC_SUCCESS;
1833 /* Function to handle the parsing of RTE Flow action PF. */
1835 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1836 struct ulp_rte_parser_params *params)
1840 enum bnxt_ulp_intf_type intf_type;
1842 /* Get the port id of the current device */
1843 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1845 /* Get the port db ifindex */
1846 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1848 BNXT_TF_DBG(ERR, "Invalid port id\n");
1849 return BNXT_TF_RC_ERROR;
1852 /* Check the port is PF port */
1853 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1854 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1855 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1856 return BNXT_TF_RC_ERROR;
1858 /* Update the action properties */
1859 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1860 return ulp_rte_parser_act_port_set(params, ifindex);
1863 /* Function to handle the parsing of RTE Flow action VF. */
1865 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1866 struct ulp_rte_parser_params *params)
1868 const struct rte_flow_action_vf *vf_action;
1870 enum bnxt_ulp_intf_type intf_type;
1872 vf_action = action_item->conf;
1874 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1875 return BNXT_TF_RC_PARSE_ERR;
1878 if (vf_action->original) {
1879 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1880 return BNXT_TF_RC_PARSE_ERR;
1883 /* Check the port is VF port */
1884 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1886 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1887 return BNXT_TF_RC_ERROR;
1889 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1890 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1891 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1892 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1893 return BNXT_TF_RC_ERROR;
1896 /* Update the action properties */
1897 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1898 return ulp_rte_parser_act_port_set(params, ifindex);
1901 /* Function to handle the parsing of RTE Flow action port_id. */
1903 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1904 struct ulp_rte_parser_params *param)
1906 const struct rte_flow_action_port_id *port_id = act_item->conf;
1908 enum bnxt_ulp_intf_type intf_type;
1912 "ParseErr: Invalid Argument\n");
1913 return BNXT_TF_RC_PARSE_ERR;
1915 if (port_id->original) {
1917 "ParseErr:Portid Original not supported\n");
1918 return BNXT_TF_RC_PARSE_ERR;
1921 /* Get the port db ifindex */
1922 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1924 BNXT_TF_DBG(ERR, "Invalid port id\n");
1925 return BNXT_TF_RC_ERROR;
1928 /* Get the intf type */
1929 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1931 BNXT_TF_DBG(ERR, "Invalid port type\n");
1932 return BNXT_TF_RC_ERROR;
1935 /* Set the action port */
1936 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1937 return ulp_rte_parser_act_port_set(param, ifindex);
1940 /* Function to handle the parsing of RTE Flow action phy_port. */
1942 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1943 struct ulp_rte_parser_params *prm)
1945 const struct rte_flow_action_phy_port *phy_port;
1949 enum bnxt_ulp_direction_type dir;
1951 phy_port = action_item->conf;
1954 "ParseErr: Invalid Argument\n");
1955 return BNXT_TF_RC_PARSE_ERR;
1958 if (phy_port->original) {
1960 "Parse Err:Port Original not supported\n");
1961 return BNXT_TF_RC_PARSE_ERR;
1963 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1964 if (dir != BNXT_ULP_DIR_EGRESS) {
1966 "Parse Err:Phy ports are valid only for egress\n");
1967 return BNXT_TF_RC_PARSE_ERR;
1969 /* Get the physical port details from port db */
1970 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1973 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1978 pid = rte_cpu_to_be_32(pid);
1979 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1980 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1982 /* Update the action port set bit */
1983 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1984 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1985 BNXT_ULP_INTF_TYPE_PHY_PORT);
1986 return BNXT_TF_RC_SUCCESS;
1989 /* Function to handle the parsing of RTE Flow action pop vlan. */
1991 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1992 struct ulp_rte_parser_params *params)
1994 /* Update the act_bitmap with pop */
1995 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
1996 return BNXT_TF_RC_SUCCESS;
1999 /* Function to handle the parsing of RTE Flow action push vlan. */
2001 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2002 struct ulp_rte_parser_params *params)
2004 const struct rte_flow_action_of_push_vlan *push_vlan;
2006 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2008 push_vlan = action_item->conf;
2010 ethertype = push_vlan->ethertype;
2011 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2013 "Parse Err: Ethertype not supported\n");
2014 return BNXT_TF_RC_PARSE_ERR;
2016 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2017 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2018 /* Update the hdr_bitmap with push vlan */
2019 ULP_BITMAP_SET(params->act_bitmap.bits,
2020 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2021 return BNXT_TF_RC_SUCCESS;
2023 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2024 return BNXT_TF_RC_ERROR;
2027 /* Function to handle the parsing of RTE Flow action set vlan id. */
2029 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2030 struct ulp_rte_parser_params *params)
2032 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2034 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2036 vlan_vid = action_item->conf;
2037 if (vlan_vid && vlan_vid->vlan_vid) {
2038 vid = vlan_vid->vlan_vid;
2039 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2040 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2041 /* Update the hdr_bitmap with vlan vid */
2042 ULP_BITMAP_SET(params->act_bitmap.bits,
2043 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2044 return BNXT_TF_RC_SUCCESS;
2046 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2047 return BNXT_TF_RC_ERROR;
2050 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2052 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2053 struct ulp_rte_parser_params *params)
2055 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2057 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2059 vlan_pcp = action_item->conf;
2061 pcp = vlan_pcp->vlan_pcp;
2062 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2063 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2064 /* Update the hdr_bitmap with vlan vid */
2065 ULP_BITMAP_SET(params->act_bitmap.bits,
2066 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2067 return BNXT_TF_RC_SUCCESS;
2069 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2070 return BNXT_TF_RC_ERROR;
2073 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2075 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2076 struct ulp_rte_parser_params *params)
2078 const struct rte_flow_action_set_ipv4 *set_ipv4;
2079 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2081 set_ipv4 = action_item->conf;
2083 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2084 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2085 /* Update the hdr_bitmap with set ipv4 src */
2086 ULP_BITMAP_SET(params->act_bitmap.bits,
2087 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2088 return BNXT_TF_RC_SUCCESS;
2090 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2091 return BNXT_TF_RC_ERROR;
2094 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2096 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2097 struct ulp_rte_parser_params *params)
2099 const struct rte_flow_action_set_ipv4 *set_ipv4;
2100 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2102 set_ipv4 = action_item->conf;
2104 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2105 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2106 /* Update the hdr_bitmap with set ipv4 dst */
2107 ULP_BITMAP_SET(params->act_bitmap.bits,
2108 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2109 return BNXT_TF_RC_SUCCESS;
2111 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2112 return BNXT_TF_RC_ERROR;
2115 /* Function to handle the parsing of RTE Flow action set tp src.*/
2117 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2118 struct ulp_rte_parser_params *params)
2120 const struct rte_flow_action_set_tp *set_tp;
2121 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2123 set_tp = action_item->conf;
2125 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2126 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2127 /* Update the hdr_bitmap with set tp src */
2128 ULP_BITMAP_SET(params->act_bitmap.bits,
2129 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2130 return BNXT_TF_RC_SUCCESS;
2133 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2134 return BNXT_TF_RC_ERROR;
2137 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2139 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2140 struct ulp_rte_parser_params *params)
2142 const struct rte_flow_action_set_tp *set_tp;
2143 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2145 set_tp = action_item->conf;
2147 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2148 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2149 /* Update the hdr_bitmap with set tp dst */
2150 ULP_BITMAP_SET(params->act_bitmap.bits,
2151 BNXT_ULP_ACT_BIT_SET_TP_DST);
2152 return BNXT_TF_RC_SUCCESS;
2155 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2156 return BNXT_TF_RC_ERROR;
2159 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2161 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2162 struct ulp_rte_parser_params *params)
2164 /* Update the act_bitmap with dec ttl */
2165 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2166 return BNXT_TF_RC_SUCCESS;
2169 /* Function to handle the parsing of RTE Flow action JUMP */
2171 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2172 struct ulp_rte_parser_params *params)
2174 /* Update the act_bitmap with dec ttl */
2175 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2176 return BNXT_TF_RC_SUCCESS;