/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
20 /* Local defines for the parsing functions */
21 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
22 #define ULP_VLAN_PRIORITY_MASK 0x700
23 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
24 #define ULP_UDP_PORT_VXLAN 4789
26 /* Utility function to skip the void items. */
28 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
34 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
41 /* Utility function to update the field_bitmap */
43 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
46 struct ulp_rte_hdr_field *field;
48 field = ¶ms->hdr_field[idx];
49 if (ulp_bitmap_notzero(field->mask, field->size)) {
50 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
52 if (!ulp_bitmap_is_ones(field->mask, field->size))
53 ULP_COMP_FLD_IDX_WR(params,
54 BNXT_ULP_CF_IDX_WC_MATCH, 1);
56 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
60 /* Utility function to copy field spec items */
61 static struct ulp_rte_hdr_field *
62 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
67 memcpy(field->spec, buffer, field->size);
72 /* Utility function to copy field masks items */
74 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
79 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
81 memcpy(field->mask, buffer, size);
82 ulp_rte_parser_field_bitmap_update(params, *idx);
86 /* Utility function to ignore field masks items */
88 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
90 const void *buffer __rte_unused,
91 uint32_t size __rte_unused)
97 * Function to handle the parsing of RTE Flows and placing
98 * the RTE flow items into the ulp structures.
101 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
102 struct ulp_rte_parser_params *params)
104 const struct rte_flow_item *item = pattern;
105 struct bnxt_ulp_rte_hdr_info *hdr_info;
107 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
109 /* Set the computed flags for no vlan tags before parsing */
110 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
111 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
113 /* Parse all the items in the pattern */
114 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
115 /* get the header information from the flow_hdr_info table */
116 hdr_info = &ulp_hdr_info[item->type];
117 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
119 "Truflow parser does not support type %d\n",
121 return BNXT_TF_RC_PARSE_ERR;
122 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
123 /* call the registered callback handler */
124 if (hdr_info->proto_hdr_func) {
125 if (hdr_info->proto_hdr_func(item, params) !=
126 BNXT_TF_RC_SUCCESS) {
127 return BNXT_TF_RC_ERROR;
133 /* update the implied SVIF */
134 return ulp_rte_parser_implicit_match_port_process(params);
138 * Function to handle the parsing of RTE Flows and placing
139 * the RTE flow actions into the ulp structures.
142 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
143 struct ulp_rte_parser_params *params)
145 const struct rte_flow_action *action_item = actions;
146 struct bnxt_ulp_rte_act_info *hdr_info;
148 /* Parse all the items in the pattern */
149 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
150 /* get the header information from the flow_hdr_info table */
151 hdr_info = &ulp_act_info[action_item->type];
152 if (hdr_info->act_type ==
153 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
155 "Truflow parser does not support act %u\n",
157 return BNXT_TF_RC_ERROR;
158 } else if (hdr_info->act_type ==
159 BNXT_ULP_ACT_TYPE_SUPPORTED) {
160 /* call the registered callback handler */
161 if (hdr_info->proto_act_func) {
162 if (hdr_info->proto_act_func(action_item,
164 BNXT_TF_RC_SUCCESS) {
165 return BNXT_TF_RC_ERROR;
171 /* update the implied port details */
172 ulp_rte_parser_implicit_act_port_process(params);
173 return BNXT_TF_RC_SUCCESS;
177 * Function to handle the post processing of the computed
178 * fields for the interface.
181 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
184 uint16_t port_id, parif;
186 enum bnxt_ulp_direction_type dir;
188 /* get the direction details */
189 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
191 /* read the port id details */
192 port_id = ULP_COMP_FLD_IDX_RD(params,
193 BNXT_ULP_CF_IDX_INCOMING_IF);
194 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
197 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
201 if (dir == BNXT_ULP_DIR_INGRESS) {
203 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
204 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
205 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
208 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
211 /* Get the match port type */
212 mtype = ULP_COMP_FLD_IDX_RD(params,
213 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
214 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
215 ULP_COMP_FLD_IDX_WR(params,
216 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
218 /* Set VF func PARIF */
219 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
220 BNXT_ULP_VF_FUNC_PARIF,
223 "ParseErr:ifindex is not valid\n");
226 ULP_COMP_FLD_IDX_WR(params,
227 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
231 /* Set DRV func PARIF */
232 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
233 BNXT_ULP_DRV_FUNC_PARIF,
236 "ParseErr:ifindex is not valid\n");
239 ULP_COMP_FLD_IDX_WR(params,
240 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
247 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
249 enum bnxt_ulp_intf_type match_port_type, act_port_type;
250 enum bnxt_ulp_direction_type dir;
251 uint32_t act_port_set;
253 /* Get the computed details */
254 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
255 match_port_type = ULP_COMP_FLD_IDX_RD(params,
256 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
257 act_port_type = ULP_COMP_FLD_IDX_RD(params,
258 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
259 act_port_set = ULP_COMP_FLD_IDX_RD(params,
260 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
262 /* set the flow direction in the proto and action header */
263 if (dir == BNXT_ULP_DIR_EGRESS) {
264 ULP_BITMAP_SET(params->hdr_bitmap.bits,
265 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
266 ULP_BITMAP_SET(params->act_bitmap.bits,
267 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
270 /* calculate the VF to VF flag */
271 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
272 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
273 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
275 /* Update the decrement ttl computational fields */
276 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
277 BNXT_ULP_ACT_BIT_DEC_TTL)) {
279 * Check that vxlan proto is included and vxlan decap
280 * action is not set then decrement tunnel ttl.
281 * Similarly add GRE and NVGRE in future.
283 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
284 BNXT_ULP_HDR_BIT_T_VXLAN) &&
285 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
286 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
287 ULP_COMP_FLD_IDX_WR(params,
288 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
290 ULP_COMP_FLD_IDX_WR(params,
291 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
295 /* Merge the hdr_fp_bit into the proto header bit */
296 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
298 /* Update the comp fld fid */
299 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
301 /* Update the computed interface parameters */
302 bnxt_ulp_comp_fld_intf_update(params);
304 /* TBD: Handle the flow rejection scenarios */
/*
 * Function to handle the post processing of the parsing details
 */
int32_t
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	/* Normal-flow post processing always runs; the tunnel-flow pass
	 * provides the final return code.
	 */
	ulp_post_process_normal_flow(params);
	return ulp_post_process_tun_flow(params);
}
319 * Function to compute the flow direction based on the match port details
322 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
324 enum bnxt_ulp_intf_type match_port_type;
326 /* Get the match port type */
327 match_port_type = ULP_COMP_FLD_IDX_RD(params,
328 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
330 /* If ingress flow and matchport is vf rep then dir is egress*/
331 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
332 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
333 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
334 BNXT_ULP_DIR_EGRESS);
336 /* Assign the input direction */
337 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
338 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
339 BNXT_ULP_DIR_INGRESS);
341 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
342 BNXT_ULP_DIR_EGRESS);
346 /* Function to handle the parsing of RTE Flow item PF Header. */
348 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
353 enum bnxt_ulp_direction_type dir;
354 struct ulp_rte_hdr_field *hdr_field;
355 enum bnxt_ulp_svif_type svif_type;
356 enum bnxt_ulp_intf_type port_type;
358 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
359 BNXT_ULP_INVALID_SVIF_VAL) {
361 "SVIF already set,multiple source not support'd\n");
362 return BNXT_TF_RC_ERROR;
365 /* Get port type details */
366 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
367 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
368 BNXT_TF_DBG(ERR, "Invalid port type\n");
369 return BNXT_TF_RC_ERROR;
372 /* Update the match port type */
373 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
375 /* compute the direction */
376 bnxt_ulp_rte_parser_direction_compute(params);
378 /* Get the computed direction */
379 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
380 if (dir == BNXT_ULP_DIR_INGRESS) {
381 svif_type = BNXT_ULP_PHY_PORT_SVIF;
383 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
384 svif_type = BNXT_ULP_VF_FUNC_SVIF;
386 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
388 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
390 svif = rte_cpu_to_be_16(svif);
391 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
392 memcpy(hdr_field->spec, &svif, sizeof(svif));
393 memcpy(hdr_field->mask, &mask, sizeof(mask));
394 hdr_field->size = sizeof(svif);
395 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
396 rte_be_to_cpu_16(svif));
397 return BNXT_TF_RC_SUCCESS;
400 /* Function to handle the parsing of the RTE port id */
402 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
404 uint16_t port_id = 0;
405 uint16_t svif_mask = 0xFFFF;
407 int32_t rc = BNXT_TF_RC_ERROR;
409 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
410 BNXT_ULP_INVALID_SVIF_VAL)
411 return BNXT_TF_RC_SUCCESS;
413 /* SVIF not set. So get the port id */
414 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
416 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
419 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
423 /* Update the SVIF details */
424 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
428 /* Function to handle the implicit action port id */
430 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
432 struct rte_flow_action action_item = {0};
433 struct rte_flow_action_port_id port_id = {0};
435 /* Read the action port set bit */
436 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
437 /* Already set, so just exit */
438 return BNXT_TF_RC_SUCCESS;
440 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
441 action_item.conf = &port_id;
443 /* Update the action port based on incoming port */
444 ulp_rte_port_id_act_handler(&action_item, params);
446 /* Reset the action port set bit */
447 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
448 return BNXT_TF_RC_SUCCESS;
451 /* Function to handle the parsing of RTE Flow item PF Header. */
453 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
454 struct ulp_rte_parser_params *params)
456 uint16_t port_id = 0;
457 uint16_t svif_mask = 0xFFFF;
460 /* Get the implicit port id */
461 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
463 /* perform the conversion from dpdk port to bnxt ifindex */
464 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
467 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
468 return BNXT_TF_RC_ERROR;
471 /* Update the SVIF details */
472 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
475 /* Function to handle the parsing of RTE Flow item VF Header. */
477 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
478 struct ulp_rte_parser_params *params)
480 const struct rte_flow_item_vf *vf_spec = item->spec;
481 const struct rte_flow_item_vf *vf_mask = item->mask;
484 int32_t rc = BNXT_TF_RC_PARSE_ERR;
486 /* Get VF rte_flow_item for Port details */
488 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
492 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
497 /* perform the conversion from VF Func id to bnxt ifindex */
498 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
501 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
504 /* Update the SVIF details */
505 return ulp_rte_parser_svif_set(params, ifindex, mask);
508 /* Function to handle the parsing of RTE Flow item port id Header. */
510 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
511 struct ulp_rte_parser_params *params)
513 const struct rte_flow_item_port_id *port_spec = item->spec;
514 const struct rte_flow_item_port_id *port_mask = item->mask;
516 int32_t rc = BNXT_TF_RC_PARSE_ERR;
520 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
524 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
527 mask = port_mask->id;
529 /* perform the conversion from dpdk port to bnxt ifindex */
530 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
533 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
536 /* Update the SVIF details */
537 return ulp_rte_parser_svif_set(params, ifindex, mask);
540 /* Function to handle the parsing of RTE Flow item phy port Header. */
542 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
543 struct ulp_rte_parser_params *params)
545 const struct rte_flow_item_phy_port *port_spec = item->spec;
546 const struct rte_flow_item_phy_port *port_mask = item->mask;
548 int32_t rc = BNXT_TF_RC_ERROR;
550 enum bnxt_ulp_direction_type dir;
551 struct ulp_rte_hdr_field *hdr_field;
553 /* Copy the rte_flow_item for phy port into hdr_field */
555 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
559 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
562 mask = port_mask->index;
564 /* Update the match port type */
565 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
566 BNXT_ULP_INTF_TYPE_PHY_PORT);
568 /* Compute the Hw direction */
569 bnxt_ulp_rte_parser_direction_compute(params);
571 /* Direction validation */
572 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
573 if (dir == BNXT_ULP_DIR_EGRESS) {
575 "Parse Err:Phy ports are valid only for ingress\n");
576 return BNXT_TF_RC_PARSE_ERR;
579 /* Get the physical port details from port db */
580 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
583 BNXT_TF_DBG(ERR, "Failed to get port details\n");
584 return BNXT_TF_RC_PARSE_ERR;
587 /* Update the SVIF details */
588 svif = rte_cpu_to_be_16(svif);
589 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
590 memcpy(hdr_field->spec, &svif, sizeof(svif));
591 memcpy(hdr_field->mask, &mask, sizeof(mask));
592 hdr_field->size = sizeof(svif);
593 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
594 rte_be_to_cpu_16(svif));
595 return BNXT_TF_RC_SUCCESS;
598 /* Function to handle the update of proto header based on field values */
600 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
601 uint16_t type, uint32_t in_flag)
603 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
605 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
606 BNXT_ULP_HDR_BIT_I_IPV4);
607 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
609 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
610 BNXT_ULP_HDR_BIT_O_IPV4);
611 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
613 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
615 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
616 BNXT_ULP_HDR_BIT_I_IPV6);
617 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
619 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
620 BNXT_ULP_HDR_BIT_O_IPV6);
621 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
626 /* Internal Function to identify broadcast or multicast packets */
628 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
630 if (rte_is_multicast_ether_addr(eth_addr) ||
631 rte_is_broadcast_ether_addr(eth_addr)) {
633 "No support for bcast or mcast addr offload\n");
639 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
641 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
642 struct ulp_rte_parser_params *params)
644 const struct rte_flow_item_eth *eth_spec = item->spec;
645 const struct rte_flow_item_eth *eth_mask = item->mask;
646 struct ulp_rte_hdr_field *field;
647 uint32_t idx = params->field_idx;
649 uint16_t eth_type = 0;
650 uint32_t inner_flag = 0;
653 * Copy the rte_flow_item for eth into hdr_field using ethernet
657 size = sizeof(eth_spec->dst.addr_bytes);
658 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
659 eth_spec->dst.addr_bytes,
661 /* Todo: work around to avoid multicast and broadcast addr */
662 if (ulp_rte_parser_is_bcmc_addr(ð_spec->dst))
663 return BNXT_TF_RC_PARSE_ERR;
665 size = sizeof(eth_spec->src.addr_bytes);
666 field = ulp_rte_parser_fld_copy(field,
667 eth_spec->src.addr_bytes,
669 /* Todo: work around to avoid multicast and broadcast addr */
670 if (ulp_rte_parser_is_bcmc_addr(ð_spec->src))
671 return BNXT_TF_RC_PARSE_ERR;
673 field = ulp_rte_parser_fld_copy(field,
675 sizeof(eth_spec->type));
676 eth_type = eth_spec->type;
679 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
680 sizeof(eth_mask->dst.addr_bytes));
681 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
682 sizeof(eth_mask->src.addr_bytes));
683 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
684 sizeof(eth_mask->type));
686 /* Add number of Eth header elements */
687 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
689 /* Update the protocol hdr bitmap */
690 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
691 BNXT_ULP_HDR_BIT_O_ETH) ||
692 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
693 BNXT_ULP_HDR_BIT_O_IPV4) ||
694 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
695 BNXT_ULP_HDR_BIT_O_IPV6) ||
696 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697 BNXT_ULP_HDR_BIT_O_UDP) ||
698 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699 BNXT_ULP_HDR_BIT_O_TCP)) {
700 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
703 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
705 /* Update the field protocol hdr bitmap */
706 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
708 return BNXT_TF_RC_SUCCESS;
711 /* Function to handle the parsing of RTE Flow item Vlan Header. */
713 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
714 struct ulp_rte_parser_params *params)
716 const struct rte_flow_item_vlan *vlan_spec = item->spec;
717 const struct rte_flow_item_vlan *vlan_mask = item->mask;
718 struct ulp_rte_hdr_field *field;
719 struct ulp_rte_hdr_bitmap *hdr_bit;
720 uint32_t idx = params->field_idx;
721 uint16_t vlan_tag, priority;
722 uint32_t outer_vtag_num;
723 uint32_t inner_vtag_num;
724 uint16_t eth_type = 0;
725 uint32_t inner_flag = 0;
728 * Copy the rte_flow_item for vlan into hdr_field using Vlan
732 vlan_tag = ntohs(vlan_spec->tci);
733 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
734 vlan_tag &= ULP_VLAN_TAG_MASK;
735 vlan_tag = htons(vlan_tag);
737 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
740 field = ulp_rte_parser_fld_copy(field,
743 field = ulp_rte_parser_fld_copy(field,
744 &vlan_spec->inner_type,
745 sizeof(vlan_spec->inner_type));
746 eth_type = vlan_spec->inner_type;
750 vlan_tag = ntohs(vlan_mask->tci);
751 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
755 * the storage for priority and vlan tag is 2 bytes
756 * The mask of priority which is 3 bits if it is all 1's
757 * then make the rest bits 13 bits as 1's
758 * so that it is matched as exact match.
760 if (priority == ULP_VLAN_PRIORITY_MASK)
761 priority |= ~ULP_VLAN_PRIORITY_MASK;
762 if (vlan_tag == ULP_VLAN_TAG_MASK)
763 vlan_tag |= ~ULP_VLAN_TAG_MASK;
764 vlan_tag = htons(vlan_tag);
767 * The priority field is ignored since OVS is setting it as
768 * wild card match and it is not supported. This is a work
769 * around and shall be addressed in the future.
771 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
774 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
776 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
777 sizeof(vlan_mask->inner_type));
779 /* Set the field index to new incremented value */
780 params->field_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
782 /* Get the outer tag and inner tag counts */
783 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
784 BNXT_ULP_CF_IDX_O_VTAG_NUM);
785 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
786 BNXT_ULP_CF_IDX_I_VTAG_NUM);
788 /* Update the hdr_bitmap of the vlans */
789 hdr_bit = ¶ms->hdr_bitmap;
790 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
791 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
793 /* Update the vlan tag num */
795 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
797 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
798 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
799 ULP_BITMAP_SET(params->hdr_bitmap.bits,
800 BNXT_ULP_HDR_BIT_OO_VLAN);
801 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
802 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
803 outer_vtag_num == 1) {
804 /* update the vlan tag num */
806 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
808 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
809 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
810 ULP_BITMAP_SET(params->hdr_bitmap.bits,
811 BNXT_ULP_HDR_BIT_OI_VLAN);
812 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
813 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
815 /* update the vlan tag num */
817 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
819 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
820 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
821 ULP_BITMAP_SET(params->hdr_bitmap.bits,
822 BNXT_ULP_HDR_BIT_IO_VLAN);
824 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
825 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
826 inner_vtag_num == 1) {
827 /* update the vlan tag num */
829 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
831 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
832 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
833 ULP_BITMAP_SET(params->hdr_bitmap.bits,
834 BNXT_ULP_HDR_BIT_II_VLAN);
837 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
838 return BNXT_TF_RC_ERROR;
840 /* Update the field protocol hdr bitmap */
841 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
842 return BNXT_TF_RC_SUCCESS;
845 /* Function to handle the update of proto header based on field values */
847 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
848 uint8_t proto, uint32_t in_flag)
850 if (proto == IPPROTO_UDP) {
852 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
853 BNXT_ULP_HDR_BIT_I_UDP);
854 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
856 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
857 BNXT_ULP_HDR_BIT_O_UDP);
858 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
860 } else if (proto == IPPROTO_TCP) {
862 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
863 BNXT_ULP_HDR_BIT_I_TCP);
864 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
866 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
867 BNXT_ULP_HDR_BIT_O_TCP);
868 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
873 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
875 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
876 struct ulp_rte_parser_params *params)
878 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
879 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
880 struct ulp_rte_hdr_field *field;
881 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
882 uint32_t idx = params->field_idx;
885 uint32_t inner_flag = 0;
888 /* validate there are no 3rd L3 header */
889 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
891 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
892 return BNXT_TF_RC_ERROR;
895 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
896 BNXT_ULP_HDR_BIT_O_ETH) &&
897 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
898 BNXT_ULP_HDR_BIT_I_ETH)) {
899 /* Since F2 flow does not include eth item, when parser detects
900 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
901 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
902 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
903 * This will allow the parser post processor to update the
904 * t_dmac in hdr_field[o_eth.dmac]
906 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
907 BNXT_ULP_PROTO_HDR_VLAN_NUM);
908 params->field_idx = idx;
912 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
916 size = sizeof(ipv4_spec->hdr.version_ihl);
917 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
918 &ipv4_spec->hdr.version_ihl,
920 size = sizeof(ipv4_spec->hdr.type_of_service);
921 field = ulp_rte_parser_fld_copy(field,
922 &ipv4_spec->hdr.type_of_service,
924 size = sizeof(ipv4_spec->hdr.total_length);
925 field = ulp_rte_parser_fld_copy(field,
926 &ipv4_spec->hdr.total_length,
928 size = sizeof(ipv4_spec->hdr.packet_id);
929 field = ulp_rte_parser_fld_copy(field,
930 &ipv4_spec->hdr.packet_id,
932 size = sizeof(ipv4_spec->hdr.fragment_offset);
933 field = ulp_rte_parser_fld_copy(field,
934 &ipv4_spec->hdr.fragment_offset,
936 size = sizeof(ipv4_spec->hdr.time_to_live);
937 field = ulp_rte_parser_fld_copy(field,
938 &ipv4_spec->hdr.time_to_live,
940 size = sizeof(ipv4_spec->hdr.next_proto_id);
941 field = ulp_rte_parser_fld_copy(field,
942 &ipv4_spec->hdr.next_proto_id,
944 proto = ipv4_spec->hdr.next_proto_id;
945 size = sizeof(ipv4_spec->hdr.hdr_checksum);
946 field = ulp_rte_parser_fld_copy(field,
947 &ipv4_spec->hdr.hdr_checksum,
949 size = sizeof(ipv4_spec->hdr.src_addr);
950 field = ulp_rte_parser_fld_copy(field,
951 &ipv4_spec->hdr.src_addr,
953 size = sizeof(ipv4_spec->hdr.dst_addr);
954 field = ulp_rte_parser_fld_copy(field,
955 &ipv4_spec->hdr.dst_addr,
959 ulp_rte_prsr_mask_copy(params, &idx,
960 &ipv4_mask->hdr.version_ihl,
961 sizeof(ipv4_mask->hdr.version_ihl));
963 * The tos field is ignored since OVS is setting it as wild card
964 * match and it is not supported. This is a work around and
965 * shall be addressed in the future.
967 ulp_rte_prsr_mask_ignore(params, &idx,
968 &ipv4_mask->hdr.type_of_service,
969 sizeof(ipv4_mask->hdr.type_of_service)
972 ulp_rte_prsr_mask_copy(params, &idx,
973 &ipv4_mask->hdr.total_length,
974 sizeof(ipv4_mask->hdr.total_length));
975 ulp_rte_prsr_mask_copy(params, &idx,
976 &ipv4_mask->hdr.packet_id,
977 sizeof(ipv4_mask->hdr.packet_id));
978 ulp_rte_prsr_mask_copy(params, &idx,
979 &ipv4_mask->hdr.fragment_offset,
980 sizeof(ipv4_mask->hdr.fragment_offset));
981 ulp_rte_prsr_mask_copy(params, &idx,
982 &ipv4_mask->hdr.time_to_live,
983 sizeof(ipv4_mask->hdr.time_to_live));
984 ulp_rte_prsr_mask_copy(params, &idx,
985 &ipv4_mask->hdr.next_proto_id,
986 sizeof(ipv4_mask->hdr.next_proto_id));
987 ulp_rte_prsr_mask_copy(params, &idx,
988 &ipv4_mask->hdr.hdr_checksum,
989 sizeof(ipv4_mask->hdr.hdr_checksum));
990 ulp_rte_prsr_mask_copy(params, &idx,
991 &ipv4_mask->hdr.src_addr,
992 sizeof(ipv4_mask->hdr.src_addr));
993 ulp_rte_prsr_mask_copy(params, &idx,
994 &ipv4_mask->hdr.dst_addr,
995 sizeof(ipv4_mask->hdr.dst_addr));
997 /* Add the number of ipv4 header elements */
998 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
1000 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1001 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1002 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1003 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1004 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1007 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1008 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1011 /* Update the field protocol hdr bitmap */
1012 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1013 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1014 return BNXT_TF_RC_SUCCESS;
1017 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1019 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1020 struct ulp_rte_parser_params *params)
1022 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1023 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1024 struct ulp_rte_hdr_field *field;
1025 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1026 uint32_t idx = params->field_idx;
1028 uint32_t vtcf, vtcf_mask;
1030 uint32_t inner_flag = 0;
1033 /* validate there are no 3rd L3 header */
1034 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1036 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1037 return BNXT_TF_RC_ERROR;
1040 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1041 BNXT_ULP_HDR_BIT_O_ETH) &&
1042 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1043 BNXT_ULP_HDR_BIT_I_ETH)) {
1044 /* Since F2 flow does not include eth item, when parser detects
1045 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1046 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
1047 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1048 * This will allow the parser post processor to update the
1049 * t_dmac in hdr_field[o_eth.dmac]
1051 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1052 BNXT_ULP_PROTO_HDR_VLAN_NUM);
1053 params->field_idx = idx;
1057 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1061 size = sizeof(ipv6_spec->hdr.vtc_flow);
1063 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1064 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1068 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1069 field = ulp_rte_parser_fld_copy(field,
1073 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1074 field = ulp_rte_parser_fld_copy(field,
1078 size = sizeof(ipv6_spec->hdr.payload_len);
1079 field = ulp_rte_parser_fld_copy(field,
1080 &ipv6_spec->hdr.payload_len,
1082 size = sizeof(ipv6_spec->hdr.proto);
1083 field = ulp_rte_parser_fld_copy(field,
1084 &ipv6_spec->hdr.proto,
1086 proto = ipv6_spec->hdr.proto;
1087 size = sizeof(ipv6_spec->hdr.hop_limits);
1088 field = ulp_rte_parser_fld_copy(field,
1089 &ipv6_spec->hdr.hop_limits,
1091 size = sizeof(ipv6_spec->hdr.src_addr);
1092 field = ulp_rte_parser_fld_copy(field,
1093 &ipv6_spec->hdr.src_addr,
1095 size = sizeof(ipv6_spec->hdr.dst_addr);
1096 field = ulp_rte_parser_fld_copy(field,
1097 &ipv6_spec->hdr.dst_addr,
1101 size = sizeof(ipv6_mask->hdr.vtc_flow);
1103 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1104 ulp_rte_prsr_mask_copy(params, &idx,
1108 * The TC and flow label field are ignored since OVS is setting
1109 * it for match and it is not supported.
1110 * This is a work around and
1111 * shall be addressed in the future.
1113 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1114 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1116 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1117 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1119 ulp_rte_prsr_mask_copy(params, &idx,
1120 &ipv6_mask->hdr.payload_len,
1121 sizeof(ipv6_mask->hdr.payload_len));
1122 ulp_rte_prsr_mask_copy(params, &idx,
1123 &ipv6_mask->hdr.proto,
1124 sizeof(ipv6_mask->hdr.proto));
1125 ulp_rte_prsr_mask_copy(params, &idx,
1126 &ipv6_mask->hdr.hop_limits,
1127 sizeof(ipv6_mask->hdr.hop_limits));
1128 ulp_rte_prsr_mask_copy(params, &idx,
1129 &ipv6_mask->hdr.src_addr,
1130 sizeof(ipv6_mask->hdr.src_addr));
1131 ulp_rte_prsr_mask_copy(params, &idx,
1132 &ipv6_mask->hdr.dst_addr,
1133 sizeof(ipv6_mask->hdr.dst_addr));
1135 /* add number of ipv6 header elements */
1136 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1138 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1139 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1140 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1141 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1142 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1145 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1146 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1149 /* Update the field protocol hdr bitmap */
1150 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1151 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1153 return BNXT_TF_RC_SUCCESS;
1156 /* Function to handle the update of proto header based on field values */
1158 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1161 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1162 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1163 BNXT_ULP_HDR_BIT_T_VXLAN);
1164 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1168 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies the UDP item spec/mask into params->hdr_field starting at
 * params->field_idx, advances field_idx past the UDP fields, and sets the
 * inner/outer UDP bits plus the L4 source/dest port computed fields.
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L4 header
 * is seen.  NOTE(review): this listing elides lines (braces, some args)
 * and "&params" appears mangled as "¶ms" below.
 */
1170 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1171 struct ulp_rte_parser_params *params)
1173 const struct rte_flow_item_udp *udp_spec = item->spec;
1174 const struct rte_flow_item_udp *udp_mask = item->mask;
1175 struct ulp_rte_hdr_field *field;
1176 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1177 uint32_t idx = params->field_idx;
1179 uint16_t dport = 0, sport = 0;
/* Only an outer and one inner L4 header are supported. */
1182 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1184 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1185 return BNXT_TF_RC_ERROR;
1189 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
/* Spec copy: each field is appended via the returned cursor. */
1193 size = sizeof(udp_spec->hdr.src_port);
1194 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1195 &udp_spec->hdr.src_port,
1197 sport = udp_spec->hdr.src_port;
1198 size = sizeof(udp_spec->hdr.dst_port);
1199 field = ulp_rte_parser_fld_copy(field,
1200 &udp_spec->hdr.dst_port,
1202 dport = udp_spec->hdr.dst_port;
1203 size = sizeof(udp_spec->hdr.dgram_len);
1204 field = ulp_rte_parser_fld_copy(field,
1205 &udp_spec->hdr.dgram_len,
1207 size = sizeof(udp_spec->hdr.dgram_cksum);
1208 field = ulp_rte_parser_fld_copy(field,
1209 &udp_spec->hdr.dgram_cksum,
/* Mask copy: each call also advances idx and the field bitmap. */
1213 ulp_rte_prsr_mask_copy(params, &idx,
1214 &udp_mask->hdr.src_port,
1215 sizeof(udp_mask->hdr.src_port));
1216 ulp_rte_prsr_mask_copy(params, &idx,
1217 &udp_mask->hdr.dst_port,
1218 sizeof(udp_mask->hdr.dst_port));
1219 ulp_rte_prsr_mask_copy(params, &idx,
1220 &udp_mask->hdr.dgram_len,
1221 sizeof(udp_mask->hdr.dgram_len));
1222 ulp_rte_prsr_mask_copy(params, &idx,
1223 &udp_mask->hdr.dgram_cksum,
1224 sizeof(udp_mask->hdr.dgram_cksum));
1227 /* Add number of UDP header elements */
1228 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
/* An earlier outer L4 header makes this one the inner UDP header. */
1230 /* Set the udp header bitmap and computed l4 header bitmaps */
1231 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1232 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1233 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1234 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1235 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SPORT, sport);
1236 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DPORT, dport);
1239 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1240 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1241 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SPORT, sport);
1242 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DPORT, dport);
/* dport may promote this flow to a VXLAN tunnel (port 4789). */
1244 /* Update the field protocol hdr bitmap */
1245 ulp_rte_l4_proto_type_update(params, dport);
1247 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1248 return BNXT_TF_RC_SUCCESS;
1251 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Mirrors the UDP handler for TCP: copies spec/mask into hdr_field,
 * advances field_idx, and sets inner/outer TCP bitmaps and L4 port
 * computed fields.  Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR on a
 * third L4 header.  NOTE(review): listing elides lines; "&params" is
 * mangled as "¶ms" below.
 */
1253 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1254 struct ulp_rte_parser_params *params)
1256 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1257 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1258 struct ulp_rte_hdr_field *field;
1259 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1260 uint32_t idx = params->field_idx;
1261 uint16_t dport = 0, sport = 0;
/* Only an outer and one inner L4 header are supported. */
1265 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1267 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1268 return BNXT_TF_RC_ERROR;
1272 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
/* Spec copy of each TCP header field via the returned cursor. */
1276 sport = tcp_spec->hdr.src_port;
1277 size = sizeof(tcp_spec->hdr.src_port);
1278 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1279 &tcp_spec->hdr.src_port,
1281 dport = tcp_spec->hdr.dst_port;
1282 size = sizeof(tcp_spec->hdr.dst_port);
1283 field = ulp_rte_parser_fld_copy(field,
1284 &tcp_spec->hdr.dst_port,
1286 size = sizeof(tcp_spec->hdr.sent_seq);
1287 field = ulp_rte_parser_fld_copy(field,
1288 &tcp_spec->hdr.sent_seq,
1290 size = sizeof(tcp_spec->hdr.recv_ack);
1291 field = ulp_rte_parser_fld_copy(field,
1292 &tcp_spec->hdr.recv_ack,
1294 size = sizeof(tcp_spec->hdr.data_off);
1295 field = ulp_rte_parser_fld_copy(field,
1296 &tcp_spec->hdr.data_off,
1298 size = sizeof(tcp_spec->hdr.tcp_flags);
1299 field = ulp_rte_parser_fld_copy(field,
1300 &tcp_spec->hdr.tcp_flags,
1302 size = sizeof(tcp_spec->hdr.rx_win);
1303 field = ulp_rte_parser_fld_copy(field,
1304 &tcp_spec->hdr.rx_win,
1306 size = sizeof(tcp_spec->hdr.cksum);
1307 field = ulp_rte_parser_fld_copy(field,
1308 &tcp_spec->hdr.cksum,
1310 size = sizeof(tcp_spec->hdr.tcp_urp);
1311 field = ulp_rte_parser_fld_copy(field,
1312 &tcp_spec->hdr.tcp_urp,
/* NOTE(review): the branch around this idx advance is elided in this
 * listing — presumably the no-spec path skips the copies above; confirm
 * against the full source.
 */
1315 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
/* Mask copy: each call advances idx and updates the field bitmap. */
1319 ulp_rte_prsr_mask_copy(params, &idx,
1320 &tcp_mask->hdr.src_port,
1321 sizeof(tcp_mask->hdr.src_port));
1322 ulp_rte_prsr_mask_copy(params, &idx,
1323 &tcp_mask->hdr.dst_port,
1324 sizeof(tcp_mask->hdr.dst_port));
1325 ulp_rte_prsr_mask_copy(params, &idx,
1326 &tcp_mask->hdr.sent_seq,
1327 sizeof(tcp_mask->hdr.sent_seq));
1328 ulp_rte_prsr_mask_copy(params, &idx,
1329 &tcp_mask->hdr.recv_ack,
1330 sizeof(tcp_mask->hdr.recv_ack));
1331 ulp_rte_prsr_mask_copy(params, &idx,
1332 &tcp_mask->hdr.data_off,
1333 sizeof(tcp_mask->hdr.data_off));
1334 ulp_rte_prsr_mask_copy(params, &idx,
1335 &tcp_mask->hdr.tcp_flags,
1336 sizeof(tcp_mask->hdr.tcp_flags));
1337 ulp_rte_prsr_mask_copy(params, &idx,
1338 &tcp_mask->hdr.rx_win,
1339 sizeof(tcp_mask->hdr.rx_win));
1340 ulp_rte_prsr_mask_copy(params, &idx,
1341 &tcp_mask->hdr.cksum,
1342 sizeof(tcp_mask->hdr.cksum));
1343 ulp_rte_prsr_mask_copy(params, &idx,
1344 &tcp_mask->hdr.tcp_urp,
1345 sizeof(tcp_mask->hdr.tcp_urp));
1347 /* add number of TCP header elements */
1348 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1350 /* Set the tcp header bitmap and computed l4 header bitmaps */
1351 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1352 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1353 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1354 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1355 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SPORT, sport);
1356 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DPORT, dport);
1358 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1359 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1360 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SPORT, sport);
1361 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DPORT, dport);
1363 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1364 return BNXT_TF_RC_SUCCESS;
1367 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies the VXLAN item spec/mask (flags, rsvd0, vni, rsvd1) into
 * hdr_field, advances field_idx, and sets the T_VXLAN tunnel bit.
 * NOTE(review): listing elides lines; "&params" is mangled as "¶ms".
 */
1369 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1370 struct ulp_rte_parser_params *params)
1372 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1373 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1374 struct ulp_rte_hdr_field *field;
1375 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1376 uint32_t idx = params->field_idx;
1380 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1384 size = sizeof(vxlan_spec->flags);
1385 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1388 size = sizeof(vxlan_spec->rsvd0);
1389 field = ulp_rte_parser_fld_copy(field,
1392 size = sizeof(vxlan_spec->vni);
1393 field = ulp_rte_parser_fld_copy(field,
1396 size = sizeof(vxlan_spec->rsvd1);
1397 field = ulp_rte_parser_fld_copy(field,
/* Mask copies advance idx and update the field bitmap. */
1402 ulp_rte_prsr_mask_copy(params, &idx,
1404 sizeof(vxlan_mask->flags));
1405 ulp_rte_prsr_mask_copy(params, &idx,
1407 sizeof(vxlan_mask->rsvd0));
1408 ulp_rte_prsr_mask_copy(params, &idx,
1410 sizeof(vxlan_mask->vni));
1411 ulp_rte_prsr_mask_copy(params, &idx,
1413 sizeof(vxlan_mask->rsvd1));
1415 /* Add number of vxlan header elements */
1416 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1418 /* Update the hdr_bitmap with vxlan */
1419 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1420 return BNXT_TF_RC_SUCCESS;
1423 /* Function to handle the parsing of RTE Flow item void Header */
1425 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1426 struct ulp_rte_parser_params *params __rte_unused)
1428 return BNXT_TF_RC_SUCCESS;
1431 /* Function to handle the parsing of RTE Flow action void Header. */
1433 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1434 struct ulp_rte_parser_params *params __rte_unused)
1436 return BNXT_TF_RC_SUCCESS;
1439 /* Function to handle the parsing of RTE Flow action Mark Header. */
1441 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1442 struct ulp_rte_parser_params *param)
1444 const struct rte_flow_action_mark *mark;
1445 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1448 mark = action_item->conf;
1450 mark_id = tfp_cpu_to_be_32(mark->id);
1451 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1452 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1454 /* Update the hdr_bitmap with vxlan */
1455 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1456 return BNXT_TF_RC_SUCCESS;
1458 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1459 return BNXT_TF_RC_ERROR;
1462 /* Function to handle the parsing of RTE Flow action RSS Header. */
1464 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1465 struct ulp_rte_parser_params *param)
1467 const struct rte_flow_action_rss *rss = action_item->conf;
1470 /* Update the hdr_bitmap with vxlan */
1471 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1472 return BNXT_TF_RC_SUCCESS;
1474 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1475 return BNXT_TF_RC_ERROR;
1478 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walks the vxlan_encap action's item "definition" list, which must be
 * ETH [VLAN [VLAN]] (IPV4|IPV6) UDP VXLAN, copying each header into the
 * encap action-property buffers, then sets the VXLAN_ENCAP action bit.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR on any malformed /
 * missing item.  NOTE(review): listing elides many lines (braces, some
 * arguments); "&params" appears mangled as "¶ms" below.
 */
1480 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1481 struct ulp_rte_parser_params *params)
1483 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1484 const struct rte_flow_item *item;
1485 const struct rte_flow_item_eth *eth_spec;
1486 const struct rte_flow_item_ipv4 *ipv4_spec;
1487 const struct rte_flow_item_ipv6 *ipv6_spec;
1488 struct rte_flow_item_vxlan vxlan_spec;
1489 uint32_t vlan_num = 0, vlan_size = 0;
1490 uint32_t ip_size = 0, ip_type = 0;
1491 uint32_t vxlan_size = 0;
1493 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1494 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1496 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1497 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1499 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1500 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1501 const uint8_t *tmp_buff;
/* Validate the action configuration and its item list. */
1503 vxlan_encap = action_item->conf;
1505 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1506 return BNXT_TF_RC_ERROR;
1509 item = vxlan_encap->definition;
1511 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1512 return BNXT_TF_RC_ERROR;
1515 if (!ulp_rte_item_skip_void(&item, 0))
1516 return BNXT_TF_RC_ERROR;
1518 /* must have ethernet header */
1519 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1520 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1521 return BNXT_TF_RC_ERROR;
/* Copy outer L2 DMAC/SMAC into the encap properties (8-byte aligned). */
1523 eth_spec = item->spec;
1524 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1525 ulp_encap_buffer_copy(buff,
1526 eth_spec->dst.addr_bytes,
1527 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1528 ULP_BUFFER_ALIGN_8_BYTE);
1530 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1531 ulp_encap_buffer_copy(buff,
1532 eth_spec->src.addr_bytes,
1533 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1534 ULP_BUFFER_ALIGN_8_BYTE);
1536 /* Goto the next item */
1537 if (!ulp_rte_item_skip_void(&item, 1))
1538 return BNXT_TF_RC_ERROR;
/* Zero, one or two VLAN tags may precede the L3 header. */
1540 /* May have vlan header */
1541 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1543 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1544 ulp_encap_buffer_copy(buff,
1546 sizeof(struct rte_flow_item_vlan),
1547 ULP_BUFFER_ALIGN_8_BYTE);
1549 if (!ulp_rte_item_skip_void(&item, 1))
1550 return BNXT_TF_RC_ERROR;
1553 /* may have two vlan headers */
1554 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1556 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1557 sizeof(struct rte_flow_item_vlan)],
1559 sizeof(struct rte_flow_item_vlan));
1560 if (!ulp_rte_item_skip_void(&item, 1))
1561 return BNXT_TF_RC_ERROR;
/* Record the VLAN tag count and total size (big-endian). */
1563 /* Update the vlan count and size of more than one */
1565 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1566 vlan_num = tfp_cpu_to_be_32(vlan_num);
1567 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1570 vlan_size = tfp_cpu_to_be_32(vlan_size);
1571 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1576 /* L3 must be IPv4, IPv6 */
1577 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1578 ipv4_spec = item->spec;
1579 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
/* If ver/hlen+TOS is unset in the spec, fall back to def_ipv4_hdr. */
1581 /* copy the ipv4 details */
1582 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1583 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1584 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1585 ulp_encap_buffer_copy(buff,
1587 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1588 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1589 ULP_BUFFER_ALIGN_8_BYTE);
1591 /* Total length being ignored in the ip hdr. */
1592 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1593 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1594 ulp_encap_buffer_copy(buff,
1596 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1597 ULP_BUFFER_ALIGN_8_BYTE);
1598 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1599 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1600 ulp_encap_buffer_copy(buff,
1601 &ipv4_spec->hdr.version_ihl,
1602 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1603 ULP_BUFFER_ALIGN_8_BYTE);
1606 /* Update the dst ip address in ip encap buffer */
1607 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1608 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1609 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1610 ulp_encap_buffer_copy(buff,
1611 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1612 sizeof(ipv4_spec->hdr.dst_addr),
1613 ULP_BUFFER_ALIGN_8_BYTE);
1615 /* Update the src ip address */
1616 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1617 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1618 sizeof(ipv4_spec->hdr.src_addr)];
1619 ulp_encap_buffer_copy(buff,
1620 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1621 sizeof(ipv4_spec->hdr.src_addr),
1622 ULP_BUFFER_ALIGN_8_BYTE);
1624 /* Update the ip size details */
1625 ip_size = tfp_cpu_to_be_32(ip_size);
1626 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1627 &ip_size, sizeof(uint32_t));
1629 /* update the ip type */
1630 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1631 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1632 &ip_type, sizeof(uint32_t));
1634 /* update the computed field to notify it is ipv4 header */
1635 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1638 if (!ulp_rte_item_skip_void(&item, 1))
1639 return BNXT_TF_RC_ERROR;
1640 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1641 ipv6_spec = item->spec;
1642 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
/* If vtc_flow is unset in the spec, fall back to def_ipv6_hdr. */
1644 /* copy the ipv6 details */
1645 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1646 if (ulp_buffer_is_empty(tmp_buff,
1647 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1648 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1649 ulp_encap_buffer_copy(buff,
1651 sizeof(def_ipv6_hdr),
1652 ULP_BUFFER_ALIGN_8_BYTE);
1654 /* The payload length being ignored in the ip hdr. */
1655 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1656 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1657 ulp_encap_buffer_copy(buff,
1659 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1660 ULP_BUFFER_ALIGN_8_BYTE);
1661 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1662 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1663 BNXT_ULP_ENCAP_IPV6_DO];
1664 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1665 ulp_encap_buffer_copy(buff,
1667 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1668 ULP_BUFFER_ALIGN_8_BYTE);
1670 /* Update the dst ip address in ip encap buffer */
1671 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1672 sizeof(def_ipv6_hdr)];
1673 ulp_encap_buffer_copy(buff,
1674 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1675 sizeof(ipv6_spec->hdr.dst_addr),
1676 ULP_BUFFER_ALIGN_8_BYTE);
1678 /* Update the src ip address */
1679 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1680 ulp_encap_buffer_copy(buff,
1681 (const uint8_t *)ipv6_spec->hdr.src_addr,
1682 sizeof(ipv6_spec->hdr.src_addr),
1683 ULP_BUFFER_ALIGN_16_BYTE);
1685 /* Update the ip size details */
1686 ip_size = tfp_cpu_to_be_32(ip_size);
1687 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1688 &ip_size, sizeof(uint32_t));
1690 /* update the ip type */
1691 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1692 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1693 &ip_type, sizeof(uint32_t));
1695 /* update the computed field to notify it is ipv6 header */
1696 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1699 if (!ulp_rte_item_skip_void(&item, 1))
1700 return BNXT_TF_RC_ERROR;
1702 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1703 return BNXT_TF_RC_ERROR;
/* Outer UDP header is mandatory for the VXLAN encapsulation. */
1707 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1708 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1709 return BNXT_TF_RC_ERROR;
1711 /* copy the udp details */
1712 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1713 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1714 ULP_BUFFER_ALIGN_8_BYTE);
1716 if (!ulp_rte_item_skip_void(&item, 1))
1717 return BNXT_TF_RC_ERROR;
1720 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1721 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1722 return BNXT_TF_RC_ERROR;
1724 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1725 /* copy the vxlan details */
1726 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* Force the VXLAN "VNI valid" flags byte per RFC 7348. */
1727 vxlan_spec.flags = 0x08;
1728 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
/* IPv4 outer: copy in one shot; otherwise copy in two aligned halves. */
1729 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1730 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1731 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1733 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1734 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1735 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1736 (const uint8_t *)&vxlan_spec.vni,
1737 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1739 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1740 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1741 &vxlan_size, sizeof(uint32_t));
1743 /* update the hdr_bitmap with vxlan */
1744 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
1745 return BNXT_TF_RC_SUCCESS;
1748 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1750 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1752 struct ulp_rte_parser_params *params)
1754 /* update the hdr_bitmap with vxlan */
1755 ULP_BITMAP_SET(params->act_bitmap.bits,
1756 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
1757 /* Update computational field with tunnel decap info */
1758 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1759 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1760 return BNXT_TF_RC_SUCCESS;
1763 /* Function to handle the parsing of RTE Flow action drop Header. */
1765 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1766 struct ulp_rte_parser_params *params)
1768 /* Update the hdr_bitmap with drop */
1769 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
1770 return BNXT_TF_RC_SUCCESS;
1773 /* Function to handle the parsing of RTE Flow action count. */
/*
 * Records a flow-counter request: rejects shared counters, copies the
 * counter configuration into the COUNT action property and sets the COUNT
 * action bit.  NOTE(review): listing elides lines (including the memcpy
 * source argument and surrounding braces); "&params" is mangled as
 * "¶ms" below.
 */
1775 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1776 struct ulp_rte_parser_params *params)
1779 const struct rte_flow_action_count *act_count;
1780 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1782 act_count = action_item->conf;
/* Shared counters are not supported by this offload path. */
1784 if (act_count->shared) {
1786 "Parse Error:Shared count not supported\n");
1787 return BNXT_TF_RC_PARSE_ERR;
1789 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1791 BNXT_ULP_ACT_PROP_SZ_COUNT);
1794 /* Update the act_bitmap with count */
1795 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
1796 return BNXT_TF_RC_SUCCESS;
1799 /* Function to handle the parsing of action ports. */
/*
 * Resolves the destination for a port action: for egress flows it writes
 * the vport of ifindex into the VPORT action property; for ingress flows
 * it writes the default VNIC (VF-rep ports use the VF function's VNIC,
 * others the driver function's VNIC).  Also sets ACT_PORT_IS_SET.
 * NOTE(review): listing elides locals (pid/pid_s, vnic_type, ifindex
 * param) and braces; "&param" is mangled as "¶m" below.
 */
1801 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1804 enum bnxt_ulp_direction_type dir;
1807 struct ulp_rte_act_prop *act = ¶m->act_prop;
1808 enum bnxt_ulp_intf_type port_type;
1811 /* Get the direction */
1812 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1813 if (dir == BNXT_ULP_DIR_EGRESS) {
1814 /* For egress direction, fill vport */
1815 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1816 return BNXT_TF_RC_ERROR;
/* Action properties are stored big-endian. */
1819 pid = rte_cpu_to_be_32(pid);
1820 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1821 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1823 /* For ingress direction, fill vnic */
1824 port_type = ULP_COMP_FLD_IDX_RD(param,
1825 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1826 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1827 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1829 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1831 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1833 return BNXT_TF_RC_ERROR;
1836 pid = rte_cpu_to_be_32(pid);
1837 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1838 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1841 /* Update the action port set bit */
1842 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1843 return BNXT_TF_RC_SUCCESS;
1846 /* Function to handle the parsing of RTE Flow action PF. */
/*
 * Resolves the flow's incoming port to its port-db ifindex, verifies it
 * really is a PF port, records the port type, and delegates the
 * vport/vnic programming to ulp_rte_parser_act_port_set().
 */
1848 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1849 struct ulp_rte_parser_params *params)
1853 enum bnxt_ulp_intf_type intf_type;
1855 /* Get the port id of the current device */
1856 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1858 /* Get the port db ifindex */
1859 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1861 BNXT_TF_DBG(ERR, "Invalid port id\n");
1862 return BNXT_TF_RC_ERROR;
1865 /* Check the port is PF port */
1866 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1867 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1868 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1869 return BNXT_TF_RC_ERROR;
1871 /* Update the action properties */
1872 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1873 return ulp_rte_parser_act_port_set(params, ifindex);
1876 /* Function to handle the parsing of RTE Flow action VF. */
/*
 * Validates the VF action (non-NULL conf, "original" unsupported), maps
 * the VF function id to a port-db ifindex, checks it is a (trusted) VF
 * port, records the port type and delegates to
 * ulp_rte_parser_act_port_set().
 */
1878 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1879 struct ulp_rte_parser_params *params)
1881 const struct rte_flow_action_vf *vf_action;
1883 enum bnxt_ulp_intf_type intf_type;
1885 vf_action = action_item->conf;
1887 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1888 return BNXT_TF_RC_PARSE_ERR;
/* The "original" attribute cannot be honored by this offload path. */
1891 if (vf_action->original) {
1892 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1893 return BNXT_TF_RC_PARSE_ERR;
1896 /* Check the port is VF port */
1897 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1899 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1900 return BNXT_TF_RC_ERROR;
1902 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1903 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1904 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1905 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1906 return BNXT_TF_RC_ERROR;
1909 /* Update the action properties */
1910 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1911 return ulp_rte_parser_act_port_set(params, ifindex);
1914 /* Function to handle the parsing of RTE Flow action port_id. */
/*
 * Validates the port_id action, translates the DPDK port id into the
 * port-db ifindex, records the interface type, and delegates to
 * ulp_rte_parser_act_port_set().  NOTE(review): the NULL-conf and
 * invalid-type checks are partially elided in this listing.
 */
1916 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1917 struct ulp_rte_parser_params *param)
1919 const struct rte_flow_action_port_id *port_id = act_item->conf;
1921 enum bnxt_ulp_intf_type intf_type;
1925 "ParseErr: Invalid Argument\n");
1926 return BNXT_TF_RC_PARSE_ERR;
/* The "original" attribute cannot be honored by this offload path. */
1928 if (port_id->original) {
1930 "ParseErr:Portid Original not supported\n");
1931 return BNXT_TF_RC_PARSE_ERR;
1934 /* Get the port db ifindex */
1935 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1937 BNXT_TF_DBG(ERR, "Invalid port id\n");
1938 return BNXT_TF_RC_ERROR;
1941 /* Get the intf type */
1942 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1944 BNXT_TF_DBG(ERR, "Invalid port type\n");
1945 return BNXT_TF_RC_ERROR;
1948 /* Set the action port */
1949 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1950 return ulp_rte_parser_act_port_set(param, ifindex);
1953 /* Function to handle the parsing of RTE Flow action phy_port. */
/*
 * Handles a physical-port action: valid only on egress flows and without
 * the "original" attribute.  Looks up the phy port's vport in the port db,
 * stores it (big-endian) in the VPORT action property, and marks the
 * action port as set with type PHY_PORT.
 */
1955 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1956 struct ulp_rte_parser_params *prm)
1958 const struct rte_flow_action_phy_port *phy_port;
1962 enum bnxt_ulp_direction_type dir;
1964 phy_port = action_item->conf;
1967 "ParseErr: Invalid Argument\n");
1968 return BNXT_TF_RC_PARSE_ERR;
1971 if (phy_port->original) {
1973 "Parse Err:Port Original not supported\n");
1974 return BNXT_TF_RC_PARSE_ERR;
/* Physical-port destinations only make sense for egress traffic. */
1976 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1977 if (dir != BNXT_ULP_DIR_EGRESS) {
1979 "Parse Err:Phy ports are valid only for egress\n");
1980 return BNXT_TF_RC_PARSE_ERR;
1982 /* Get the physical port details from port db */
1983 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1986 BNXT_TF_DBG(ERR, "Failed to get port details\n");
/* Action properties are stored big-endian. */
1991 pid = rte_cpu_to_be_32(pid);
1992 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1993 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1995 /* Update the action port set bit */
1996 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1997 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1998 BNXT_ULP_INTF_TYPE_PHY_PORT);
1999 return BNXT_TF_RC_SUCCESS;
2002 /* Function to handle the parsing of RTE Flow action pop vlan. */
2004 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2005 struct ulp_rte_parser_params *params)
2007 /* Update the act_bitmap with pop */
2008 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2009 return BNXT_TF_RC_SUCCESS;
2012 /* Function to handle the parsing of RTE Flow action push vlan. */
2014 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2015 struct ulp_rte_parser_params *params)
2017 const struct rte_flow_action_of_push_vlan *push_vlan;
2019 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2021 push_vlan = action_item->conf;
2023 ethertype = push_vlan->ethertype;
2024 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2026 "Parse Err: Ethertype not supported\n");
2027 return BNXT_TF_RC_PARSE_ERR;
2029 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2030 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2031 /* Update the hdr_bitmap with push vlan */
2032 ULP_BITMAP_SET(params->act_bitmap.bits,
2033 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2034 return BNXT_TF_RC_SUCCESS;
2036 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2037 return BNXT_TF_RC_ERROR;
2040 /* Function to handle the parsing of RTE Flow action set vlan id. */
2042 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2043 struct ulp_rte_parser_params *params)
2045 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2047 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2049 vlan_vid = action_item->conf;
2050 if (vlan_vid && vlan_vid->vlan_vid) {
2051 vid = vlan_vid->vlan_vid;
2052 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2053 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2054 /* Update the hdr_bitmap with vlan vid */
2055 ULP_BITMAP_SET(params->act_bitmap.bits,
2056 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2057 return BNXT_TF_RC_SUCCESS;
2059 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2060 return BNXT_TF_RC_ERROR;
2063 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2065 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2066 struct ulp_rte_parser_params *params)
2068 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2070 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2072 vlan_pcp = action_item->conf;
2074 pcp = vlan_pcp->vlan_pcp;
2075 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2076 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2077 /* Update the hdr_bitmap with vlan vid */
2078 ULP_BITMAP_SET(params->act_bitmap.bits,
2079 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2080 return BNXT_TF_RC_SUCCESS;
2082 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2083 return BNXT_TF_RC_ERROR;
2086 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2088 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2089 struct ulp_rte_parser_params *params)
2091 const struct rte_flow_action_set_ipv4 *set_ipv4;
2092 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2094 set_ipv4 = action_item->conf;
2096 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2097 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2098 /* Update the hdr_bitmap with set ipv4 src */
2099 ULP_BITMAP_SET(params->act_bitmap.bits,
2100 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2101 return BNXT_TF_RC_SUCCESS;
2103 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2104 return BNXT_TF_RC_ERROR;
2107 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2109 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2110 struct ulp_rte_parser_params *params)
2112 const struct rte_flow_action_set_ipv4 *set_ipv4;
2113 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2115 set_ipv4 = action_item->conf;
2117 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2118 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2119 /* Update the hdr_bitmap with set ipv4 dst */
2120 ULP_BITMAP_SET(params->act_bitmap.bits,
2121 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2122 return BNXT_TF_RC_SUCCESS;
2124 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2125 return BNXT_TF_RC_ERROR;
2128 /* Function to handle the parsing of RTE Flow action set tp src.*/
2130 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2131 struct ulp_rte_parser_params *params)
2133 const struct rte_flow_action_set_tp *set_tp;
2134 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2136 set_tp = action_item->conf;
2138 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2139 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2140 /* Update the hdr_bitmap with set tp src */
2141 ULP_BITMAP_SET(params->act_bitmap.bits,
2142 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2143 return BNXT_TF_RC_SUCCESS;
2146 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2147 return BNXT_TF_RC_ERROR;
2150 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2152 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2153 struct ulp_rte_parser_params *params)
2155 const struct rte_flow_action_set_tp *set_tp;
2156 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2158 set_tp = action_item->conf;
2160 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2161 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2162 /* Update the hdr_bitmap with set tp dst */
2163 ULP_BITMAP_SET(params->act_bitmap.bits,
2164 BNXT_ULP_ACT_BIT_SET_TP_DST);
2165 return BNXT_TF_RC_SUCCESS;
2168 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2169 return BNXT_TF_RC_ERROR;
2172 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2174 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2175 struct ulp_rte_parser_params *params)
2177 /* Update the act_bitmap with dec ttl */
2178 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2179 return BNXT_TF_RC_SUCCESS;
2182 /* Function to handle the parsing of RTE Flow action JUMP */
2184 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2185 struct ulp_rte_parser_params *params)
2187 /* Update the act_bitmap with dec ttl */
2188 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2189 return BNXT_TF_RC_SUCCESS;