1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
20 /* Local defines for the parsing functions */
21 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
22 #define ULP_VLAN_PRIORITY_MASK 0x700
23 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
24 #define ULP_UDP_PORT_VXLAN 4789
26 /* Utility function to skip the void items. */
/*
 * Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries.
 * NOTE(review): interior lines of this function appear elided in this
 * extraction (source numbering jumps) — confirm body against full file.
 */
28 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
34 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
41 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] in the parser field bitmap when its mask is
 * non-zero; a partial (not-all-ones) mask additionally flags wildcard
 * matching via BNXT_ULP_MATCH_TYPE_BITMASK_WM.  A zero mask clears the
 * index bit.  (Fixed HTML-entity corruption: "¶ms" -> "&params".)
 */
43 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
46 struct ulp_rte_hdr_field *field;
48 field = &params->hdr_field[idx];
49 if (ulp_bitmap_notzero(field->mask, field->size)) {
50 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* partial mask => wildcard-match entry, not exact match */
52 if (!ulp_bitmap_is_ones(field->mask, field->size))
53 ULP_BITMAP_SET(params->fld_bitmap.bits,
54 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
56 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
60 /* Utility function to copy field spec items */
/*
 * Copies `buffer` into field->spec (field->size bytes) and — per the
 * callers, which chain the return value — presumably returns the next
 * hdr_field slot.  NOTE(review): return statement not visible here.
 */
61 static struct ulp_rte_hdr_field *
62 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
67 memcpy(field->spec, buffer, field->size);
72 /* Utility function to copy field masks items */
/*
 * Copies `size` bytes of mask data into hdr_field[*idx].mask and then
 * refreshes the field bitmap for that index.
 * (Fixed HTML-entity corruption: "¶ms" -> "&params".)
 */
74 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
79 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
81 memcpy(field->mask, buffer, size);
82 ulp_rte_parser_field_bitmap_update(params, *idx);
86 /* Utility function to ignore field masks items */
/*
 * Deliberate no-op: every parameter is __rte_unused.  Used where a mask
 * (e.g. VLAN priority, IPv6 TC/flow-label) must be skipped rather than
 * copied into the match fields.
 */
88 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
90 const void *buffer __rte_unused,
91 uint32_t size __rte_unused)
97 * Function to handle the parsing of RTE Flows and placing
98 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow item array, dispatching each supported item type to
 * its registered handler from ulp_hdr_info[].  Unsupported types abort
 * with BNXT_TF_RC_PARSE_ERR; a failing handler aborts with
 * BNXT_TF_RC_ERROR.  On success, falls through to set the implicit
 * source VIF match.
 */
101 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
102 struct ulp_rte_parser_params *params)
104 const struct rte_flow_item *item = pattern;
105 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* reserve the leading SVIF slots before per-item fields */
107 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
109 /* Set the computed flags for no vlan tags before parsing */
110 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
111 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
113 /* Parse all the items in the pattern */
114 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
115 /* get the header information from the flow_hdr_info table */
116 hdr_info = &ulp_hdr_info[item->type];
117 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
119 "Truflow parser does not support type %d\n",
121 return BNXT_TF_RC_PARSE_ERR;
122 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
123 /* call the registered callback handler */
124 if (hdr_info->proto_hdr_func) {
125 if (hdr_info->proto_hdr_func(item, params) !=
126 BNXT_TF_RC_SUCCESS) {
127 return BNXT_TF_RC_ERROR;
133 /* update the implied SVIF */
134 return ulp_rte_parser_implicit_match_port_process(params);
138 * Function to handle the parsing of RTE Flows and placing
139 * the RTE flow actions into the ulp structures.
/*
 * Mirror of the header parser for the action array: each supported
 * action is dispatched to its ulp_act_info[] callback; unsupported
 * actions or failing callbacks return BNXT_TF_RC_ERROR.  Finishes by
 * filling in the implicit destination port action.
 */
142 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
143 struct ulp_rte_parser_params *params)
145 const struct rte_flow_action *action_item = actions;
146 struct bnxt_ulp_rte_act_info *hdr_info;
148 /* Parse all the items in the pattern */
149 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
150 /* get the header information from the flow_hdr_info table */
151 hdr_info = &ulp_act_info[action_item->type];
152 if (hdr_info->act_type ==
153 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
155 "Truflow parser does not support act %u\n",
157 return BNXT_TF_RC_ERROR;
158 } else if (hdr_info->act_type ==
159 BNXT_ULP_ACT_TYPE_SUPPORTED) {
160 /* call the registered callback handler */
161 if (hdr_info->proto_act_func) {
162 if (hdr_info->proto_act_func(action_item,
164 BNXT_TF_RC_SUCCESS) {
165 return BNXT_TF_RC_ERROR;
171 /* update the implied port details */
172 ulp_rte_parser_implicit_act_port_process(params);
173 return BNXT_TF_RC_SUCCESS;
177 * Function to handle the post processing of the computed
178 * fields for the interface.
/*
 * Resolves the incoming dpdk port to a port-db ifindex, then populates
 * the PARIF computed fields.  Ingress: physical-port PARIF.  Otherwise:
 * for a VF-rep match port, VF-func PARIF plus the loopback PARIF;
 * for other ports, the driver-function PARIF.
 */
181 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
184 uint16_t port_id, parif;
186 enum bnxt_ulp_direction_type dir;
188 /* get the direction details */
189 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
191 /* read the port id details */
192 port_id = ULP_COMP_FLD_IDX_RD(params,
193 BNXT_ULP_CF_IDX_INCOMING_IF);
194 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
197 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
201 if (dir == BNXT_ULP_DIR_INGRESS) {
/* ingress flows match on the physical port's PARIF */
203 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
204 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
205 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
208 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
211 /* Get the match port type */
212 mtype = ULP_COMP_FLD_IDX_RD(params,
213 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
214 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
215 ULP_COMP_FLD_IDX_WR(params,
216 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
218 /* Set VF func PARIF */
219 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
220 BNXT_ULP_VF_FUNC_PARIF,
223 "ParseErr:ifindex is not valid\n");
226 ULP_COMP_FLD_IDX_WR(params,
227 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
230 /* populate the loopback parif */
231 ULP_COMP_FLD_IDX_WR(params,
232 BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
233 BNXT_ULP_SYM_VF_FUNC_PARIF);
236 /* Set DRV func PARIF */
237 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
238 BNXT_ULP_DRV_FUNC_PARIF,
241 "ParseErr:ifindex is not valid\n");
244 ULP_COMP_FLD_IDX_WR(params,
245 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
/*
 * Post-processing for a normal (non-tunnel-specific) flow: stamps the
 * direction bitmaps, derives the VF-to-VF flag, translates the DEC_TTL
 * action into tunnel/non-tunnel TTL computed fields, merges the
 * field-derived protocol bits into hdr_bitmap, and updates the
 * interface PARIF computed fields.
 */
252 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
254 enum bnxt_ulp_intf_type match_port_type, act_port_type;
255 enum bnxt_ulp_direction_type dir;
256 uint32_t act_port_set;
258 /* Get the computed details */
259 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
260 match_port_type = ULP_COMP_FLD_IDX_RD(params,
261 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
262 act_port_type = ULP_COMP_FLD_IDX_RD(params,
263 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
264 act_port_set = ULP_COMP_FLD_IDX_RD(params,
265 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
267 /* set the flow direction in the proto and action header */
268 if (dir == BNXT_ULP_DIR_EGRESS) {
269 ULP_BITMAP_SET(params->hdr_bitmap.bits,
270 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
271 ULP_BITMAP_SET(params->act_bitmap.bits,
272 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
275 /* calculate the VF to VF flag */
276 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
277 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
278 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
280 /* Update the decrement ttl computational fields */
281 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
282 BNXT_ULP_ACTION_BIT_DEC_TTL)) {
284 * Check that vxlan proto is included and vxlan decap
285 * action is not set then decrement tunnel ttl.
286 * Similarly add GRE and NVGRE in future.
288 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
289 BNXT_ULP_HDR_BIT_T_VXLAN) &&
290 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
291 BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
292 ULP_COMP_FLD_IDX_WR(params,
293 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
295 ULP_COMP_FLD_IDX_WR(params,
296 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
300 /* Merge the hdr_fp_bit into the proto header bit */
301 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
303 /* Update the computed interface parameters */
304 bnxt_ulp_comp_fld_intf_update(params);
306 /* TBD: Handle the flow rejection scenarios */
311 * Function to handle the post processing of the parsing details
/*
 * Runs normal-flow post processing, then delegates to the tunnel-flow
 * post processor, whose return code is the overall result.
 */
314 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
316 ulp_post_process_normal_flow(params);
317 return ulp_post_process_tun_flow(params);
321 * Function to compute the flow direction based on the match port details
/*
 * Direction rules: an attribute-ingress flow that matches a VF-rep port
 * is treated as egress (traffic from the VF); otherwise the direction
 * simply follows the flow's ingress/egress attribute.
 */
324 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
326 enum bnxt_ulp_intf_type match_port_type;
328 /* Get the match port type */
329 match_port_type = ULP_COMP_FLD_IDX_RD(params,
330 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
332 /* If ingress flow and matchport is vf rep then dir is egress*/
333 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
334 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
335 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
336 BNXT_ULP_DIR_EGRESS);
338 /* Assign the input direction */
339 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
340 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
341 BNXT_ULP_DIR_INGRESS);
343 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
344 BNXT_ULP_DIR_EGRESS);
348 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolves the SVIF for the given ifindex and writes it (big-endian)
 * into the dedicated SVIF hdr_field along with the caller's mask.
 * Rejects a second source item (SVIF may only be set once per flow),
 * computes the flow direction, and records the SVIF in the computed
 * fields.  (Fixed HTML-entity corruption: "¶ms" -> "&params".)
 */
350 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
355 enum bnxt_ulp_direction_type dir;
356 struct ulp_rte_hdr_field *hdr_field;
357 enum bnxt_ulp_svif_type svif_type;
358 enum bnxt_ulp_intf_type port_type;
/* only one source (PF/VF/port-id/phy-port) item is allowed per flow */
360 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
361 BNXT_ULP_INVALID_SVIF_VAL) {
363 "SVIF already set,multiple source not support'd\n");
364 return BNXT_TF_RC_ERROR;
367 /* Get port type details */
368 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
369 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
370 BNXT_TF_DBG(ERR, "Invalid port type\n");
371 return BNXT_TF_RC_ERROR;
374 /* Update the match port type */
375 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
377 /* compute the direction */
378 bnxt_ulp_rte_parser_direction_compute(params);
380 /* Get the computed direction */
381 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
382 if (dir == BNXT_ULP_DIR_INGRESS) {
383 svif_type = BNXT_ULP_PHY_PORT_SVIF;
385 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
386 svif_type = BNXT_ULP_VF_FUNC_SVIF;
388 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
390 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* hardware match fields are big-endian */
392 svif = rte_cpu_to_be_16(svif);
393 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
394 memcpy(hdr_field->spec, &svif, sizeof(svif));
395 memcpy(hdr_field->mask, &mask, sizeof(mask));
396 hdr_field->size = sizeof(svif);
397 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
398 rte_be_to_cpu_16(svif));
399 return BNXT_TF_RC_SUCCESS;
402 /* Function to handle the parsing of the RTE port id */
/*
 * If no explicit source item set the SVIF, derive it implicitly from
 * the flow's incoming dpdk port with a full 0xFFFF mask.
 */
404 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
406 uint16_t port_id = 0;
407 uint16_t svif_mask = 0xFFFF;
409 int32_t rc = BNXT_TF_RC_ERROR;
/* explicit source item already handled the SVIF — nothing to do */
411 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
412 BNXT_ULP_INVALID_SVIF_VAL)
413 return BNXT_TF_RC_SUCCESS;
415 /* SVIF not set. So get the port id */
416 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
418 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
421 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
425 /* Update the SVIF details */
426 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
430 /* Function to handle the implicit action port id */
/*
 * If no explicit port action was parsed, synthesize a port-id action
 * from the incoming interface and feed it through the normal port-id
 * action handler, then clear the "port is set" flag so it still reads
 * as implicit.
 */
432 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
434 struct rte_flow_action action_item = {0};
435 struct rte_flow_action_port_id port_id = {0};
437 /* Read the action port set bit */
438 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
439 /* Already set, so just exit */
440 return BNXT_TF_RC_SUCCESS;
442 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
443 action_item.conf = &port_id;
445 /* Update the action port based on incoming port */
446 ulp_rte_port_id_act_handler(&action_item, params);
448 /* Reset the action port set bit */
449 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
450 return BNXT_TF_RC_SUCCESS;
453 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item carries no spec/mask; the SVIF comes from the incoming dpdk
 * port translated to a port-db ifindex, matched with a full mask.
 */
455 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
456 struct ulp_rte_parser_params *params)
458 uint16_t port_id = 0;
459 uint16_t svif_mask = 0xFFFF;
462 /* Get the implicit port id */
463 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
465 /* perform the conversion from dpdk port to bnxt ifindex */
466 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
469 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
470 return BNXT_TF_RC_ERROR;
473 /* Update the SVIF details */
474 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
477 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * Validates the VF item's spec/mask, converts the VF function id to a
 * port-db ifindex, and sets the SVIF from it.  NOTE(review): the
 * spec/mask validation conditions are partially elided in this
 * extraction — confirm against the full file.
 */
479 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
480 struct ulp_rte_parser_params *params)
482 const struct rte_flow_item_vf *vf_spec = item->spec;
483 const struct rte_flow_item_vf *vf_mask = item->mask;
486 int32_t rc = BNXT_TF_RC_PARSE_ERR;
488 /* Get VF rte_flow_item for Port details */
490 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
494 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
499 /* perform the conversion from VF Func id to bnxt ifindex */
500 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
503 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
506 /* Update the SVIF details */
507 return ulp_rte_parser_svif_set(params, ifindex, mask);
510 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * Validates the port-id item, converts the dpdk port id to a port-db
 * ifindex, and sets the SVIF using the item's mask.  NOTE(review):
 * some validation lines are elided in this extraction.
 */
512 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
513 struct ulp_rte_parser_params *params)
515 const struct rte_flow_item_port_id *port_spec = item->spec;
516 const struct rte_flow_item_port_id *port_mask = item->mask;
518 int32_t rc = BNXT_TF_RC_PARSE_ERR;
522 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
526 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
529 mask = port_mask->id;
531 /* perform the conversion from dpdk port to bnxt ifindex */
532 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
535 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
538 /* Update the SVIF details */
539 return ulp_rte_parser_svif_set(params, ifindex, mask);
542 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * Physical-port item: only valid for ingress flows.  Looks up the
 * physical port's SVIF from the port db by spec->index and writes it
 * (big-endian) with the item's mask into the SVIF hdr_field.
 * (Fixed HTML-entity corruption: "¶ms" -> "&params".)
 */
544 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
545 struct ulp_rte_parser_params *params)
547 const struct rte_flow_item_phy_port *port_spec = item->spec;
548 const struct rte_flow_item_phy_port *port_mask = item->mask;
550 int32_t rc = BNXT_TF_RC_ERROR;
552 enum bnxt_ulp_direction_type dir;
553 struct ulp_rte_hdr_field *hdr_field;
555 /* Copy the rte_flow_item for phy port into hdr_field */
557 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
561 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
564 mask = port_mask->index;
566 /* Update the match port type */
567 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
568 BNXT_ULP_INTF_TYPE_PHY_PORT);
570 /* Compute the Hw direction */
571 bnxt_ulp_rte_parser_direction_compute(params);
573 /* Direction validation */
574 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
575 if (dir == BNXT_ULP_DIR_EGRESS) {
577 "Parse Err:Phy ports are valid only for ingress\n");
578 return BNXT_TF_RC_PARSE_ERR;
581 /* Get the physical port details from port db */
582 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
585 BNXT_TF_DBG(ERR, "Failed to get port details\n");
586 return BNXT_TF_RC_PARSE_ERR;
589 /* Update the SVIF details */
590 svif = rte_cpu_to_be_16(svif);
591 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
592 memcpy(hdr_field->spec, &svif, sizeof(svif));
593 memcpy(hdr_field->mask, &mask, sizeof(mask));
594 hdr_field->size = sizeof(svif);
595 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
596 rte_be_to_cpu_16(svif));
597 return BNXT_TF_RC_SUCCESS;
600 /* Function to handle the update of proto header based on field values */
/*
 * Inspects an L2 ethertype (big-endian, as carried in the item) and
 * sets the inner/outer IPv4 or IPv6 fast-path header bit plus the
 * matching I_L3/O_L3 computed field; in_flag selects inner vs outer.
 */
602 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
603 uint16_t type, uint32_t in_flag)
605 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
607 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
608 BNXT_ULP_HDR_BIT_I_IPV4);
609 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
611 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
612 BNXT_ULP_HDR_BIT_O_IPV4);
613 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
615 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
617 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
618 BNXT_ULP_HDR_BIT_I_IPV6);
619 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
621 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
622 BNXT_ULP_HDR_BIT_O_IPV6);
623 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
628 /* Internal Function to identify broadcast or multicast packets */
/*
 * Returns truthy for a multicast or broadcast MAC, which the offload
 * path does not support (logged as a debug message).
 */
630 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
632 if (rte_is_multicast_ether_addr(eth_addr) ||
633 rte_is_broadcast_ether_addr(eth_addr)) {
635 "No support for bcast or mcast addr offload\n");
641 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Copies eth dst/src/type spec and mask into hdr_field, rejecting
 * bcast/mcast addresses, reserves the VLAN field slots that follow an
 * eth header, classifies the header as inner vs outer ethernet, and
 * propagates the ethertype into the L3 fast-path bits.
 * (Fixed HTML-entity corruption: "¶ms"/"ð_spec"/"ð_mask" ->
 * "&params"/"&eth_spec"/"&eth_mask".)
 */
643 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
644 struct ulp_rte_parser_params *params)
646 const struct rte_flow_item_eth *eth_spec = item->spec;
647 const struct rte_flow_item_eth *eth_mask = item->mask;
648 struct ulp_rte_hdr_field *field;
649 uint32_t idx = params->field_idx;
651 uint16_t eth_type = 0;
652 uint32_t inner_flag = 0;
655 * Copy the rte_flow_item for eth into hdr_field using ethernet
659 size = sizeof(eth_spec->dst.addr_bytes);
660 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
661 eth_spec->dst.addr_bytes,
663 /* Todo: work around to avoid multicast and broadcast addr */
664 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
665 return BNXT_TF_RC_PARSE_ERR;
667 size = sizeof(eth_spec->src.addr_bytes);
668 field = ulp_rte_parser_fld_copy(field,
669 eth_spec->src.addr_bytes,
671 /* Todo: work around to avoid multicast and broadcast addr */
672 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
673 return BNXT_TF_RC_PARSE_ERR;
675 field = ulp_rte_parser_fld_copy(field,
677 sizeof(eth_spec->type));
678 eth_type = eth_spec->type;
681 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
682 sizeof(eth_mask->dst.addr_bytes));
683 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
684 sizeof(eth_mask->src.addr_bytes));
685 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
686 sizeof(eth_mask->type));
688 /* Add number of vlan header elements */
689 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
690 params->vlan_idx = params->field_idx;
691 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
/* any prior outer L2/L3/L4 header makes this the inner ethernet */
693 /* Update the protocol hdr bitmap */
694 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
695 BNXT_ULP_HDR_BIT_O_ETH) ||
696 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697 BNXT_ULP_HDR_BIT_O_IPV4) ||
698 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699 BNXT_ULP_HDR_BIT_O_IPV6) ||
700 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
701 BNXT_ULP_HDR_BIT_O_UDP) ||
702 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
703 BNXT_ULP_HDR_BIT_O_TCP)) {
704 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
707 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
709 /* Update the field protocol hdr bitmap */
710 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
712 return BNXT_TF_RC_SUCCESS;
715 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Splits the VLAN TCI into priority (top 3 bits, currently ignored as
 * a mask per the OVS workaround below) and tag (low 12 bits), copies
 * spec/mask into the reserved vlan slots, then classifies the tag as
 * OO/OI/IO/II VLAN based on which ethernet headers were seen and how
 * many tags precede it, updating the per-level vtag computed fields.
 * (Fixed HTML-entity corruption: "¶ms" -> "&params".)
 */
717 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
718 struct ulp_rte_parser_params *params)
720 const struct rte_flow_item_vlan *vlan_spec = item->spec;
721 const struct rte_flow_item_vlan *vlan_mask = item->mask;
722 struct ulp_rte_hdr_field *field;
723 struct ulp_rte_hdr_bitmap *hdr_bit;
724 uint32_t idx = params->vlan_idx;
725 uint16_t vlan_tag, priority;
726 uint32_t outer_vtag_num;
727 uint32_t inner_vtag_num;
728 uint16_t eth_type = 0;
729 uint32_t inner_flag = 0;
732 * Copy the rte_flow_item for vlan into hdr_field using Vlan
736 vlan_tag = ntohs(vlan_spec->tci);
737 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
738 vlan_tag &= ULP_VLAN_TAG_MASK;
739 vlan_tag = htons(vlan_tag);
741 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
744 field = ulp_rte_parser_fld_copy(field,
747 field = ulp_rte_parser_fld_copy(field,
748 &vlan_spec->inner_type,
749 sizeof(vlan_spec->inner_type));
750 eth_type = vlan_spec->inner_type;
754 vlan_tag = ntohs(vlan_mask->tci);
755 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
759 * the storage for priority and vlan tag is 2 bytes
760 * The mask of priority which is 3 bits if it is all 1's
761 * then make the rest bits 13 bits as 1's
762 * so that it is matched as exact match.
764 if (priority == ULP_VLAN_PRIORITY_MASK)
765 priority |= ~ULP_VLAN_PRIORITY_MASK;
766 if (vlan_tag == ULP_VLAN_TAG_MASK)
767 vlan_tag |= ~ULP_VLAN_TAG_MASK;
768 vlan_tag = htons(vlan_tag);
771 * The priority field is ignored since OVS is setting it as
772 * wild card match and it is not supported. This is a work
773 * around and shall be addressed in the future.
775 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
778 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
780 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
781 sizeof(vlan_mask->inner_type));
783 /* Set the vlan index to new incremented value */
784 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
786 /* Get the outer tag and inner tag counts */
787 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
788 BNXT_ULP_CF_IDX_O_VTAG_NUM);
789 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
790 BNXT_ULP_CF_IDX_I_VTAG_NUM);
792 /* Update the hdr_bitmap of the vlans */
793 hdr_bit = &params->hdr_bitmap;
/* first tag after the outer ethernet only */
794 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
795 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
797 /* Update the vlan tag num */
799 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
801 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
802 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
803 ULP_BITMAP_SET(params->hdr_bitmap.bits,
804 BNXT_ULP_HDR_BIT_OO_VLAN);
/* second tag on the outer ethernet */
805 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
806 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
807 outer_vtag_num == 1) {
808 /* update the vlan tag num */
810 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
812 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
813 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
814 ULP_BITMAP_SET(params->hdr_bitmap.bits,
815 BNXT_ULP_HDR_BIT_OI_VLAN);
/* first tag after the inner ethernet */
816 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
817 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
819 /* update the vlan tag num */
821 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
823 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
824 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
825 ULP_BITMAP_SET(params->hdr_bitmap.bits,
826 BNXT_ULP_HDR_BIT_IO_VLAN);
/* second tag on the inner ethernet */
828 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
829 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
830 inner_vtag_num == 1) {
831 /* update the vlan tag num */
833 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
835 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
836 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
837 ULP_BITMAP_SET(params->hdr_bitmap.bits,
838 BNXT_ULP_HDR_BIT_II_VLAN);
841 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
842 return BNXT_TF_RC_ERROR;
844 /* Update the field protocol hdr bitmap */
845 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
846 return BNXT_TF_RC_SUCCESS;
849 /* Function to handle the update of proto header based on field values */
/*
 * Maps an L3 next-protocol value (UDP/TCP) to the inner/outer fast-path
 * header bit and the matching I_L4/O_L4 computed field; in_flag selects
 * inner vs outer.
 */
851 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
852 uint8_t proto, uint32_t in_flag)
854 if (proto == IPPROTO_UDP) {
856 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
857 BNXT_ULP_HDR_BIT_I_UDP);
858 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
860 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
861 BNXT_ULP_HDR_BIT_O_UDP);
862 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
864 } else if (proto == IPPROTO_TCP) {
866 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
867 BNXT_ULP_HDR_BIT_I_TCP);
868 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
870 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
871 BNXT_ULP_HDR_BIT_O_TCP);
872 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
877 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * Copies every IPv4 header field's spec and mask into hdr_field
 * (TOS mask deliberately ignored — OVS wildcard workaround), enforces
 * the two-L3-header limit, handles the F2-flow case where no eth item
 * precedes the IP item, classifies inner vs outer IPv4, and feeds the
 * (mask-qualified) next protocol into the L4 fast-path bits.
 * (Fixed HTML-entity corruption: "¶ms" -> "&params".)
 */
879 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
880 struct ulp_rte_parser_params *params)
882 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
883 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
884 struct ulp_rte_hdr_field *field;
885 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
886 uint32_t idx = params->field_idx;
889 uint32_t inner_flag = 0;
892 /* validate there are no 3rd L3 header */
893 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
895 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
896 return BNXT_TF_RC_ERROR;
899 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
900 BNXT_ULP_HDR_BIT_O_ETH) &&
901 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
902 BNXT_ULP_HDR_BIT_I_ETH)) {
903 /* Since F2 flow does not include eth item, when parser detects
904 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
905 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
906 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
907 * This will allow the parser post processor to update the
908 * t_dmac in hdr_field[o_eth.dmac]
910 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
911 BNXT_ULP_PROTO_HDR_VLAN_NUM);
912 params->field_idx = idx;
916 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
920 size = sizeof(ipv4_spec->hdr.version_ihl);
921 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
922 &ipv4_spec->hdr.version_ihl,
924 size = sizeof(ipv4_spec->hdr.type_of_service);
925 field = ulp_rte_parser_fld_copy(field,
926 &ipv4_spec->hdr.type_of_service,
928 size = sizeof(ipv4_spec->hdr.total_length);
929 field = ulp_rte_parser_fld_copy(field,
930 &ipv4_spec->hdr.total_length,
932 size = sizeof(ipv4_spec->hdr.packet_id);
933 field = ulp_rte_parser_fld_copy(field,
934 &ipv4_spec->hdr.packet_id,
936 size = sizeof(ipv4_spec->hdr.fragment_offset);
937 field = ulp_rte_parser_fld_copy(field,
938 &ipv4_spec->hdr.fragment_offset,
940 size = sizeof(ipv4_spec->hdr.time_to_live);
941 field = ulp_rte_parser_fld_copy(field,
942 &ipv4_spec->hdr.time_to_live,
944 size = sizeof(ipv4_spec->hdr.next_proto_id);
945 field = ulp_rte_parser_fld_copy(field,
946 &ipv4_spec->hdr.next_proto_id,
948 proto = ipv4_spec->hdr.next_proto_id;
949 size = sizeof(ipv4_spec->hdr.hdr_checksum);
950 field = ulp_rte_parser_fld_copy(field,
951 &ipv4_spec->hdr.hdr_checksum,
953 size = sizeof(ipv4_spec->hdr.src_addr);
954 field = ulp_rte_parser_fld_copy(field,
955 &ipv4_spec->hdr.src_addr,
957 size = sizeof(ipv4_spec->hdr.dst_addr);
958 field = ulp_rte_parser_fld_copy(field,
959 &ipv4_spec->hdr.dst_addr,
963 ulp_rte_prsr_mask_copy(params, &idx,
964 &ipv4_mask->hdr.version_ihl,
965 sizeof(ipv4_mask->hdr.version_ihl));
967 * The tos field is ignored since OVS is setting it as wild card
968 * match and it is not supported. This is a work around and
969 * shall be addressed in the future.
971 ulp_rte_prsr_mask_ignore(params, &idx,
972 &ipv4_mask->hdr.type_of_service,
973 sizeof(ipv4_mask->hdr.type_of_service)
976 ulp_rte_prsr_mask_copy(params, &idx,
977 &ipv4_mask->hdr.total_length,
978 sizeof(ipv4_mask->hdr.total_length));
979 ulp_rte_prsr_mask_copy(params, &idx,
980 &ipv4_mask->hdr.packet_id,
981 sizeof(ipv4_mask->hdr.packet_id));
982 ulp_rte_prsr_mask_copy(params, &idx,
983 &ipv4_mask->hdr.fragment_offset,
984 sizeof(ipv4_mask->hdr.fragment_offset));
985 ulp_rte_prsr_mask_copy(params, &idx,
986 &ipv4_mask->hdr.time_to_live,
987 sizeof(ipv4_mask->hdr.time_to_live));
988 ulp_rte_prsr_mask_copy(params, &idx,
989 &ipv4_mask->hdr.next_proto_id,
990 sizeof(ipv4_mask->hdr.next_proto_id));
991 ulp_rte_prsr_mask_copy(params, &idx,
992 &ipv4_mask->hdr.hdr_checksum,
993 sizeof(ipv4_mask->hdr.hdr_checksum));
994 ulp_rte_prsr_mask_copy(params, &idx,
995 &ipv4_mask->hdr.src_addr,
996 sizeof(ipv4_mask->hdr.src_addr));
997 ulp_rte_prsr_mask_copy(params, &idx,
998 &ipv4_mask->hdr.dst_addr,
999 sizeof(ipv4_mask->hdr.dst_addr));
1001 /* Add the number of ipv4 header elements */
1002 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
1004 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1005 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1006 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1007 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1008 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1011 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1012 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1015 /* Some of the PMD applications may set the protocol field
1016 * in the IPv4 spec but don't set the mask. So, consider
1017 * the mask in the proto value calculation.
1020 proto &= ipv4_mask->hdr.next_proto_id;
1022 /* Update the field protocol hdr bitmap */
1023 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1024 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1025 return BNXT_TF_RC_SUCCESS;
1028 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPv6 analogue of the IPv4 handler: splits vtc_flow into version /
 * TC / flow-label sub-fields (TC and flow-label masks deliberately
 * ignored — OVS wildcard workaround), copies the remaining header
 * spec/mask fields, enforces the two-L3-header limit, handles the
 * eth-less F2-flow index offset, classifies inner vs outer IPv6, and
 * feeds the mask-qualified next-header proto into the L4 bits.
 * (Fixed HTML-entity corruption: "¶ms" -> "&params".)
 */
1030 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1031 struct ulp_rte_parser_params *params)
1033 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1034 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1035 struct ulp_rte_hdr_field *field;
1036 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1037 uint32_t idx = params->field_idx;
1039 uint32_t vtcf, vtcf_mask;
1041 uint32_t inner_flag = 0;
1044 /* validate there are no 3rd L3 header */
1045 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1047 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1048 return BNXT_TF_RC_ERROR;
1051 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1052 BNXT_ULP_HDR_BIT_O_ETH) &&
1053 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1054 BNXT_ULP_HDR_BIT_I_ETH)) {
1055 /* Since F2 flow does not include eth item, when parser detects
1056 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1057 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
1058 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1059 * This will allow the parser post processor to update the
1060 * t_dmac in hdr_field[o_eth.dmac]
1062 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1063 BNXT_ULP_PROTO_HDR_VLAN_NUM);
1064 params->field_idx = idx;
1068 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1072 size = sizeof(ipv6_spec->hdr.vtc_flow);
1074 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1075 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1079 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1080 field = ulp_rte_parser_fld_copy(field,
1084 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1085 field = ulp_rte_parser_fld_copy(field,
1089 size = sizeof(ipv6_spec->hdr.payload_len);
1090 field = ulp_rte_parser_fld_copy(field,
1091 &ipv6_spec->hdr.payload_len,
1093 size = sizeof(ipv6_spec->hdr.proto);
1094 field = ulp_rte_parser_fld_copy(field,
1095 &ipv6_spec->hdr.proto,
1097 proto = ipv6_spec->hdr.proto;
1098 size = sizeof(ipv6_spec->hdr.hop_limits);
1099 field = ulp_rte_parser_fld_copy(field,
1100 &ipv6_spec->hdr.hop_limits,
1102 size = sizeof(ipv6_spec->hdr.src_addr);
1103 field = ulp_rte_parser_fld_copy(field,
1104 &ipv6_spec->hdr.src_addr,
1106 size = sizeof(ipv6_spec->hdr.dst_addr);
1107 field = ulp_rte_parser_fld_copy(field,
1108 &ipv6_spec->hdr.dst_addr,
1112 size = sizeof(ipv6_mask->hdr.vtc_flow);
1114 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1115 ulp_rte_prsr_mask_copy(params, &idx,
1119 * The TC and flow label field are ignored since OVS is
1120 * setting it for match and it is not supported.
1121 * This is a work around and
1122 * shall be addressed in the future.
1124 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1125 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1127 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1128 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1130 ulp_rte_prsr_mask_copy(params, &idx,
1131 &ipv6_mask->hdr.payload_len,
1132 sizeof(ipv6_mask->hdr.payload_len));
1133 ulp_rte_prsr_mask_copy(params, &idx,
1134 &ipv6_mask->hdr.proto,
1135 sizeof(ipv6_mask->hdr.proto));
1136 ulp_rte_prsr_mask_copy(params, &idx,
1137 &ipv6_mask->hdr.hop_limits,
1138 sizeof(ipv6_mask->hdr.hop_limits));
1139 ulp_rte_prsr_mask_copy(params, &idx,
1140 &ipv6_mask->hdr.src_addr,
1141 sizeof(ipv6_mask->hdr.src_addr));
1142 ulp_rte_prsr_mask_copy(params, &idx,
1143 &ipv6_mask->hdr.dst_addr,
1144 sizeof(ipv6_mask->hdr.dst_addr));
1146 /* add number of ipv6 header elements */
1147 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1149 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1150 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1151 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1152 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1153 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1156 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1157 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1160 /* Some of the PMD applications may set the protocol field
1161 * in the IPv6 spec but don't set the mask. So, consider
1162 * the mask in proto value calculation.
1165 proto &= ipv6_mask->hdr.proto;
1167 /* Update the field protocol hdr bitmap */
1168 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1169 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1171 return BNXT_TF_RC_SUCCESS;
1174 /* Function to handle the update of proto header based on field values */
/*
 * If the L4 destination port equals the well-known VXLAN UDP port (4789,
 * ULP_UDP_PORT_VXLAN), flag the flow as a VXLAN tunnel: set the T_VXLAN
 * bit in the fast-path header bitmap and write the L3_TUN computed field.
 * NOTE(review): extracted fragment — the return type, braces and any other
 * proto cases fall in elided lines; confirm against the full file.
 */
1176 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1179 	if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1180 		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1181 			       BNXT_ULP_HDR_BIT_T_VXLAN);
1182 		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1186 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies the UDP item spec fields (src/dst port, datagram length, checksum)
 * into params->hdr_field starting at params->field_idx, copies the matching
 * masks, advances field_idx by BNXT_ULP_PROTO_HDR_UDP_NUM, and sets the
 * outer or inner UDP header bitmap bit (inner if an outer L4 is already
 * present). Also forwards dst_port to ulp_rte_l4_proto_type_update() for
 * VXLAN tunnel detection. Returns BNXT_TF_RC_SUCCESS / BNXT_TF_RC_ERROR.
 * NOTE(review): extracted fragment — NULL checks on udp_spec/udp_mask and
 * several braces fall in elided lines.
 */
1188 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1189 struct ulp_rte_parser_params *params)
1191 const struct rte_flow_item_udp *udp_spec = item->spec;
1192 const struct rte_flow_item_udp *udp_mask = item->mask;
1193 struct ulp_rte_hdr_field *field;
1194 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1195 uint32_t idx = params->field_idx;
1197 uint16_t dst_port = 0;
/* At most two L4 headers (outer + inner) are supported. */
1200 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1202 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1203 return BNXT_TF_RC_ERROR;
1207 * Copy the rte_flow_item for udp into hdr_field using udp
1211 size = sizeof(udp_spec->hdr.src_port);
1212 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1213 &udp_spec->hdr.src_port,
1216 size = sizeof(udp_spec->hdr.dst_port);
1217 field = ulp_rte_parser_fld_copy(field,
1218 &udp_spec->hdr.dst_port,
/* Remember dst_port for VXLAN tunnel-port detection below. */
1220 dst_port = udp_spec->hdr.dst_port;
1221 size = sizeof(udp_spec->hdr.dgram_len);
1222 field = ulp_rte_parser_fld_copy(field,
1223 &udp_spec->hdr.dgram_len,
1225 size = sizeof(udp_spec->hdr.dgram_cksum);
1226 field = ulp_rte_parser_fld_copy(field,
1227 &udp_spec->hdr.dgram_cksum,
1231 ulp_rte_prsr_mask_copy(params, &idx,
1232 &udp_mask->hdr.src_port,
1233 sizeof(udp_mask->hdr.src_port));
1234 ulp_rte_prsr_mask_copy(params, &idx,
1235 &udp_mask->hdr.dst_port,
1236 sizeof(udp_mask->hdr.dst_port));
1237 ulp_rte_prsr_mask_copy(params, &idx,
1238 &udp_mask->hdr.dgram_len,
1239 sizeof(udp_mask->hdr.dgram_len));
1240 ulp_rte_prsr_mask_copy(params, &idx,
1241 &udp_mask->hdr.dgram_cksum,
1242 sizeof(udp_mask->hdr.dgram_cksum));
1245 /* Add number of UDP header elements */
1246 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1248 /* Set the udp header bitmap and computed l4 header bitmaps */
1249 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1250 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
/* An outer L4 already exists: this UDP header is the inner one. */
1251 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1252 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1254 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1255 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1256 /* Update the field protocol hdr bitmap */
1257 ulp_rte_l4_proto_type_update(params, dst_port);
1259 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1260 return BNXT_TF_RC_SUCCESS;
1263 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies the TCP item spec fields (ports, seq/ack, data offset, flags,
 * window, checksum, urgent pointer) into params->hdr_field at
 * params->field_idx, copies the matching masks, advances field_idx by
 * BNXT_ULP_PROTO_HDR_TCP_NUM, and sets the outer or inner TCP header
 * bitmap bit (inner if an outer L4 is already present).
 * Returns BNXT_TF_RC_SUCCESS / BNXT_TF_RC_ERROR.
 * NOTE(review): extracted fragment — NULL checks on tcp_spec/tcp_mask and
 * several braces fall in elided lines.
 */
1265 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1266 struct ulp_rte_parser_params *params)
1268 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1269 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1270 struct ulp_rte_hdr_field *field;
1271 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1272 uint32_t idx = params->field_idx;
/* At most two L4 headers (outer + inner) are supported. */
1276 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1278 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1279 return BNXT_TF_RC_ERROR;
1283 * Copy the rte_flow_item for tcp into hdr_field using tcp
1287 size = sizeof(tcp_spec->hdr.src_port);
1288 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1289 &tcp_spec->hdr.src_port,
1291 size = sizeof(tcp_spec->hdr.dst_port);
1292 field = ulp_rte_parser_fld_copy(field,
1293 &tcp_spec->hdr.dst_port,
1295 size = sizeof(tcp_spec->hdr.sent_seq);
1296 field = ulp_rte_parser_fld_copy(field,
1297 &tcp_spec->hdr.sent_seq,
1299 size = sizeof(tcp_spec->hdr.recv_ack);
1300 field = ulp_rte_parser_fld_copy(field,
1301 &tcp_spec->hdr.recv_ack,
1303 size = sizeof(tcp_spec->hdr.data_off);
1304 field = ulp_rte_parser_fld_copy(field,
1305 &tcp_spec->hdr.data_off,
1307 size = sizeof(tcp_spec->hdr.tcp_flags);
1308 field = ulp_rte_parser_fld_copy(field,
1309 &tcp_spec->hdr.tcp_flags,
1311 size = sizeof(tcp_spec->hdr.rx_win);
1312 field = ulp_rte_parser_fld_copy(field,
1313 &tcp_spec->hdr.rx_win,
1315 size = sizeof(tcp_spec->hdr.cksum);
1316 field = ulp_rte_parser_fld_copy(field,
1317 &tcp_spec->hdr.cksum,
1319 size = sizeof(tcp_spec->hdr.tcp_urp);
1320 field = ulp_rte_parser_fld_copy(field,
1321 &tcp_spec->hdr.tcp_urp,
/* When no spec is given, skip the spec slots so masks still align. */
1324 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1328 ulp_rte_prsr_mask_copy(params, &idx,
1329 &tcp_mask->hdr.src_port,
1330 sizeof(tcp_mask->hdr.src_port));
1331 ulp_rte_prsr_mask_copy(params, &idx,
1332 &tcp_mask->hdr.dst_port,
1333 sizeof(tcp_mask->hdr.dst_port));
1334 ulp_rte_prsr_mask_copy(params, &idx,
1335 &tcp_mask->hdr.sent_seq,
1336 sizeof(tcp_mask->hdr.sent_seq));
1337 ulp_rte_prsr_mask_copy(params, &idx,
1338 &tcp_mask->hdr.recv_ack,
1339 sizeof(tcp_mask->hdr.recv_ack));
1340 ulp_rte_prsr_mask_copy(params, &idx,
1341 &tcp_mask->hdr.data_off,
1342 sizeof(tcp_mask->hdr.data_off));
1343 ulp_rte_prsr_mask_copy(params, &idx,
1344 &tcp_mask->hdr.tcp_flags,
1345 sizeof(tcp_mask->hdr.tcp_flags));
1346 ulp_rte_prsr_mask_copy(params, &idx,
1347 &tcp_mask->hdr.rx_win,
1348 sizeof(tcp_mask->hdr.rx_win));
1349 ulp_rte_prsr_mask_copy(params, &idx,
1350 &tcp_mask->hdr.cksum,
1351 sizeof(tcp_mask->hdr.cksum));
1352 ulp_rte_prsr_mask_copy(params, &idx,
1353 &tcp_mask->hdr.tcp_urp,
1354 sizeof(tcp_mask->hdr.tcp_urp));
1356 /* add number of TCP header elements */
1357 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1359 /* Set the tcp header bitmap and computed l4 header bitmaps */
1360 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1361 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
/* An outer L4 already exists: this TCP header is the inner one. */
1362 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1363 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1365 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1366 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1368 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1369 return BNXT_TF_RC_SUCCESS;
1372 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies the VXLAN item spec fields (flags, rsvd0, vni, rsvd1) and their
 * masks into params->hdr_field, advances field_idx by
 * BNXT_ULP_PROTO_HDR_VXLAN_NUM, and sets the T_VXLAN tunnel header bit.
 * Always returns BNXT_TF_RC_SUCCESS on the visible paths.
 * NOTE(review): extracted fragment — NULL checks on vxlan_spec/vxlan_mask
 * and the spec/mask argument lines are elided.
 */
1374 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1375 struct ulp_rte_parser_params *params)
1377 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1378 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1379 struct ulp_rte_hdr_field *field;
1380 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1381 uint32_t idx = params->field_idx;
1385 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1389 size = sizeof(vxlan_spec->flags);
1390 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1393 size = sizeof(vxlan_spec->rsvd0);
1394 field = ulp_rte_parser_fld_copy(field,
1397 size = sizeof(vxlan_spec->vni);
1398 field = ulp_rte_parser_fld_copy(field,
1401 size = sizeof(vxlan_spec->rsvd1);
1402 field = ulp_rte_parser_fld_copy(field,
1407 ulp_rte_prsr_mask_copy(params, &idx,
1409 sizeof(vxlan_mask->flags));
1410 ulp_rte_prsr_mask_copy(params, &idx,
1412 sizeof(vxlan_mask->rsvd0));
1413 ulp_rte_prsr_mask_copy(params, &idx,
1415 sizeof(vxlan_mask->vni));
1416 ulp_rte_prsr_mask_copy(params, &idx,
1418 sizeof(vxlan_mask->rsvd1));
1420 /* Add number of vxlan header elements */
1421 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1423 /* Update the hdr_bitmap with vxlan */
1424 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1425 return BNXT_TF_RC_SUCCESS;
1428 /* Function to handle the parsing of RTE Flow item void Header */
/* VOID items carry no match data; accept and move on. */
1430 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1431 struct ulp_rte_parser_params *params __rte_unused)
1433 return BNXT_TF_RC_SUCCESS;
1436 /* Function to handle the parsing of RTE Flow action void Header. */
/* VOID actions are no-ops; accept and move on. */
1438 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1439 struct ulp_rte_parser_params *params __rte_unused)
1441 return BNXT_TF_RC_SUCCESS;
1444 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the big-endian mark id into the MARK action property and sets
 * the MARK action bit. Errors out when the action conf is missing.
 * NOTE(review): extracted fragment — the NULL check on `mark` and the
 * surrounding braces are elided.
 */
1446 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1447 struct ulp_rte_parser_params *param)
1449 const struct rte_flow_action_mark *mark;
1450 struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1453 mark = action_item->conf;
1455 mark_id = tfp_cpu_to_be_32(mark->id);
1456 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1457 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1459 /* Update the act_bitmap with mark */
1460 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1461 return BNXT_TF_RC_SUCCESS;
1463 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1464 return BNXT_TF_RC_ERROR;
1467 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Sets the RSS action bit when the action conf is present; the RSS
 * configuration itself is not copied on the visible paths.
 * NOTE(review): extracted fragment — the NULL check on `rss` is elided.
 */
1469 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1470 struct ulp_rte_parser_params *param)
1472 const struct rte_flow_action_rss *rss = action_item->conf;
1475 /* Update the act_bitmap with rss */
1476 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1477 return BNXT_TF_RC_SUCCESS;
1479 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1480 return BNXT_TF_RC_ERROR;
1483 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walks the item list embedded in the VXLAN_ENCAP action
 * (eth -> [vlan [vlan]] -> ipv4|ipv6 -> udp -> vxlan) and serializes each
 * header into the encap action-property buffers (L2 DMAC/SMAC, VTAG,
 * IP, UDP, TUN), recording sizes/types and the encap IPv4/IPv6 computed
 * flags. Items are advanced with ulp_rte_item_skip_void(); any missing or
 * out-of-order header aborts with BNXT_TF_RC_ERROR.
 * NOTE(review): extracted fragment — many interior lines (NULL checks,
 * `else` branches, some argument lines, the `buff` declaration) are
 * elided; treat the structure comments below as a guide, not gospel.
 */
1485 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1486 struct ulp_rte_parser_params *params)
1488 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1489 const struct rte_flow_item *item;
1490 const struct rte_flow_item_eth *eth_spec;
1491 const struct rte_flow_item_ipv4 *ipv4_spec;
1492 const struct rte_flow_item_ipv6 *ipv6_spec;
1493 struct rte_flow_item_vxlan vxlan_spec;
1494 uint32_t vlan_num = 0, vlan_size = 0;
1495 uint32_t ip_size = 0, ip_type = 0;
1496 uint32_t vxlan_size = 0;
1498 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1499 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1501 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1502 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1504 struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1505 struct ulp_rte_act_prop *ap = &params->act_prop;
1506 const uint8_t *tmp_buff;
1508 vxlan_encap = action_item->conf;
1510 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1511 return BNXT_TF_RC_ERROR;
1514 item = vxlan_encap->definition;
1516 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1517 return BNXT_TF_RC_ERROR;
1520 if (!ulp_rte_item_skip_void(&item, 0))
1521 return BNXT_TF_RC_ERROR;
1523 /* must have ethernet header */
1524 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1525 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1526 return BNXT_TF_RC_ERROR;
1528 eth_spec = item->spec;
1529 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1530 ulp_encap_buffer_copy(buff,
1531 eth_spec->dst.addr_bytes,
1532 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1533 ULP_BUFFER_ALIGN_8_BYTE);
1535 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1536 ulp_encap_buffer_copy(buff,
1537 eth_spec->src.addr_bytes,
1538 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1539 ULP_BUFFER_ALIGN_8_BYTE);
1541 /* Goto the next item */
1542 if (!ulp_rte_item_skip_void(&item, 1))
1543 return BNXT_TF_RC_ERROR;
1545 /* May have vlan header */
1546 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1548 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1549 ulp_encap_buffer_copy(buff,
1551 sizeof(struct rte_flow_item_vlan),
1552 ULP_BUFFER_ALIGN_8_BYTE);
1554 if (!ulp_rte_item_skip_void(&item, 1))
1555 return BNXT_TF_RC_ERROR;
1558 /* may have two vlan headers */
1559 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1561 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1562 sizeof(struct rte_flow_item_vlan)],
1564 sizeof(struct rte_flow_item_vlan));
1565 if (!ulp_rte_item_skip_void(&item, 1))
1566 return BNXT_TF_RC_ERROR;
1568 /* Update the vlan count and size of more than one */
1570 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1571 vlan_num = tfp_cpu_to_be_32(vlan_num);
1572 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1575 vlan_size = tfp_cpu_to_be_32(vlan_size);
1576 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1581 /* L3 must be IPv4, IPv6 */
1582 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1583 ipv4_spec = item->spec;
1584 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1586 /* copy the ipv4 details */
/* Empty ver/hlen/TOS in the spec => fall back to def_ipv4_hdr template. */
1587 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1588 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1589 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1590 ulp_encap_buffer_copy(buff,
1592 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1593 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1594 ULP_BUFFER_ALIGN_8_BYTE);
1596 /* Total length being ignored in the ip hdr. */
1597 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1598 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1599 ulp_encap_buffer_copy(buff,
1601 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1602 ULP_BUFFER_ALIGN_8_BYTE);
1603 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1604 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1605 ulp_encap_buffer_copy(buff,
1606 &ipv4_spec->hdr.version_ihl,
1607 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1608 ULP_BUFFER_ALIGN_8_BYTE);
1611 /* Update the dst ip address in ip encap buffer */
1612 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1613 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1614 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1615 ulp_encap_buffer_copy(buff,
1616 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1617 sizeof(ipv4_spec->hdr.dst_addr),
1618 ULP_BUFFER_ALIGN_8_BYTE);
1620 /* Update the src ip address */
1621 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1622 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1623 sizeof(ipv4_spec->hdr.src_addr)];
1624 ulp_encap_buffer_copy(buff,
1625 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1626 sizeof(ipv4_spec->hdr.src_addr),
1627 ULP_BUFFER_ALIGN_8_BYTE);
1629 /* Update the ip size details */
1630 ip_size = tfp_cpu_to_be_32(ip_size);
1631 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1632 &ip_size, sizeof(uint32_t));
1634 /* update the ip type */
1635 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1636 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1637 &ip_type, sizeof(uint32_t));
1639 /* update the computed field to notify it is ipv4 header */
1640 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1643 if (!ulp_rte_item_skip_void(&item, 1))
1644 return BNXT_TF_RC_ERROR;
1645 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1646 ipv6_spec = item->spec;
1647 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1649 /* copy the ipv6 details */
/* Empty vtc_flow in the spec => fall back to def_ipv6_hdr template. */
1650 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1651 if (ulp_buffer_is_empty(tmp_buff,
1652 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1653 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1654 ulp_encap_buffer_copy(buff,
1656 sizeof(def_ipv6_hdr),
1657 ULP_BUFFER_ALIGN_8_BYTE);
1659 /* The payload length being ignored in the ip hdr. */
1660 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1661 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1662 ulp_encap_buffer_copy(buff,
1664 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1665 ULP_BUFFER_ALIGN_8_BYTE);
1666 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1667 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1668 BNXT_ULP_ENCAP_IPV6_DO];
1669 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1670 ulp_encap_buffer_copy(buff,
1672 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1673 ULP_BUFFER_ALIGN_8_BYTE);
1675 /* Update the dst ip address in ip encap buffer */
1676 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1677 sizeof(def_ipv6_hdr)];
1678 ulp_encap_buffer_copy(buff,
1679 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1680 sizeof(ipv6_spec->hdr.dst_addr),
1681 ULP_BUFFER_ALIGN_8_BYTE);
1683 /* Update the src ip address */
1684 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1685 ulp_encap_buffer_copy(buff,
1686 (const uint8_t *)ipv6_spec->hdr.src_addr,
1687 sizeof(ipv6_spec->hdr.src_addr),
1688 ULP_BUFFER_ALIGN_16_BYTE);
1690 /* Update the ip size details */
1691 ip_size = tfp_cpu_to_be_32(ip_size);
1692 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1693 &ip_size, sizeof(uint32_t));
1695 /* update the ip type */
1696 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1697 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1698 &ip_type, sizeof(uint32_t));
1700 /* update the computed field to notify it is ipv6 header */
1701 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1704 if (!ulp_rte_item_skip_void(&item, 1))
1705 return BNXT_TF_RC_ERROR;
1707 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1708 return BNXT_TF_RC_ERROR;
1712 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1713 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1714 return BNXT_TF_RC_ERROR;
1716 /* copy the udp details */
1717 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1718 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1719 ULP_BUFFER_ALIGN_8_BYTE);
1721 if (!ulp_rte_item_skip_void(&item, 1))
1722 return BNXT_TF_RC_ERROR;
1725 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1726 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1727 return BNXT_TF_RC_ERROR;
1729 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1730 /* copy the vxlan details */
/* Force the I (valid-VNI) flag per the VXLAN header format. */
1731 memcpy(&vxlan_spec, item->spec, vxlan_size);
1732 vxlan_spec.flags = 0x08;
1733 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
1734 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1735 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1736 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
/* IPv6 path splits the vxlan header copy into two aligned halves. */
1738 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1739 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1740 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1741 (const uint8_t *)&vxlan_spec.vni,
1742 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1744 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1745 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1746 &vxlan_size, sizeof(uint32_t));
1748 /* update the act_bitmap with vxlan encap */
1749 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1750 return BNXT_TF_RC_SUCCESS;
1753 /* Function to handle the parsing of RTE Flow action vxlan_decap Header */
/* Sets the VXLAN_DECAP action bit and the tunnel/decap computed fields. */
1755 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1757 struct ulp_rte_parser_params *params)
1759 /* update the act_bitmap with vxlan decap */
1760 ULP_BITMAP_SET(params->act_bitmap.bits,
1761 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1762 /* Update computational field with tunnel decap info */
1763 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1764 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1765 return BNXT_TF_RC_SUCCESS;
1768 /* Function to handle the parsing of RTE Flow action drop Header. */
/* Unconditionally sets the DROP action bit; no conf data is needed. */
1770 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1771 struct ulp_rte_parser_params *params)
1773 /* Update the act_bitmap with drop */
1774 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1775 return BNXT_TF_RC_SUCCESS;
1778 /* Function to handle the parsing of RTE Flow action count. */
/*
 * Rejects shared counters, copies the count conf into the COUNT action
 * property, and sets the COUNT action bit.
 * NOTE(review): extracted fragment — the NULL check on act_count and the
 * memcpy source argument are in elided lines.
 */
1780 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1781 struct ulp_rte_parser_params *params)
1784 const struct rte_flow_action_count *act_count;
1785 struct ulp_rte_act_prop *act_prop = &params->act_prop;
1787 act_count = action_item->conf;
/* Shared (cross-flow) counters are not supported by this parser. */
1789 if (act_count->shared) {
1791 "Parse Error:Shared count not supported\n");
1792 return BNXT_TF_RC_PARSE_ERR;
1794 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1796 BNXT_ULP_ACT_PROP_SZ_COUNT);
1799 /* Update the act_bitmap with count */
1800 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1801 return BNXT_TF_RC_SUCCESS;
1804 /* Function to handle the parsing of action ports. */
/*
 * Resolves the destination for a port-directing action: for egress flows
 * it looks up and stores the vport; for ingress flows it looks up the
 * default vnic (VF-func vnic for VF-rep ports, driver-func vnic
 * otherwise). The 32-bit id is stored big-endian in the action
 * properties and the ACT_PORT_IS_SET computed field is raised.
 * NOTE(review): extracted fragment — pid_s/pid declarations and some
 * assignments are in elided lines.
 */
1806 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1809 enum bnxt_ulp_direction_type dir;
1812 struct ulp_rte_act_prop *act = &param->act_prop;
1813 enum bnxt_ulp_intf_type port_type;
1816 /* Get the direction */
1817 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1818 if (dir == BNXT_ULP_DIR_EGRESS) {
1819 /* For egress direction, fill vport */
1820 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1821 return BNXT_TF_RC_ERROR;
1824 pid = rte_cpu_to_be_32(pid);
1825 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1826 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1828 /* For ingress direction, fill vnic */
1829 port_type = ULP_COMP_FLD_IDX_RD(param,
1830 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1831 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1832 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1834 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1836 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1838 return BNXT_TF_RC_ERROR;
1841 pid = rte_cpu_to_be_32(pid);
1842 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1843 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1846 /* Update the action port set bit */
1847 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1848 return BNXT_TF_RC_SUCCESS;
1851 /* Function to handle the parsing of RTE Flow action PF. */
/*
 * Redirects to the PF of the incoming port: maps the incoming port id to
 * a port-db ifindex, verifies the interface is a PF, records the port
 * type, and delegates to ulp_rte_parser_act_port_set().
 */
1853 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1854 struct ulp_rte_parser_params *params)
1858 enum bnxt_ulp_intf_type intf_type;
1860 /* Get the port id of the current device */
1861 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1863 /* Get the port db ifindex */
1864 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1866 BNXT_TF_DBG(ERR, "Invalid port id\n");
1867 return BNXT_TF_RC_ERROR;
1870 /* Check the port is PF port */
1871 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1872 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1873 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1874 return BNXT_TF_RC_ERROR;
1876 /* Update the action properties */
1877 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1878 return ulp_rte_parser_act_port_set(params, ifindex);
1881 /* Function to handle the parsing of RTE Flow action VF. */
/*
 * Redirects to the given VF: validates the action conf, rejects the
 * unsupported `original` flag, maps the VF function id to a port-db
 * ifindex, confirms the interface is a (trusted) VF, then delegates to
 * ulp_rte_parser_act_port_set().
 */
1883 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1884 struct ulp_rte_parser_params *params)
1886 const struct rte_flow_action_vf *vf_action;
1888 enum bnxt_ulp_intf_type intf_type;
1890 vf_action = action_item->conf;
1892 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1893 return BNXT_TF_RC_PARSE_ERR;
1896 if (vf_action->original) {
1897 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1898 return BNXT_TF_RC_PARSE_ERR;
1901 /* Check the port is VF port */
1902 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1904 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1905 return BNXT_TF_RC_ERROR;
1907 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1908 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1909 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1910 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1911 return BNXT_TF_RC_ERROR;
1914 /* Update the action properties */
1915 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1916 return ulp_rte_parser_act_port_set(params, ifindex);
1919 /* Function to handle the parsing of RTE Flow action port_id. */
/*
 * Redirects to another DPDK port: validates the conf, rejects the
 * unsupported `original` flag, maps the port id to a port-db ifindex,
 * fetches its interface type, and delegates to
 * ulp_rte_parser_act_port_set().
 */
1921 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1922 struct ulp_rte_parser_params *param)
1924 const struct rte_flow_action_port_id *port_id = act_item->conf;
1926 enum bnxt_ulp_intf_type intf_type;
1930 "ParseErr: Invalid Argument\n");
1931 return BNXT_TF_RC_PARSE_ERR;
1933 if (port_id->original) {
1935 "ParseErr:Portid Original not supported\n");
1936 return BNXT_TF_RC_PARSE_ERR;
1939 /* Get the port db ifindex */
1940 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1942 BNXT_TF_DBG(ERR, "Invalid port id\n");
1943 return BNXT_TF_RC_ERROR;
1946 /* Get the intf type */
1947 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1949 BNXT_TF_DBG(ERR, "Invalid port type\n");
1950 return BNXT_TF_RC_ERROR;
1953 /* Set the action port */
1954 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1955 return ulp_rte_parser_act_port_set(param, ifindex);
1958 /* Function to handle the parsing of RTE Flow action phy_port. */
/*
 * Redirects to a physical port — egress flows only. Validates the conf,
 * rejects `original`, resolves the phy port index to a vport via the
 * port db, stores it big-endian in the VPORT action property, and marks
 * the action port as set with type PHY_PORT.
 */
1960 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1961 struct ulp_rte_parser_params *prm)
1963 const struct rte_flow_action_phy_port *phy_port;
1967 enum bnxt_ulp_direction_type dir;
1969 phy_port = action_item->conf;
1972 "ParseErr: Invalid Argument\n");
1973 return BNXT_TF_RC_PARSE_ERR;
1976 if (phy_port->original) {
1978 "Parse Err:Port Original not supported\n");
1979 return BNXT_TF_RC_PARSE_ERR;
1981 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1982 if (dir != BNXT_ULP_DIR_EGRESS) {
1984 "Parse Err:Phy ports are valid only for egress\n");
1985 return BNXT_TF_RC_PARSE_ERR;
1987 /* Get the physical port details from port db */
1988 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1991 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1996 pid = rte_cpu_to_be_32(pid);
1997 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1998 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2000 /* Update the action port set bit */
2001 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2002 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2003 BNXT_ULP_INTF_TYPE_PHY_PORT);
2004 return BNXT_TF_RC_SUCCESS;
2007 /* Function to handle the parsing of RTE Flow action pop vlan. */
/* Unconditionally sets the POP_VLAN action bit; no conf data is needed. */
2009 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2010 struct ulp_rte_parser_params *params)
2012 /* Update the act_bitmap with pop */
2013 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
2014 return BNXT_TF_RC_SUCCESS;
2017 /* Function to handle the parsing of RTE Flow action push vlan. */
/*
 * Accepts only ethertype 0x8100 (RTE_ETHER_TYPE_VLAN); stores the
 * ethertype in the PUSH_VLAN action property and sets the PUSH_VLAN
 * action bit.
 * NOTE(review): extracted fragment — the NULL check on push_vlan is in
 * an elided line.
 */
2019 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2020 struct ulp_rte_parser_params *params)
2022 const struct rte_flow_action_of_push_vlan *push_vlan;
2024 struct ulp_rte_act_prop *act = &params->act_prop;
2026 push_vlan = action_item->conf;
2028 ethertype = push_vlan->ethertype;
2029 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2031 "Parse Err: Ethertype not supported\n");
2032 return BNXT_TF_RC_PARSE_ERR;
2034 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2035 &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2036 /* Update the act_bitmap with push vlan */
2037 ULP_BITMAP_SET(params->act_bitmap.bits,
2038 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
2039 return BNXT_TF_RC_SUCCESS;
2041 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2042 return BNXT_TF_RC_ERROR;
2045 /* Function to handle the parsing of RTE Flow action set vlan id. */
/*
 * Stores the VLAN id in the SET_VLAN_VID action property and sets the
 * SET_VLAN_VID action bit. A NULL conf or a zero vid is rejected (note:
 * the `vlan_vid->vlan_vid` truthiness check makes vid 0 an error here).
 */
2047 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2048 struct ulp_rte_parser_params *params)
2050 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2052 struct ulp_rte_act_prop *act = &params->act_prop;
2054 vlan_vid = action_item->conf;
2055 if (vlan_vid && vlan_vid->vlan_vid) {
2056 vid = vlan_vid->vlan_vid;
2057 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2058 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2059 /* Update the act_bitmap with vlan vid */
2060 ULP_BITMAP_SET(params->act_bitmap.bits,
2061 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
2062 return BNXT_TF_RC_SUCCESS;
2064 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2065 return BNXT_TF_RC_ERROR;
2068 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
/*
 * Stores the VLAN priority in the SET_VLAN_PCP action property and sets
 * the SET_VLAN_PCP action bit.
 * NOTE(review): extracted fragment — the NULL check on vlan_pcp is in an
 * elided line.
 */
2070 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2071 struct ulp_rte_parser_params *params)
2073 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2075 struct ulp_rte_act_prop *act = &params->act_prop;
2077 vlan_pcp = action_item->conf;
2079 pcp = vlan_pcp->vlan_pcp;
2080 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2081 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2082 /* Update the act_bitmap with vlan pcp */
2083 ULP_BITMAP_SET(params->act_bitmap.bits,
2084 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
2085 return BNXT_TF_RC_SUCCESS;
2087 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2088 return BNXT_TF_RC_ERROR;
2091 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
/*
 * Stores the new IPv4 source address in the SET_IPV4_SRC action property
 * and sets the SET_IPV4_SRC action bit.
 * NOTE(review): extracted fragment — the NULL check on set_ipv4 is in an
 * elided line.
 */
2093 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2094 struct ulp_rte_parser_params *params)
2096 const struct rte_flow_action_set_ipv4 *set_ipv4;
2097 struct ulp_rte_act_prop *act = &params->act_prop;
2099 set_ipv4 = action_item->conf;
2101 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2102 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2103 /* Update the act_bitmap with set ipv4 src */
2104 ULP_BITMAP_SET(params->act_bitmap.bits,
2105 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
2106 return BNXT_TF_RC_SUCCESS;
2108 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2109 return BNXT_TF_RC_ERROR;
2112 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
/*
 * Stores the new IPv4 destination address in the SET_IPV4_DST action
 * property and sets the SET_IPV4_DST action bit.
 * NOTE(review): extracted fragment — the NULL check on set_ipv4 is in an
 * elided line.
 */
2114 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2115 struct ulp_rte_parser_params *params)
2117 const struct rte_flow_action_set_ipv4 *set_ipv4;
2118 struct ulp_rte_act_prop *act = &params->act_prop;
2120 set_ipv4 = action_item->conf;
2122 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2123 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2124 /* Update the act_bitmap with set ipv4 dst */
2125 ULP_BITMAP_SET(params->act_bitmap.bits,
2126 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
2127 return BNXT_TF_RC_SUCCESS;
2129 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2130 return BNXT_TF_RC_ERROR;
2133 /* Function to handle the parsing of RTE Flow action set tp src.*/
/*
 * Stores the new transport-layer source port in the SET_TP_SRC action
 * property and sets the SET_TP_SRC action bit.
 * NOTE(review): extracted fragment — the NULL check on set_tp is in an
 * elided line.
 */
2135 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2136 struct ulp_rte_parser_params *params)
2138 const struct rte_flow_action_set_tp *set_tp;
2139 struct ulp_rte_act_prop *act = &params->act_prop;
2141 set_tp = action_item->conf;
2143 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2144 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2145 /* Update the act_bitmap with set tp src */
2146 ULP_BITMAP_SET(params->act_bitmap.bits,
2147 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
2148 return BNXT_TF_RC_SUCCESS;
2151 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2152 return BNXT_TF_RC_ERROR;
2155 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2157 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2158 struct ulp_rte_parser_params *params)
2160 const struct rte_flow_action_set_tp *set_tp;
2161 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2163 set_tp = action_item->conf;
2165 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2166 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2167 /* Update the hdr_bitmap with set tp dst */
2168 ULP_BITMAP_SET(params->act_bitmap.bits,
2169 BNXT_ULP_ACTION_BIT_SET_TP_DST);
2170 return BNXT_TF_RC_SUCCESS;
2173 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2174 return BNXT_TF_RC_ERROR;
2177 /* Function to handle the parsing of RTE Flow action dec ttl.*/
/* Unconditionally sets the DEC_TTL action bit; no conf data is needed. */
2179 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2180 struct ulp_rte_parser_params *params)
2182 /* Update the act_bitmap with dec ttl */
2183 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2184 return BNXT_TF_RC_SUCCESS;
2187 /* Function to handle the parsing of RTE Flow action JUMP */
/* Unconditionally sets the JUMP action bit; no conf data is needed. */
2189 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2190 struct ulp_rte_parser_params *params)
2192 /* Update the act_bitmap with jump */
2193 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
2194 return BNXT_TF_RC_SUCCESS;