1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
20 /* Local defines for the parsing functions */
21 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
/* NOTE(review): 0x700 looks like the 3-bit PCP mask as observed after the
 * >>13 shift plus a 16-bit byte swap in the vlan handler — confirm there.
 */
22 #define ULP_VLAN_PRIORITY_MASK 0x700
23 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
/* IANA-assigned UDP destination port for VXLAN (RFC 7348). */
24 #define ULP_UDP_PORT_VXLAN 4789
26 /* Utility function to skip the void items. */
/*
 * Advance *item past any RTE_FLOW_ITEM_TYPE_VOID entries.
 * 'increment' presumably controls stepping past the current item as
 * well — TODO confirm; the loop body is not fully visible here.
 */
28 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
34 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
41 /* Utility function to update the field_bitmap */
/*
 * Mark hdr_field[idx] as "in use" in params->fld_bitmap when its mask is
 * non-zero; additionally flag a wildcard match when the mask is not all
 * ones. A zero mask clears the index bit instead.
 */
43 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
46 struct ulp_rte_hdr_field *field;
48 field = &params->hdr_field[idx];
49 if (ulp_bitmap_notzero(field->mask, field->size)) {
50 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial mask => this field needs a wildcard-match table. */
52 if (!ulp_bitmap_is_ones(field->mask, field->size))
53 ULP_BITMAP_SET(params->fld_bitmap.bits,
54 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
/* else branch (mask all-zero): drop the field from the match. */
56 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
60 /* Utility function to copy field spec items */
/*
 * Copy 'size' bytes of spec data from 'buffer' into field->spec.
 * Presumably returns the next hdr_field slot so callers can chain
 * copies — TODO confirm; the return statement is not visible here.
 */
61 static struct ulp_rte_hdr_field *
62 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
67 memcpy(field->spec, buffer, field->size);
72 /* Utility function to copy field masks items */
/*
 * Copy 'size' bytes of mask data from 'buffer' into hdr_field[*idx].mask
 * and refresh the field bitmap for that index. The caller's index is
 * passed by pointer, presumably so it can be advanced — TODO confirm.
 */
74 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
79 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
81 memcpy(field->mask, buffer, size);
82 ulp_rte_parser_field_bitmap_update(params, *idx);
86 /* Utility function to ignore field masks items */
/*
 * Deliberate no-op counterpart of ulp_rte_prsr_mask_copy: used where a
 * mask field is intentionally skipped (e.g. unsupported wildcard fields)
 * while keeping the callers' copy/ignore call sequence symmetric.
 */
88 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
90 const void *buffer __rte_unused,
91 uint32_t size __rte_unused)
97 * Function to handle the parsing of RTE Flows and placing
98 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow pattern array and dispatches each item to the
 * handler registered in ulp_hdr_info[]. Returns BNXT_TF_RC_PARSE_ERR for
 * unsupported item types, BNXT_TF_RC_ERROR when a handler fails, and
 * otherwise the result of the implicit match-port processing.
 */
101 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
102 struct ulp_rte_parser_params *params)
104 const struct rte_flow_item *item = pattern;
105 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots used for the SVIF match. */
107 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
109 /* Set the computed flags for no vlan tags before parsing */
110 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
111 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
113 /* Parse all the items in the pattern */
114 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
115 /* get the header information from the flow_hdr_info table */
116 hdr_info = &ulp_hdr_info[item->type];
117 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
119 "Truflow parser does not support type %d\n",
121 return BNXT_TF_RC_PARSE_ERR;
122 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
123 /* call the registered callback handler */
124 if (hdr_info->proto_hdr_func) {
125 if (hdr_info->proto_hdr_func(item, params) !=
126 BNXT_TF_RC_SUCCESS) {
127 return BNXT_TF_RC_ERROR;
133 /* update the implied SVIF */
134 return ulp_rte_parser_implicit_match_port_process(params);
138 * Function to handle the parsing of RTE Flows and placing
139 * the RTE flow actions into the ulp structures.
/*
 * Walks the rte_flow action array and dispatches each action to the
 * handler registered in ulp_act_info[]. Mirrors the item parser above;
 * finishes by filling in the implicit action port.
 */
142 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
143 struct ulp_rte_parser_params *params)
145 const struct rte_flow_action *action_item = actions;
/* 'hdr_info' is really action info here; name kept for symmetry. */
146 struct bnxt_ulp_rte_act_info *hdr_info;
148 /* Parse all the items in the pattern */
149 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
150 /* get the header information from the flow_hdr_info table */
151 hdr_info = &ulp_act_info[action_item->type];
152 if (hdr_info->act_type ==
153 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
155 "Truflow parser does not support act %u\n",
157 return BNXT_TF_RC_ERROR;
158 } else if (hdr_info->act_type ==
159 BNXT_ULP_ACT_TYPE_SUPPORTED) {
160 /* call the registered callback handler */
161 if (hdr_info->proto_act_func) {
162 if (hdr_info->proto_act_func(action_item,
164 BNXT_TF_RC_SUCCESS) {
165 return BNXT_TF_RC_ERROR;
171 /* update the implied port details */
172 ulp_rte_parser_implicit_act_port_process(params);
173 return BNXT_TF_RC_SUCCESS;
177 * Function to handle the post processing of the computed
178 * fields for the interface.
/*
 * Resolves the incoming dpdk port to a port-db ifindex and populates the
 * PARIF computed fields. Ingress flows get the physical-port PARIF;
 * otherwise the VF-rep/DRV function PARIFs are written (the egress half
 * of the branch is only partially visible here).
 */
181 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
184 uint16_t port_id, parif;
186 enum bnxt_ulp_direction_type dir;
188 /* get the direction details */
189 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
191 /* read the port id details */
192 port_id = ULP_COMP_FLD_IDX_RD(params,
193 BNXT_ULP_CF_IDX_INCOMING_IF);
194 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
197 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
201 if (dir == BNXT_ULP_DIR_INGRESS) {
/* Ingress: match on the physical port's PARIF. */
203 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
204 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
205 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
208 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
211 /* Get the match port type */
212 mtype = ULP_COMP_FLD_IDX_RD(params,
213 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
214 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
215 ULP_COMP_FLD_IDX_WR(params,
216 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
218 /* Set VF func PARIF */
219 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
220 BNXT_ULP_VF_FUNC_PARIF,
223 "ParseErr:ifindex is not valid\n");
226 ULP_COMP_FLD_IDX_WR(params,
227 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
230 /* populate the loopback parif */
231 ULP_COMP_FLD_IDX_WR(params,
232 BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
233 BNXT_ULP_SYM_VF_FUNC_PARIF);
236 /* Set DRV func PARIF */
237 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
238 BNXT_ULP_DRV_FUNC_PARIF,
241 "ParseErr:ifindex is not valid\n");
244 ULP_COMP_FLD_IDX_WR(params,
245 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
/*
 * Post-process a parsed (non-tunnel) flow: set the direction bit in both
 * the header and action bitmaps, derive VF-to-VF and decrement-TTL
 * computed fields, fold the fast-path header bits into the main header
 * bitmap, and update the interface PARIF fields.
 */
252 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
254 enum bnxt_ulp_intf_type match_port_type, act_port_type;
255 enum bnxt_ulp_direction_type dir;
256 uint32_t act_port_set;
258 /* Get the computed details */
259 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
260 match_port_type = ULP_COMP_FLD_IDX_RD(params,
261 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
262 act_port_type = ULP_COMP_FLD_IDX_RD(params,
263 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
264 act_port_set = ULP_COMP_FLD_IDX_RD(params,
265 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
267 /* set the flow direction in the proto and action header */
268 if (dir == BNXT_ULP_DIR_EGRESS) {
269 ULP_BITMAP_SET(params->hdr_bitmap.bits,
270 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
271 ULP_BITMAP_SET(params->act_bitmap.bits,
272 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
275 /* calculate the VF to VF flag */
276 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
277 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
278 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
280 /* Update the decrement ttl computational fields */
281 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
282 BNXT_ULP_ACTION_BIT_DEC_TTL)) {
284 * Check that vxlan proto is included and vxlan decap
285 * action is not set then decrement tunnel ttl.
286 * Similarly add GRE and NVGRE in future.
288 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
289 BNXT_ULP_HDR_BIT_T_VXLAN) &&
290 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
291 BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
292 ULP_COMP_FLD_IDX_WR(params,
293 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
/* else branch: plain (non-tunnel) TTL decrement. */
295 ULP_COMP_FLD_IDX_WR(params,
296 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
300 /* Merge the hdr_fp_bit into the proto header bit */
301 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
303 /* Update the computed interface parameters */
304 bnxt_ulp_comp_fld_intf_update(params);
306 /* TBD: Handle the flow rejection scenarios */
311 * Function to handle the post processing of the parsing details
/*
 * Runs the normal-flow post processing unconditionally, then returns the
 * result of the tunnel-flow post processing (defined elsewhere).
 */
314 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
316 ulp_post_process_normal_flow(params);
317 return ulp_post_process_tun_flow(params);
321 * Function to compute the flow direction based on the match port details
/*
 * Writes BNXT_ULP_CF_IDX_DIRECTION: an ingress-attributed flow whose
 * match port is a VF representor is treated as egress; otherwise the
 * direction follows the flow's dir_attr.
 */
324 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
326 enum bnxt_ulp_intf_type match_port_type;
328 /* Get the match port type */
329 match_port_type = ULP_COMP_FLD_IDX_RD(params,
330 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
332 /* If ingress flow and matchport is vf rep then dir is egress*/
333 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
334 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
335 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
336 BNXT_ULP_DIR_EGRESS);
338 /* Assign the input direction */
339 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
340 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
341 BNXT_ULP_DIR_INGRESS);
/* else: default to egress. */
343 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
344 BNXT_ULP_DIR_EGRESS);
348 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolve the SVIF for 'ifindex' and install it (big-endian) into the
 * dedicated SVIF hdr_field slot together with the caller's mask. Fails
 * if a source port match was already recorded. Also records the match
 * port type and computes the flow direction as side effects.
 */
350 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
355 enum bnxt_ulp_direction_type dir;
356 struct ulp_rte_hdr_field *hdr_field;
357 enum bnxt_ulp_svif_type svif_type;
358 enum bnxt_ulp_intf_type port_type;
/* Only one source-port match is permitted per flow. */
360 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
361 BNXT_ULP_INVALID_SVIF_VAL) {
363 "SVIF already set,multiple source not support'd\n");
364 return BNXT_TF_RC_ERROR;
367 /* Get port type details */
368 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
369 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
370 BNXT_TF_DBG(ERR, "Invalid port type\n");
371 return BNXT_TF_RC_ERROR;
374 /* Update the match port type */
375 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
377 /* compute the direction */
378 bnxt_ulp_rte_parser_direction_compute(params);
380 /* Get the computed direction */
381 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
382 if (dir == BNXT_ULP_DIR_INGRESS) {
383 svif_type = BNXT_ULP_PHY_PORT_SVIF;
/* else (egress): VF reps use the VF function SVIF, others DRV. */
385 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
386 svif_type = BNXT_ULP_VF_FUNC_SVIF;
388 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
390 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
392 svif = rte_cpu_to_be_16(svif);
393 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
394 memcpy(hdr_field->spec, &svif, sizeof(svif));
395 memcpy(hdr_field->mask, &mask, sizeof(mask));
396 hdr_field->size = sizeof(svif);
/* Remember the (host-order) SVIF so later items detect duplicates. */
397 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
398 rte_be_to_cpu_16(svif));
399 return BNXT_TF_RC_SUCCESS;
402 /* Function to handle the parsing of the RTE port id */
/*
 * If no explicit source port item set the SVIF, derive it from the
 * incoming interface with a full (0xFFFF) mask.
 */
404 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
406 uint16_t port_id = 0;
407 uint16_t svif_mask = 0xFFFF;
409 int32_t rc = BNXT_TF_RC_ERROR;
/* SVIF already populated by an explicit item: nothing to do. */
411 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
412 BNXT_ULP_INVALID_SVIF_VAL)
413 return BNXT_TF_RC_SUCCESS;
415 /* SVIF not set. So get the port id */
416 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
418 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
421 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
425 /* Update the SVIF details */
426 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
430 /* Function to handle the implicit action port id */
/*
 * If the flow's actions did not name a destination port, synthesize a
 * PORT_ID action from the incoming interface and run it through the
 * normal port-id action handler, then clear the "port set" flag so the
 * port remains implicit.
 */
432 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
434 struct rte_flow_action action_item = {0};
435 struct rte_flow_action_port_id port_id = {0};
437 /* Read the action port set bit */
438 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
439 /* Already set, so just exit */
440 return BNXT_TF_RC_SUCCESS;
442 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
443 action_item.conf = &port_id;
445 /* Update the action port based on incoming port */
446 ulp_rte_port_id_act_handler(&action_item, params);
448 /* Reset the action port set bit */
449 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
450 return BNXT_TF_RC_SUCCESS;
453 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item handler: the item carries no spec, so the SVIF is taken from
 * the incoming dpdk port with an exact-match (0xFFFF) mask.
 */
455 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
456 struct ulp_rte_parser_params *params)
458 uint16_t port_id = 0;
459 uint16_t svif_mask = 0xFFFF;
462 /* Get the implicit port id */
463 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
465 /* perform the conversion from dpdk port to bnxt ifindex */
466 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
469 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
470 return BNXT_TF_RC_ERROR;
473 /* Update the SVIF details */
474 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
477 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF item handler: validates the VF spec/mask (validation bodies are
 * only partially visible here), converts the VF function id to a
 * port-db ifindex and installs the SVIF match.
 */
479 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
480 struct ulp_rte_parser_params *params)
482 const struct rte_flow_item_vf *vf_spec = item->spec;
483 const struct rte_flow_item_vf *vf_mask = item->mask;
486 int32_t rc = BNXT_TF_RC_PARSE_ERR;
488 /* Get VF rte_flow_item for Port details */
490 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
494 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
499 /* perform the conversion from VF Func id to bnxt ifindex */
500 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
503 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
506 /* Update the SVIF details */
507 return ulp_rte_parser_svif_set(params, ifindex, mask);
510 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * PORT_ID item handler: validates spec/mask (checks only partially
 * visible), maps the dpdk port id to a port-db ifindex and installs the
 * SVIF match using the item's mask.
 */
512 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
513 struct ulp_rte_parser_params *params)
515 const struct rte_flow_item_port_id *port_spec = item->spec;
516 const struct rte_flow_item_port_id *port_mask = item->mask;
518 int32_t rc = BNXT_TF_RC_PARSE_ERR;
522 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
526 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
529 mask = port_mask->id;
531 /* perform the conversion from dpdk port to bnxt ifindex */
532 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
535 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
538 /* Update the SVIF details */
539 return ulp_rte_parser_svif_set(params, ifindex, mask);
542 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * PHY_PORT item handler: only valid on ingress flows. Looks up the
 * physical port's SVIF directly (bypassing ulp_rte_parser_svif_set) and
 * writes it into the SVIF hdr_field slot in big-endian form.
 */
544 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
545 struct ulp_rte_parser_params *params)
547 const struct rte_flow_item_phy_port *port_spec = item->spec;
548 const struct rte_flow_item_phy_port *port_mask = item->mask;
550 int32_t rc = BNXT_TF_RC_ERROR;
552 enum bnxt_ulp_direction_type dir;
553 struct ulp_rte_hdr_field *hdr_field;
555 /* Copy the rte_flow_item for phy port into hdr_field */
557 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
561 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
564 mask = port_mask->index;
566 /* Update the match port type */
567 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
568 BNXT_ULP_INTF_TYPE_PHY_PORT);
570 /* Compute the Hw direction */
571 bnxt_ulp_rte_parser_direction_compute(params);
573 /* Direction validation */
574 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
575 if (dir == BNXT_ULP_DIR_EGRESS) {
577 "Parse Err:Phy ports are valid only for ingress\n");
578 return BNXT_TF_RC_PARSE_ERR;
581 /* Get the physical port details from port db */
582 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
585 BNXT_TF_DBG(ERR, "Failed to get port details\n");
586 return BNXT_TF_RC_PARSE_ERR;
589 /* Update the SVIF details */
590 svif = rte_cpu_to_be_16(svif);
591 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
592 memcpy(hdr_field->spec, &svif, sizeof(svif));
593 memcpy(hdr_field->mask, &mask, sizeof(mask));
594 hdr_field->size = sizeof(svif);
/* Record the SVIF so later source-port items are rejected. */
595 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
596 rte_be_to_cpu_16(svif));
597 return BNXT_TF_RC_SUCCESS;
600 /* Function to handle the update of proto header based on field values */
/*
 * From an Ethernet type (big-endian), set the inner/outer IPv4 or IPv6
 * fast-path header bit and the matching L3 computed field. 'in_flag'
 * selects inner (non-zero) vs outer.
 */
602 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
603 uint16_t type, uint32_t in_flag)
605 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
607 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
608 BNXT_ULP_HDR_BIT_I_IPV4);
609 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
611 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
612 BNXT_ULP_HDR_BIT_O_IPV4);
613 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
615 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
617 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
618 BNXT_ULP_HDR_BIT_I_IPV6);
619 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
621 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
622 BNXT_ULP_HDR_BIT_O_IPV6);
623 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
628 /* Internal Function to identify broadcast or multicast packets */
/*
 * Returns truthy (exact value not visible here) for multicast or
 * broadcast MAC addresses — callers use this to reject such matches
 * since bcast/mcast offload is not supported.
 */
630 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
632 if (rte_is_multicast_ether_addr(eth_addr) ||
633 rte_is_broadcast_ether_addr(eth_addr)) {
635 "No support for bcast or mcast addr offload\n");
641 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * ETH item handler: copies dst/src MAC and ether type spec+mask into
 * hdr_field, rejects bcast/mcast addresses, reserves VLAN hdr_field
 * slots after the eth fields, and marks the header as inner or outer
 * ETH depending on what has been parsed so far.
 */
643 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
644 struct ulp_rte_parser_params *params)
646 const struct rte_flow_item_eth *eth_spec = item->spec;
647 const struct rte_flow_item_eth *eth_mask = item->mask;
648 struct ulp_rte_hdr_field *field;
649 uint32_t idx = params->field_idx;
651 uint16_t eth_type = 0;
652 uint32_t inner_flag = 0;
655 * Copy the rte_flow_item for eth into hdr_field using ethernet
659 size = sizeof(eth_spec->dst.addr_bytes);
660 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
661 eth_spec->dst.addr_bytes,
663 /* Todo: work around to avoid multicast and broadcast addr */
664 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
665 return BNXT_TF_RC_PARSE_ERR;
667 size = sizeof(eth_spec->src.addr_bytes);
668 field = ulp_rte_parser_fld_copy(field,
669 eth_spec->src.addr_bytes,
671 /* Todo: work around to avoid multicast and broadcast addr */
672 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
673 return BNXT_TF_RC_PARSE_ERR;
675 field = ulp_rte_parser_fld_copy(field,
677 sizeof(eth_spec->type));
/* Keep the (big-endian) ether type for L3 bitmap derivation below. */
678 eth_type = eth_spec->type;
681 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
682 sizeof(eth_mask->dst.addr_bytes));
683 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
684 sizeof(eth_mask->src.addr_bytes));
685 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
686 sizeof(eth_mask->type));
688 /* Add number of vlan header elements */
689 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
690 params->vlan_idx = params->field_idx;
691 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
693 /* Update the protocol hdr bitmap */
/* Any prior outer L2/L3/L4 header means this ETH is the inner one. */
694 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
695 BNXT_ULP_HDR_BIT_O_ETH) ||
696 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697 BNXT_ULP_HDR_BIT_O_IPV4) ||
698 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699 BNXT_ULP_HDR_BIT_O_IPV6) ||
700 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
701 BNXT_ULP_HDR_BIT_O_UDP) ||
702 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
703 BNXT_ULP_HDR_BIT_O_TCP)) {
704 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
707 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
709 /* Update the field protocol hdr bitmap */
710 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
712 return BNXT_TF_RC_SUCCESS;
715 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * VLAN item handler: splits the TCI into priority and 12-bit tag, copies
 * spec+mask into the pre-reserved vlan hdr_field slots, then bumps the
 * outer/inner vlan-tag counters and sets the OO/OI/IO/II vlan bits
 * according to which ETH headers have been seen and how many tags were
 * already counted. NOTE(review): the "withtout" typo in the error log
 * string below is runtime text and is left untouched by this doc pass.
 */
717 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
718 struct ulp_rte_parser_params *params)
720 const struct rte_flow_item_vlan *vlan_spec = item->spec;
721 const struct rte_flow_item_vlan *vlan_mask = item->mask;
722 struct ulp_rte_hdr_field *field;
723 struct ulp_rte_hdr_bitmap *hdr_bit;
724 uint32_t idx = params->vlan_idx;
725 uint16_t vlan_tag, priority;
726 uint32_t outer_vtag_num;
727 uint32_t inner_vtag_num;
728 uint16_t eth_type = 0;
729 uint32_t inner_flag = 0;
732 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* Split the TCI: top 3 bits = PCP priority, low 12 bits = VLAN id. */
736 vlan_tag = ntohs(vlan_spec->tci);
737 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
738 vlan_tag &= ULP_VLAN_TAG_MASK;
739 vlan_tag = htons(vlan_tag);
741 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
744 field = ulp_rte_parser_fld_copy(field,
747 field = ulp_rte_parser_fld_copy(field,
748 &vlan_spec->inner_type,
749 sizeof(vlan_spec->inner_type));
750 eth_type = vlan_spec->inner_type;
/* Same TCI split for the mask. */
754 vlan_tag = ntohs(vlan_mask->tci);
755 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
759 * the storage for priority and vlan tag is 2 bytes
760 * The mask of priority which is 3 bits if it is all 1's
761 * then make the rest bits 13 bits as 1's
762 * so that it is matched as exact match.
764 if (priority == ULP_VLAN_PRIORITY_MASK)
765 priority |= ~ULP_VLAN_PRIORITY_MASK;
766 if (vlan_tag == ULP_VLAN_TAG_MASK)
767 vlan_tag |= ~ULP_VLAN_TAG_MASK;
768 vlan_tag = htons(vlan_tag);
771 * The priority field is ignored since OVS is setting it as
772 * wild card match and it is not supported. This is a work
773 * around and shall be addressed in the future.
775 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
778 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
780 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
781 sizeof(vlan_mask->inner_type));
783 /* Set the vlan index to new incremented value */
784 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
786 /* Get the outer tag and inner tag counts */
787 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
788 BNXT_ULP_CF_IDX_O_VTAG_NUM);
789 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
790 BNXT_ULP_CF_IDX_I_VTAG_NUM);
792 /* Update the hdr_bitmap of the vlans */
793 hdr_bit = &params->hdr_bitmap;
/* Case 1: first tag after outer ETH only -> outer-outer vlan. */
794 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
795 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
797 /* Update the vlan tag num */
799 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
801 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
802 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
803 ULP_BITMAP_SET(params->hdr_bitmap.bits,
804 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Case 2: second tag after outer ETH only -> outer-inner vlan. */
805 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
806 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
807 outer_vtag_num == 1) {
808 /* update the vlan tag num */
810 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
812 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
813 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
814 ULP_BITMAP_SET(params->hdr_bitmap.bits,
815 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Case 3: first tag after inner ETH -> inner-outer vlan. */
816 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
817 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
819 /* update the vlan tag num */
821 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
823 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
824 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
825 ULP_BITMAP_SET(params->hdr_bitmap.bits,
826 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Case 4: second tag after inner ETH -> inner-inner vlan. */
828 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
829 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
830 inner_vtag_num == 1) {
831 /* update the vlan tag num */
833 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
835 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
836 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
837 ULP_BITMAP_SET(params->hdr_bitmap.bits,
838 BNXT_ULP_HDR_BIT_II_VLAN);
841 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
842 return BNXT_TF_RC_ERROR;
844 /* Update the field protocol hdr bitmap */
845 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
846 return BNXT_TF_RC_SUCCESS;
849 /* Function to handle the update of proto header based on field values */
/*
 * From an IP protocol number, set the inner/outer UDP or TCP fast-path
 * header bit and the matching L4 computed field. 'in_flag' selects
 * inner (non-zero) vs outer.
 */
851 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
852 uint8_t proto, uint32_t in_flag)
854 if (proto == IPPROTO_UDP) {
856 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
857 BNXT_ULP_HDR_BIT_I_UDP);
858 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
860 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
861 BNXT_ULP_HDR_BIT_O_UDP);
862 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
864 } else if (proto == IPPROTO_TCP) {
866 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
867 BNXT_ULP_HDR_BIT_I_TCP);
868 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
870 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
871 BNXT_ULP_HDR_BIT_O_TCP);
872 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
877 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * IPV4 item handler: rejects a third L3 header, compensates the field
 * index for headerless (F2) flows, copies each ipv4 header field's
 * spec and mask into hdr_field, then classifies the header as inner or
 * outer IPv4 and propagates next_proto_id to the L4 bitmap.
 */
879 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
880 struct ulp_rte_parser_params *params)
882 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
883 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
884 struct ulp_rte_hdr_field *field;
885 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
886 uint32_t idx = params->field_idx;
889 uint32_t inner_flag = 0;
892 /* validate there are no 3rd L3 header */
893 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
895 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
896 return BNXT_TF_RC_ERROR;
899 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
900 BNXT_ULP_HDR_BIT_O_ETH) &&
901 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
902 BNXT_ULP_HDR_BIT_I_ETH)) {
903 /* Since F2 flow does not include eth item, when parser detects
904 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
905 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
906 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
907 * This will allow the parser post processor to update the
908 * t_dmac in hdr_field[o_eth.dmac]
910 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
911 BNXT_ULP_PROTO_HDR_VLAN_NUM);
912 params->field_idx = idx;
916 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
/* One fld_copy per ipv4 header field, in wire order. */
920 size = sizeof(ipv4_spec->hdr.version_ihl);
921 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
922 &ipv4_spec->hdr.version_ihl,
924 size = sizeof(ipv4_spec->hdr.type_of_service);
925 field = ulp_rte_parser_fld_copy(field,
926 &ipv4_spec->hdr.type_of_service,
928 size = sizeof(ipv4_spec->hdr.total_length);
929 field = ulp_rte_parser_fld_copy(field,
930 &ipv4_spec->hdr.total_length,
932 size = sizeof(ipv4_spec->hdr.packet_id);
933 field = ulp_rte_parser_fld_copy(field,
934 &ipv4_spec->hdr.packet_id,
936 size = sizeof(ipv4_spec->hdr.fragment_offset);
937 field = ulp_rte_parser_fld_copy(field,
938 &ipv4_spec->hdr.fragment_offset,
940 size = sizeof(ipv4_spec->hdr.time_to_live);
941 field = ulp_rte_parser_fld_copy(field,
942 &ipv4_spec->hdr.time_to_live,
944 size = sizeof(ipv4_spec->hdr.next_proto_id);
945 field = ulp_rte_parser_fld_copy(field,
946 &ipv4_spec->hdr.next_proto_id,
/* Saved for the L4 fast-path bitmap update at the end. */
948 proto = ipv4_spec->hdr.next_proto_id;
949 size = sizeof(ipv4_spec->hdr.hdr_checksum);
950 field = ulp_rte_parser_fld_copy(field,
951 &ipv4_spec->hdr.hdr_checksum,
953 size = sizeof(ipv4_spec->hdr.src_addr);
954 field = ulp_rte_parser_fld_copy(field,
955 &ipv4_spec->hdr.src_addr,
957 size = sizeof(ipv4_spec->hdr.dst_addr);
958 field = ulp_rte_parser_fld_copy(field,
959 &ipv4_spec->hdr.dst_addr,
963 ulp_rte_prsr_mask_copy(params, &idx,
964 &ipv4_mask->hdr.version_ihl,
965 sizeof(ipv4_mask->hdr.version_ihl));
967 * The tos field is ignored since OVS is setting it as wild card
968 * match and it is not supported. This is a work around and
969 * shall be addressed in the future.
971 ulp_rte_prsr_mask_ignore(params, &idx,
972 &ipv4_mask->hdr.type_of_service,
973 sizeof(ipv4_mask->hdr.type_of_service)
976 ulp_rte_prsr_mask_copy(params, &idx,
977 &ipv4_mask->hdr.total_length,
978 sizeof(ipv4_mask->hdr.total_length));
979 ulp_rte_prsr_mask_copy(params, &idx,
980 &ipv4_mask->hdr.packet_id,
981 sizeof(ipv4_mask->hdr.packet_id));
982 ulp_rte_prsr_mask_copy(params, &idx,
983 &ipv4_mask->hdr.fragment_offset,
984 sizeof(ipv4_mask->hdr.fragment_offset));
985 ulp_rte_prsr_mask_copy(params, &idx,
986 &ipv4_mask->hdr.time_to_live,
987 sizeof(ipv4_mask->hdr.time_to_live));
988 ulp_rte_prsr_mask_copy(params, &idx,
989 &ipv4_mask->hdr.next_proto_id,
990 sizeof(ipv4_mask->hdr.next_proto_id));
991 ulp_rte_prsr_mask_copy(params, &idx,
992 &ipv4_mask->hdr.hdr_checksum,
993 sizeof(ipv4_mask->hdr.hdr_checksum));
994 ulp_rte_prsr_mask_copy(params, &idx,
995 &ipv4_mask->hdr.src_addr,
996 sizeof(ipv4_mask->hdr.src_addr));
997 ulp_rte_prsr_mask_copy(params, &idx,
998 &ipv4_mask->hdr.dst_addr,
999 sizeof(ipv4_mask->hdr.dst_addr));
1001 /* Add the number of ipv4 header elements */
1002 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
1004 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1005 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1006 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1007 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1008 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1011 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1012 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1015 /* Update the field protocol hdr bitmap */
1016 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1017 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1018 return BNXT_TF_RC_SUCCESS;
1021 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPV6 item handler: mirrors the ipv4 handler — rejects a third L3
 * header, compensates the index for headerless (F2) flows, splits
 * vtc_flow into version/TC/flow-label sub-fields (TC and flow label
 * masks are intentionally ignored), copies the remaining fields, then
 * classifies the header as inner or outer IPv6 and propagates 'proto'.
 */
1023 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1024 struct ulp_rte_parser_params *params)
1026 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1027 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1028 struct ulp_rte_hdr_field *field;
1029 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1030 uint32_t idx = params->field_idx;
1032 uint32_t vtcf, vtcf_mask;
1034 uint32_t inner_flag = 0;
1037 /* validate there are no 3rd L3 header */
1038 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1040 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1041 return BNXT_TF_RC_ERROR;
1044 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1045 BNXT_ULP_HDR_BIT_O_ETH) &&
1046 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1047 BNXT_ULP_HDR_BIT_I_ETH)) {
1048 /* Since F2 flow does not include eth item, when parser detects
1049 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1050 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
1051 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1052 * This will allow the parser post processor to update the
1053 * t_dmac in hdr_field[o_eth.dmac]
1055 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1056 BNXT_ULP_PROTO_HDR_VLAN_NUM);
1057 params->field_idx = idx;
1061 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* vtc_flow is decomposed into three separate match sub-fields. */
1065 size = sizeof(ipv6_spec->hdr.vtc_flow);
1067 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1068 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1072 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1073 field = ulp_rte_parser_fld_copy(field,
1077 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1078 field = ulp_rte_parser_fld_copy(field,
1082 size = sizeof(ipv6_spec->hdr.payload_len);
1083 field = ulp_rte_parser_fld_copy(field,
1084 &ipv6_spec->hdr.payload_len,
1086 size = sizeof(ipv6_spec->hdr.proto);
1087 field = ulp_rte_parser_fld_copy(field,
1088 &ipv6_spec->hdr.proto,
/* Saved for the L4 fast-path bitmap update at the end. */
1090 proto = ipv6_spec->hdr.proto;
1091 size = sizeof(ipv6_spec->hdr.hop_limits);
1092 field = ulp_rte_parser_fld_copy(field,
1093 &ipv6_spec->hdr.hop_limits,
1095 size = sizeof(ipv6_spec->hdr.src_addr);
1096 field = ulp_rte_parser_fld_copy(field,
1097 &ipv6_spec->hdr.src_addr,
1099 size = sizeof(ipv6_spec->hdr.dst_addr);
1100 field = ulp_rte_parser_fld_copy(field,
1101 &ipv6_spec->hdr.dst_addr,
1105 size = sizeof(ipv6_mask->hdr.vtc_flow);
1107 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1108 ulp_rte_prsr_mask_copy(params, &idx,
1112 * The TC and flow label field are ignored since OVS is
1113 * setting it for match and it is not supported.
1114 * This is a work around and
1115 * shall be addressed in the future.
1117 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1118 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1120 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1121 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1123 ulp_rte_prsr_mask_copy(params, &idx,
1124 &ipv6_mask->hdr.payload_len,
1125 sizeof(ipv6_mask->hdr.payload_len));
1126 ulp_rte_prsr_mask_copy(params, &idx,
1127 &ipv6_mask->hdr.proto,
1128 sizeof(ipv6_mask->hdr.proto));
1129 ulp_rte_prsr_mask_copy(params, &idx,
1130 &ipv6_mask->hdr.hop_limits,
1131 sizeof(ipv6_mask->hdr.hop_limits));
1132 ulp_rte_prsr_mask_copy(params, &idx,
1133 &ipv6_mask->hdr.src_addr,
1134 sizeof(ipv6_mask->hdr.src_addr));
1135 ulp_rte_prsr_mask_copy(params, &idx,
1136 &ipv6_mask->hdr.dst_addr,
1137 sizeof(ipv6_mask->hdr.dst_addr));
1139 /* add number of ipv6 header elements */
1140 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1142 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1143 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1144 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1145 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1146 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1149 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1150 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1153 /* Update the field protocol hdr bitmap */
1154 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1155 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1157 return BNXT_TF_RC_SUCCESS;
1160 /* Function to handle the update of proto header based on field values */
1162 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1165 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1166 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1167 BNXT_ULP_HDR_BIT_T_VXLAN);
1168 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1172 /* Function to handle the parsing of RTE Flow item UDP Header. */
1174 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1175 struct ulp_rte_parser_params *params)
1177 const struct rte_flow_item_udp *udp_spec = item->spec;
1178 const struct rte_flow_item_udp *udp_mask = item->mask;
1179 struct ulp_rte_hdr_field *field;
1180 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1181 uint32_t idx = params->field_idx;
1183 uint16_t dst_port = 0;
1186 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1188 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1189 return BNXT_TF_RC_ERROR;
1193 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1197 size = sizeof(udp_spec->hdr.src_port);
1198 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1199 &udp_spec->hdr.src_port,
1202 size = sizeof(udp_spec->hdr.dst_port);
1203 field = ulp_rte_parser_fld_copy(field,
1204 &udp_spec->hdr.dst_port,
1206 dst_port = udp_spec->hdr.dst_port;
1207 size = sizeof(udp_spec->hdr.dgram_len);
1208 field = ulp_rte_parser_fld_copy(field,
1209 &udp_spec->hdr.dgram_len,
1211 size = sizeof(udp_spec->hdr.dgram_cksum);
1212 field = ulp_rte_parser_fld_copy(field,
1213 &udp_spec->hdr.dgram_cksum,
1217 ulp_rte_prsr_mask_copy(params, &idx,
1218 &udp_mask->hdr.src_port,
1219 sizeof(udp_mask->hdr.src_port));
1220 ulp_rte_prsr_mask_copy(params, &idx,
1221 &udp_mask->hdr.dst_port,
1222 sizeof(udp_mask->hdr.dst_port));
1223 ulp_rte_prsr_mask_copy(params, &idx,
1224 &udp_mask->hdr.dgram_len,
1225 sizeof(udp_mask->hdr.dgram_len));
1226 ulp_rte_prsr_mask_copy(params, &idx,
1227 &udp_mask->hdr.dgram_cksum,
1228 sizeof(udp_mask->hdr.dgram_cksum));
1231 /* Add number of UDP header elements */
1232 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1234 /* Set the udp header bitmap and computed l4 header bitmaps */
1235 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1236 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1237 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1238 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1240 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1241 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1242 /* Update the field protocol hdr bitmap */
1243 ulp_rte_l4_proto_type_update(params, dst_port);
1245 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1246 return BNXT_TF_RC_SUCCESS;
1249 /* Function to handle the parsing of RTE Flow item TCP Header. */
1251 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1252 struct ulp_rte_parser_params *params)
1254 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1255 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1256 struct ulp_rte_hdr_field *field;
1257 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1258 uint32_t idx = params->field_idx;
1262 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1264 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1265 return BNXT_TF_RC_ERROR;
1269 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1273 size = sizeof(tcp_spec->hdr.src_port);
1274 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1275 &tcp_spec->hdr.src_port,
1277 size = sizeof(tcp_spec->hdr.dst_port);
1278 field = ulp_rte_parser_fld_copy(field,
1279 &tcp_spec->hdr.dst_port,
1281 size = sizeof(tcp_spec->hdr.sent_seq);
1282 field = ulp_rte_parser_fld_copy(field,
1283 &tcp_spec->hdr.sent_seq,
1285 size = sizeof(tcp_spec->hdr.recv_ack);
1286 field = ulp_rte_parser_fld_copy(field,
1287 &tcp_spec->hdr.recv_ack,
1289 size = sizeof(tcp_spec->hdr.data_off);
1290 field = ulp_rte_parser_fld_copy(field,
1291 &tcp_spec->hdr.data_off,
1293 size = sizeof(tcp_spec->hdr.tcp_flags);
1294 field = ulp_rte_parser_fld_copy(field,
1295 &tcp_spec->hdr.tcp_flags,
1297 size = sizeof(tcp_spec->hdr.rx_win);
1298 field = ulp_rte_parser_fld_copy(field,
1299 &tcp_spec->hdr.rx_win,
1301 size = sizeof(tcp_spec->hdr.cksum);
1302 field = ulp_rte_parser_fld_copy(field,
1303 &tcp_spec->hdr.cksum,
1305 size = sizeof(tcp_spec->hdr.tcp_urp);
1306 field = ulp_rte_parser_fld_copy(field,
1307 &tcp_spec->hdr.tcp_urp,
1310 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1314 ulp_rte_prsr_mask_copy(params, &idx,
1315 &tcp_mask->hdr.src_port,
1316 sizeof(tcp_mask->hdr.src_port));
1317 ulp_rte_prsr_mask_copy(params, &idx,
1318 &tcp_mask->hdr.dst_port,
1319 sizeof(tcp_mask->hdr.dst_port));
1320 ulp_rte_prsr_mask_copy(params, &idx,
1321 &tcp_mask->hdr.sent_seq,
1322 sizeof(tcp_mask->hdr.sent_seq));
1323 ulp_rte_prsr_mask_copy(params, &idx,
1324 &tcp_mask->hdr.recv_ack,
1325 sizeof(tcp_mask->hdr.recv_ack));
1326 ulp_rte_prsr_mask_copy(params, &idx,
1327 &tcp_mask->hdr.data_off,
1328 sizeof(tcp_mask->hdr.data_off));
1329 ulp_rte_prsr_mask_copy(params, &idx,
1330 &tcp_mask->hdr.tcp_flags,
1331 sizeof(tcp_mask->hdr.tcp_flags));
1332 ulp_rte_prsr_mask_copy(params, &idx,
1333 &tcp_mask->hdr.rx_win,
1334 sizeof(tcp_mask->hdr.rx_win));
1335 ulp_rte_prsr_mask_copy(params, &idx,
1336 &tcp_mask->hdr.cksum,
1337 sizeof(tcp_mask->hdr.cksum));
1338 ulp_rte_prsr_mask_copy(params, &idx,
1339 &tcp_mask->hdr.tcp_urp,
1340 sizeof(tcp_mask->hdr.tcp_urp));
1342 /* add number of TCP header elements */
1343 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1345 /* Set the udp header bitmap and computed l4 header bitmaps */
1346 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1347 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1348 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1349 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1351 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1352 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1354 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1355 return BNXT_TF_RC_SUCCESS;
1358 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1360 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1361 struct ulp_rte_parser_params *params)
1363 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1364 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1365 struct ulp_rte_hdr_field *field;
1366 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1367 uint32_t idx = params->field_idx;
1371 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1375 size = sizeof(vxlan_spec->flags);
1376 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1379 size = sizeof(vxlan_spec->rsvd0);
1380 field = ulp_rte_parser_fld_copy(field,
1383 size = sizeof(vxlan_spec->vni);
1384 field = ulp_rte_parser_fld_copy(field,
1387 size = sizeof(vxlan_spec->rsvd1);
1388 field = ulp_rte_parser_fld_copy(field,
1393 ulp_rte_prsr_mask_copy(params, &idx,
1395 sizeof(vxlan_mask->flags));
1396 ulp_rte_prsr_mask_copy(params, &idx,
1398 sizeof(vxlan_mask->rsvd0));
1399 ulp_rte_prsr_mask_copy(params, &idx,
1401 sizeof(vxlan_mask->vni));
1402 ulp_rte_prsr_mask_copy(params, &idx,
1404 sizeof(vxlan_mask->rsvd1));
1406 /* Add number of vxlan header elements */
1407 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1409 /* Update the hdr_bitmap with vxlan */
1410 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1411 return BNXT_TF_RC_SUCCESS;
1414 /* Function to handle the parsing of RTE Flow item void Header */
1416 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1417 struct ulp_rte_parser_params *params __rte_unused)
1419 return BNXT_TF_RC_SUCCESS;
1422 /* Function to handle the parsing of RTE Flow action void Header. */
1424 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1425 struct ulp_rte_parser_params *params __rte_unused)
1427 return BNXT_TF_RC_SUCCESS;
1430 /* Function to handle the parsing of RTE Flow action Mark Header. */
1432 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1433 struct ulp_rte_parser_params *param)
1435 const struct rte_flow_action_mark *mark;
1436 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1439 mark = action_item->conf;
1441 mark_id = tfp_cpu_to_be_32(mark->id);
1442 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1443 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1445 /* Update the hdr_bitmap with vxlan */
1446 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1447 return BNXT_TF_RC_SUCCESS;
1449 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1450 return BNXT_TF_RC_ERROR;
1453 /* Function to handle the parsing of RTE Flow action RSS Header. */
1455 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1456 struct ulp_rte_parser_params *param)
1458 const struct rte_flow_action_rss *rss = action_item->conf;
1461 /* Update the hdr_bitmap with vxlan */
1462 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1463 return BNXT_TF_RC_SUCCESS;
1465 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1466 return BNXT_TF_RC_ERROR;
1469 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1471 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1472 struct ulp_rte_parser_params *params)
1474 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1475 const struct rte_flow_item *item;
1476 const struct rte_flow_item_eth *eth_spec;
1477 const struct rte_flow_item_ipv4 *ipv4_spec;
1478 const struct rte_flow_item_ipv6 *ipv6_spec;
1479 struct rte_flow_item_vxlan vxlan_spec;
1480 uint32_t vlan_num = 0, vlan_size = 0;
1481 uint32_t ip_size = 0, ip_type = 0;
1482 uint32_t vxlan_size = 0;
1484 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1485 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1487 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1488 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1490 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1491 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1492 const uint8_t *tmp_buff;
1494 vxlan_encap = action_item->conf;
1496 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1497 return BNXT_TF_RC_ERROR;
1500 item = vxlan_encap->definition;
1502 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1503 return BNXT_TF_RC_ERROR;
1506 if (!ulp_rte_item_skip_void(&item, 0))
1507 return BNXT_TF_RC_ERROR;
1509 /* must have ethernet header */
1510 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1511 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1512 return BNXT_TF_RC_ERROR;
1514 eth_spec = item->spec;
1515 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1516 ulp_encap_buffer_copy(buff,
1517 eth_spec->dst.addr_bytes,
1518 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1519 ULP_BUFFER_ALIGN_8_BYTE);
1521 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1522 ulp_encap_buffer_copy(buff,
1523 eth_spec->src.addr_bytes,
1524 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1525 ULP_BUFFER_ALIGN_8_BYTE);
1527 /* Goto the next item */
1528 if (!ulp_rte_item_skip_void(&item, 1))
1529 return BNXT_TF_RC_ERROR;
1531 /* May have vlan header */
1532 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1534 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1535 ulp_encap_buffer_copy(buff,
1537 sizeof(struct rte_flow_item_vlan),
1538 ULP_BUFFER_ALIGN_8_BYTE);
1540 if (!ulp_rte_item_skip_void(&item, 1))
1541 return BNXT_TF_RC_ERROR;
1544 /* may have two vlan headers */
1545 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1547 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1548 sizeof(struct rte_flow_item_vlan)],
1550 sizeof(struct rte_flow_item_vlan));
1551 if (!ulp_rte_item_skip_void(&item, 1))
1552 return BNXT_TF_RC_ERROR;
1554 /* Update the vlan count and size of more than one */
1556 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1557 vlan_num = tfp_cpu_to_be_32(vlan_num);
1558 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1561 vlan_size = tfp_cpu_to_be_32(vlan_size);
1562 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1567 /* L3 must be IPv4, IPv6 */
1568 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1569 ipv4_spec = item->spec;
1570 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1572 /* copy the ipv4 details */
1573 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1574 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1575 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1576 ulp_encap_buffer_copy(buff,
1578 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1579 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1580 ULP_BUFFER_ALIGN_8_BYTE);
1582 /* Total length being ignored in the ip hdr. */
1583 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1584 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1585 ulp_encap_buffer_copy(buff,
1587 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1588 ULP_BUFFER_ALIGN_8_BYTE);
1589 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1590 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1591 ulp_encap_buffer_copy(buff,
1592 &ipv4_spec->hdr.version_ihl,
1593 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1594 ULP_BUFFER_ALIGN_8_BYTE);
1597 /* Update the dst ip address in ip encap buffer */
1598 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1599 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1600 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1601 ulp_encap_buffer_copy(buff,
1602 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1603 sizeof(ipv4_spec->hdr.dst_addr),
1604 ULP_BUFFER_ALIGN_8_BYTE);
1606 /* Update the src ip address */
1607 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1608 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1609 sizeof(ipv4_spec->hdr.src_addr)];
1610 ulp_encap_buffer_copy(buff,
1611 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1612 sizeof(ipv4_spec->hdr.src_addr),
1613 ULP_BUFFER_ALIGN_8_BYTE);
1615 /* Update the ip size details */
1616 ip_size = tfp_cpu_to_be_32(ip_size);
1617 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1618 &ip_size, sizeof(uint32_t));
1620 /* update the ip type */
1621 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1622 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1623 &ip_type, sizeof(uint32_t));
1625 /* update the computed field to notify it is ipv4 header */
1626 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1629 if (!ulp_rte_item_skip_void(&item, 1))
1630 return BNXT_TF_RC_ERROR;
1631 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1632 ipv6_spec = item->spec;
1633 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1635 /* copy the ipv6 details */
1636 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1637 if (ulp_buffer_is_empty(tmp_buff,
1638 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1639 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1640 ulp_encap_buffer_copy(buff,
1642 sizeof(def_ipv6_hdr),
1643 ULP_BUFFER_ALIGN_8_BYTE);
1645 /* The payload length being ignored in the ip hdr. */
1646 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1647 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1648 ulp_encap_buffer_copy(buff,
1650 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1651 ULP_BUFFER_ALIGN_8_BYTE);
1652 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1653 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1654 BNXT_ULP_ENCAP_IPV6_DO];
1655 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1656 ulp_encap_buffer_copy(buff,
1658 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1659 ULP_BUFFER_ALIGN_8_BYTE);
1661 /* Update the dst ip address in ip encap buffer */
1662 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1663 sizeof(def_ipv6_hdr)];
1664 ulp_encap_buffer_copy(buff,
1665 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1666 sizeof(ipv6_spec->hdr.dst_addr),
1667 ULP_BUFFER_ALIGN_8_BYTE);
1669 /* Update the src ip address */
1670 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1671 ulp_encap_buffer_copy(buff,
1672 (const uint8_t *)ipv6_spec->hdr.src_addr,
1673 sizeof(ipv6_spec->hdr.src_addr),
1674 ULP_BUFFER_ALIGN_16_BYTE);
1676 /* Update the ip size details */
1677 ip_size = tfp_cpu_to_be_32(ip_size);
1678 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1679 &ip_size, sizeof(uint32_t));
1681 /* update the ip type */
1682 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1683 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1684 &ip_type, sizeof(uint32_t));
1686 /* update the computed field to notify it is ipv6 header */
1687 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1690 if (!ulp_rte_item_skip_void(&item, 1))
1691 return BNXT_TF_RC_ERROR;
1693 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1694 return BNXT_TF_RC_ERROR;
1698 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1699 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1700 return BNXT_TF_RC_ERROR;
1702 /* copy the udp details */
1703 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1704 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1705 ULP_BUFFER_ALIGN_8_BYTE);
1707 if (!ulp_rte_item_skip_void(&item, 1))
1708 return BNXT_TF_RC_ERROR;
1711 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1712 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1713 return BNXT_TF_RC_ERROR;
1715 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1716 /* copy the vxlan details */
1717 memcpy(&vxlan_spec, item->spec, vxlan_size);
1718 vxlan_spec.flags = 0x08;
1719 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
1720 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1721 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1722 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1724 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1725 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1726 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1727 (const uint8_t *)&vxlan_spec.vni,
1728 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1730 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1731 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1732 &vxlan_size, sizeof(uint32_t));
1734 /* update the hdr_bitmap with vxlan */
1735 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1736 return BNXT_TF_RC_SUCCESS;
1739 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1741 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1743 struct ulp_rte_parser_params *params)
1745 /* update the hdr_bitmap with vxlan */
1746 ULP_BITMAP_SET(params->act_bitmap.bits,
1747 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1748 /* Update computational field with tunnel decap info */
1749 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1750 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1751 return BNXT_TF_RC_SUCCESS;
1754 /* Function to handle the parsing of RTE Flow action drop Header. */
1756 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1757 struct ulp_rte_parser_params *params)
1759 /* Update the hdr_bitmap with drop */
1760 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1761 return BNXT_TF_RC_SUCCESS;
1764 /* Function to handle the parsing of RTE Flow action count. */
1766 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1767 struct ulp_rte_parser_params *params)
1770 const struct rte_flow_action_count *act_count;
1771 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1773 act_count = action_item->conf;
1775 if (act_count->shared) {
1777 "Parse Error:Shared count not supported\n");
1778 return BNXT_TF_RC_PARSE_ERR;
1780 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1782 BNXT_ULP_ACT_PROP_SZ_COUNT);
1785 /* Update the hdr_bitmap with count */
1786 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1787 return BNXT_TF_RC_SUCCESS;
1790 /* Function to handle the parsing of action ports. */
1792 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1795 enum bnxt_ulp_direction_type dir;
1798 struct ulp_rte_act_prop *act = ¶m->act_prop;
1799 enum bnxt_ulp_intf_type port_type;
1802 /* Get the direction */
1803 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1804 if (dir == BNXT_ULP_DIR_EGRESS) {
1805 /* For egress direction, fill vport */
1806 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1807 return BNXT_TF_RC_ERROR;
1810 pid = rte_cpu_to_be_32(pid);
1811 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1812 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1814 /* For ingress direction, fill vnic */
1815 port_type = ULP_COMP_FLD_IDX_RD(param,
1816 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1817 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1818 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1820 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1822 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1824 return BNXT_TF_RC_ERROR;
1827 pid = rte_cpu_to_be_32(pid);
1828 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1829 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1832 /* Update the action port set bit */
1833 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1834 return BNXT_TF_RC_SUCCESS;
1837 /* Function to handle the parsing of RTE Flow action PF. */
1839 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1840 struct ulp_rte_parser_params *params)
1844 enum bnxt_ulp_intf_type intf_type;
1846 /* Get the port id of the current device */
1847 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1849 /* Get the port db ifindex */
1850 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1852 BNXT_TF_DBG(ERR, "Invalid port id\n");
1853 return BNXT_TF_RC_ERROR;
1856 /* Check the port is PF port */
1857 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1858 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1859 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1860 return BNXT_TF_RC_ERROR;
1862 /* Update the action properties */
1863 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1864 return ulp_rte_parser_act_port_set(params, ifindex);
1867 /* Function to handle the parsing of RTE Flow action VF. */
1869 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1870 struct ulp_rte_parser_params *params)
1872 const struct rte_flow_action_vf *vf_action;
1874 enum bnxt_ulp_intf_type intf_type;
1876 vf_action = action_item->conf;
1878 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1879 return BNXT_TF_RC_PARSE_ERR;
1882 if (vf_action->original) {
1883 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1884 return BNXT_TF_RC_PARSE_ERR;
1887 /* Check the port is VF port */
1888 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1890 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1891 return BNXT_TF_RC_ERROR;
1893 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1894 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1895 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1896 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1897 return BNXT_TF_RC_ERROR;
1900 /* Update the action properties */
1901 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1902 return ulp_rte_parser_act_port_set(params, ifindex);
1905 /* Function to handle the parsing of RTE Flow action port_id. */
1907 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1908 struct ulp_rte_parser_params *param)
1910 const struct rte_flow_action_port_id *port_id = act_item->conf;
1912 enum bnxt_ulp_intf_type intf_type;
1916 "ParseErr: Invalid Argument\n");
1917 return BNXT_TF_RC_PARSE_ERR;
1919 if (port_id->original) {
1921 "ParseErr:Portid Original not supported\n");
1922 return BNXT_TF_RC_PARSE_ERR;
1925 /* Get the port db ifindex */
1926 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1928 BNXT_TF_DBG(ERR, "Invalid port id\n");
1929 return BNXT_TF_RC_ERROR;
1932 /* Get the intf type */
1933 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1935 BNXT_TF_DBG(ERR, "Invalid port type\n");
1936 return BNXT_TF_RC_ERROR;
1939 /* Set the action port */
1940 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1941 return ulp_rte_parser_act_port_set(param, ifindex);
1944 /* Function to handle the parsing of RTE Flow action phy_port. */
1946 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1947 struct ulp_rte_parser_params *prm)
1949 const struct rte_flow_action_phy_port *phy_port;
1953 enum bnxt_ulp_direction_type dir;
1955 phy_port = action_item->conf;
1958 "ParseErr: Invalid Argument\n");
1959 return BNXT_TF_RC_PARSE_ERR;
1962 if (phy_port->original) {
1964 "Parse Err:Port Original not supported\n");
1965 return BNXT_TF_RC_PARSE_ERR;
1967 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1968 if (dir != BNXT_ULP_DIR_EGRESS) {
1970 "Parse Err:Phy ports are valid only for egress\n");
1971 return BNXT_TF_RC_PARSE_ERR;
1973 /* Get the physical port details from port db */
1974 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1977 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1982 pid = rte_cpu_to_be_32(pid);
1983 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1984 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1986 /* Update the action port set bit */
1987 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1988 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1989 BNXT_ULP_INTF_TYPE_PHY_PORT);
1990 return BNXT_TF_RC_SUCCESS;
1993 /* Function to handle the parsing of RTE Flow action pop vlan. */
1995 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1996 struct ulp_rte_parser_params *params)
1998 /* Update the act_bitmap with pop */
1999 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
2000 return BNXT_TF_RC_SUCCESS;
2003 /* Function to handle the parsing of RTE Flow action push vlan. */
2005 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2006 struct ulp_rte_parser_params *params)
2008 const struct rte_flow_action_of_push_vlan *push_vlan;
2010 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2012 push_vlan = action_item->conf;
2014 ethertype = push_vlan->ethertype;
2015 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2017 "Parse Err: Ethertype not supported\n");
2018 return BNXT_TF_RC_PARSE_ERR;
2020 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2021 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2022 /* Update the hdr_bitmap with push vlan */
2023 ULP_BITMAP_SET(params->act_bitmap.bits,
2024 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
2025 return BNXT_TF_RC_SUCCESS;
2027 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2028 return BNXT_TF_RC_ERROR;
2031 /* Function to handle the parsing of RTE Flow action set vlan id. */
2033 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2034 struct ulp_rte_parser_params *params)
2036 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2038 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2040 vlan_vid = action_item->conf;
2041 if (vlan_vid && vlan_vid->vlan_vid) {
2042 vid = vlan_vid->vlan_vid;
2043 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2044 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2045 /* Update the hdr_bitmap with vlan vid */
2046 ULP_BITMAP_SET(params->act_bitmap.bits,
2047 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
2048 return BNXT_TF_RC_SUCCESS;
2050 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2051 return BNXT_TF_RC_ERROR;
2054 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2056 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2057 struct ulp_rte_parser_params *params)
2059 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2061 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2063 vlan_pcp = action_item->conf;
2065 pcp = vlan_pcp->vlan_pcp;
2066 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2067 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2068 /* Update the hdr_bitmap with vlan vid */
2069 ULP_BITMAP_SET(params->act_bitmap.bits,
2070 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
2071 return BNXT_TF_RC_SUCCESS;
2073 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2074 return BNXT_TF_RC_ERROR;
2077 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2079 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2080 struct ulp_rte_parser_params *params)
2082 const struct rte_flow_action_set_ipv4 *set_ipv4;
2083 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2085 set_ipv4 = action_item->conf;
2087 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2088 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2089 /* Update the hdr_bitmap with set ipv4 src */
2090 ULP_BITMAP_SET(params->act_bitmap.bits,
2091 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
2092 return BNXT_TF_RC_SUCCESS;
2094 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2095 return BNXT_TF_RC_ERROR;
2098 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2100 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2101 struct ulp_rte_parser_params *params)
2103 const struct rte_flow_action_set_ipv4 *set_ipv4;
2104 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2106 set_ipv4 = action_item->conf;
2108 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2109 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2110 /* Update the hdr_bitmap with set ipv4 dst */
2111 ULP_BITMAP_SET(params->act_bitmap.bits,
2112 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
2113 return BNXT_TF_RC_SUCCESS;
2115 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2116 return BNXT_TF_RC_ERROR;
2119 /* Function to handle the parsing of RTE Flow action set tp src.*/
2121 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2122 struct ulp_rte_parser_params *params)
2124 const struct rte_flow_action_set_tp *set_tp;
2125 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2127 set_tp = action_item->conf;
2129 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2130 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2131 /* Update the hdr_bitmap with set tp src */
2132 ULP_BITMAP_SET(params->act_bitmap.bits,
2133 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
2134 return BNXT_TF_RC_SUCCESS;
2137 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2138 return BNXT_TF_RC_ERROR;
2141 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2143 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2144 struct ulp_rte_parser_params *params)
2146 const struct rte_flow_action_set_tp *set_tp;
2147 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2149 set_tp = action_item->conf;
2151 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2152 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2153 /* Update the hdr_bitmap with set tp dst */
2154 ULP_BITMAP_SET(params->act_bitmap.bits,
2155 BNXT_ULP_ACTION_BIT_SET_TP_DST);
2156 return BNXT_TF_RC_SUCCESS;
2159 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2160 return BNXT_TF_RC_ERROR;
2163 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2165 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2166 struct ulp_rte_parser_params *params)
2168 /* Update the act_bitmap with dec ttl */
2169 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2170 return BNXT_TF_RC_SUCCESS;
2173 /* Function to handle the parsing of RTE Flow action JUMP */
2175 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2176 struct ulp_rte_parser_params *params)
2178 /* Update the act_bitmap with dec ttl */
2179 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
2180 return BNXT_TF_RC_SUCCESS;