1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
8 #include "ulp_template_db_enum.h"
9 #include "ulp_template_struct.h"
11 #include "bnxt_tf_common.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
21 /* Local defines for the parsing functions */
22 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
23 #define ULP_VLAN_PRIORITY_MASK 0x700
24 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
25 #define ULP_UDP_PORT_VXLAN 4789
27 /* Utility function to skip the void items. */
29 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
35 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
42 /* Utility function to update the field_bitmap */
44 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
47 struct ulp_rte_hdr_field *field;
49 field = ¶ms->hdr_field[idx];
50 if (ulp_bitmap_notzero(field->mask, field->size)) {
51 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
53 if (!ulp_bitmap_is_ones(field->mask, field->size))
54 ULP_BITMAP_SET(params->fld_bitmap.bits,
55 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
57 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
61 /* Utility function to copy field spec items */
62 static struct ulp_rte_hdr_field *
63 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
68 memcpy(field->spec, buffer, field->size);
73 /* Utility function to copy field masks items */
75 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
80 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
82 memcpy(field->mask, buffer, size);
83 ulp_rte_parser_field_bitmap_update(params, *idx);
87 /* Utility function to ignore field masks items */
89 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
91 const void *buffer __rte_unused,
92 uint32_t size __rte_unused)
/* NOTE(review): lossy extraction — each line below keeps its original line
 * number as a literal prefix and several interior lines (opening/closing
 * braces, BNXT_TF_DBG call openings, loop advance of 'item') are missing.
 * Code left byte-identical; comments only added.
 */
98 * Function to handle the parsing of RTE Flows and placing
99 * the RTE flow items into the ulp structures.
102 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
103 struct ulp_rte_parser_params *params)
105 const struct rte_flow_item *item = pattern;
106 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* field_idx starts at BNXT_ULP_PROTO_HDR_SVIF_NUM — presumably slots before
 * it are reserved for the implicit SVIF field; TODO confirm against
 * ulp_template_db headers.
 */
108 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
110 /* Set the computed flags for no vlan tags before parsing */
111 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
112 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
114 /* Parse all the items in the pattern */
115 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
116 /* get the header information from the flow_hdr_info table */
117 hdr_info = &ulp_hdr_info[item->type];
118 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
120 "Truflow parser does not support type %d\n",
122 return BNXT_TF_RC_PARSE_ERR;
123 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
124 /* call the registered callback handler */
125 if (hdr_info->proto_hdr_func) {
126 if (hdr_info->proto_hdr_func(item, params) !=
127 BNXT_TF_RC_SUCCESS) {
128 return BNXT_TF_RC_ERROR;
/* NOTE(review): the loop presumably advances 'item++' in a dropped line —
 * TODO confirm against the upstream source.
 */
134 /* update the implied SVIF */
135 return ulp_rte_parser_implicit_match_port_process(params);
/* NOTE(review): lossy extraction — lines keep their original numbering and
 * interior lines (braces, BNXT_TF_DBG openings, loop advance) are missing.
 * Code left byte-identical; comments only added.
 */
139 * Function to handle the parsing of RTE Flows and placing
140 * the RTE flow actions into the ulp structures.
143 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
144 struct ulp_rte_parser_params *params)
146 const struct rte_flow_action *action_item = actions;
147 struct bnxt_ulp_rte_act_info *hdr_info;
149 /* Parse all the items in the pattern */
150 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
151 /* get the header information from the flow_hdr_info table */
152 hdr_info = &ulp_act_info[action_item->type];
153 if (hdr_info->act_type ==
154 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
156 "Truflow parser does not support act %u\n",
158 return BNXT_TF_RC_ERROR;
159 } else if (hdr_info->act_type ==
160 BNXT_ULP_ACT_TYPE_SUPPORTED) {
161 /* call the registered callback handler */
162 if (hdr_info->proto_act_func) {
163 if (hdr_info->proto_act_func(action_item,
165 BNXT_TF_RC_SUCCESS) {
166 return BNXT_TF_RC_ERROR;
/* Unlike hdr_parse, a failed implicit port update is not propagated here —
 * return value of the call below is ignored by design of the visible code.
 */
172 /* update the implied port details */
173 ulp_rte_parser_implicit_act_port_process(params);
174 return BNXT_TF_RC_SUCCESS;
/* NOTE(review): lossy extraction — lines keep their original numbering;
 * declarations of 'ifindex' and 'mtype', error-path returns, and closing
 * braces are among the dropped lines. Code left byte-identical.
 */
178 * Function to handle the post processing of the computed
179 * fields for the interface.
182 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
185 uint16_t port_id, parif;
187 enum bnxt_ulp_direction_type dir;
189 /* get the direction details */
190 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
192 /* read the port id details */
193 port_id = ULP_COMP_FLD_IDX_RD(params,
194 BNXT_ULP_CF_IDX_INCOMING_IF);
/* Converts the DPDK port id into the driver's port-db ifindex; 'ifindex' is
 * declared in a dropped line — TODO confirm its type (uint32_t upstream).
 */
195 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
198 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
202 if (dir == BNXT_ULP_DIR_INGRESS) {
/* Ingress: record the physical-port PARIF */
204 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
205 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
206 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
209 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
212 /* Get the match port type */
213 mtype = ULP_COMP_FLD_IDX_RD(params,
214 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
215 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
216 ULP_COMP_FLD_IDX_WR(params,
217 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
219 /* Set VF func PARIF */
220 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
221 BNXT_ULP_VF_FUNC_PARIF,
224 "ParseErr:ifindex is not valid\n");
227 ULP_COMP_FLD_IDX_WR(params,
228 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
231 /* populate the loopback parif */
232 ULP_COMP_FLD_IDX_WR(params,
233 BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
234 BNXT_ULP_SYM_VF_FUNC_PARIF);
/* Non-VF-rep match port: use the driver function's PARIF instead */
237 /* Set DRV func PARIF */
238 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
239 BNXT_ULP_DRV_FUNC_PARIF,
242 "ParseErr:ifindex is not valid\n");
245 ULP_COMP_FLD_IDX_WR(params,
246 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
/* NOTE(review): lossy extraction — lines keep their original numbering;
 * some else-branches and closing braces are missing. Code left
 * byte-identical; comments only added.
 * Purpose: derive computed fields (direction bits, VF-to-VF flag, dec-TTL
 * variants) after item/action parsing, then fold hdr_fp_bit into hdr_bitmap
 * and refresh the interface parif fields.
 */
253 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
255 enum bnxt_ulp_intf_type match_port_type, act_port_type;
256 enum bnxt_ulp_direction_type dir;
257 uint32_t act_port_set;
259 /* Get the computed details */
260 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
261 match_port_type = ULP_COMP_FLD_IDX_RD(params,
262 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
263 act_port_type = ULP_COMP_FLD_IDX_RD(params,
264 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
265 act_port_set = ULP_COMP_FLD_IDX_RD(params,
266 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
268 /* set the flow direction in the proto and action header */
269 if (dir == BNXT_ULP_DIR_EGRESS) {
270 ULP_BITMAP_SET(params->hdr_bitmap.bits,
271 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
272 ULP_BITMAP_SET(params->act_bitmap.bits,
273 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
276 /* calculate the VF to VF flag */
277 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
278 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
279 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
281 /* Update the decrement ttl computational fields */
282 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
283 BNXT_ULP_ACTION_BIT_DEC_TTL)) {
285 * Check that vxlan proto is included and vxlan decap
286 * action is not set then decrement tunnel ttl.
287 * Similarly add GRE and NVGRE in future.
289 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
290 BNXT_ULP_HDR_BIT_T_VXLAN) &&
291 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
292 BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
293 ULP_COMP_FLD_IDX_WR(params,
294 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
/* NOTE(review): line 296 below presumably sits in the dropped else branch
 * (non-tunnel dec-TTL) — TODO confirm.
 */
296 ULP_COMP_FLD_IDX_WR(params,
297 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
301 /* Merge the hdr_fp_bit into the proto header bit */
302 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
304 /* Update the computed interface parameters */
305 bnxt_ulp_comp_fld_intf_update(params);
307 /* TBD: Handle the flow rejection scenarios */
/*
 * Function to handle the post processing of the parsing details.
 * Runs the normal-flow post processing first (computed fields, bitmaps,
 * interface parif updates), then lets the tunnel-flow post processor decide
 * the final return code.
 * NOTE(review): restored from a garbled extraction (embedded line numbers,
 * dropped braces); logic is exactly the two visible statements.
 */
int32_t
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	ulp_post_process_normal_flow(params);
	return ulp_post_process_tun_flow(params);
}
322 * Function to compute the flow direction based on the match port details
325 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
327 enum bnxt_ulp_intf_type match_port_type;
329 /* Get the match port type */
330 match_port_type = ULP_COMP_FLD_IDX_RD(params,
331 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
333 /* If ingress flow and matchport is vf rep then dir is egress*/
334 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
335 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
336 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
337 BNXT_ULP_DIR_EGRESS);
339 /* Assign the input direction */
340 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
341 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
342 BNXT_ULP_DIR_INGRESS);
344 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
345 BNXT_ULP_DIR_EGRESS);
/* NOTE(review): lossy extraction — lines keep their original numbering; the
 * parameter list tail (ifindex, svif mask), local 'svif'/'mask' decls, and
 * several braces are missing, and '&params' appears mojibake'd as '¶ms'
 * on line 394. Code left byte-identical; comments only added.
 * Purpose: resolve the SVIF for the match port, stamp it into the reserved
 * SVIF hdr_field slot, and record it in the SVIF_FLAG computed field.
 */
349 /* Function to handle the parsing of RTE Flow item PF Header. */
351 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
356 enum bnxt_ulp_direction_type dir;
357 struct ulp_rte_hdr_field *hdr_field;
358 enum bnxt_ulp_svif_type svif_type;
359 enum bnxt_ulp_intf_type port_type;
/* Only one source port may contribute an SVIF per flow */
361 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
362 BNXT_ULP_INVALID_SVIF_VAL) {
364 "SVIF already set,multiple source not support'd\n");
365 return BNXT_TF_RC_ERROR;
368 /* Get port type details */
369 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
370 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
371 BNXT_TF_DBG(ERR, "Invalid port type\n");
372 return BNXT_TF_RC_ERROR;
375 /* Update the match port type */
376 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
378 /* compute the direction */
379 bnxt_ulp_rte_parser_direction_compute(params);
381 /* Get the computed direction */
382 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
/* Pick which port-db SVIF to look up: phy port for ingress, VF-func for a
 * VF representor, drv-func otherwise.
 */
383 if (dir == BNXT_ULP_DIR_INGRESS) {
384 svif_type = BNXT_ULP_PHY_PORT_SVIF;
386 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
387 svif_type = BNXT_ULP_VF_FUNC_SVIF;
389 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
391 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
393 svif = rte_cpu_to_be_16(svif);
/* NOTE(review): '¶ms' below is mojibake of '&params' — left as-is */
394 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
395 memcpy(hdr_field->spec, &svif, sizeof(svif));
396 memcpy(hdr_field->mask, &mask, sizeof(mask));
397 hdr_field->size = sizeof(svif);
398 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
399 rte_be_to_cpu_16(svif));
400 return BNXT_TF_RC_SUCCESS;
403 /* Function to handle the parsing of the RTE port id */
405 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
407 uint16_t port_id = 0;
408 uint16_t svif_mask = 0xFFFF;
410 int32_t rc = BNXT_TF_RC_ERROR;
412 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
413 BNXT_ULP_INVALID_SVIF_VAL)
414 return BNXT_TF_RC_SUCCESS;
416 /* SVIF not set. So get the port id */
417 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
419 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
422 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
426 /* Update the SVIF details */
427 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
431 /* Function to handle the implicit action port id */
433 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
435 struct rte_flow_action action_item = {0};
436 struct rte_flow_action_port_id port_id = {0};
438 /* Read the action port set bit */
439 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
440 /* Already set, so just exit */
441 return BNXT_TF_RC_SUCCESS;
443 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
444 action_item.conf = &port_id;
446 /* Update the action port based on incoming port */
447 ulp_rte_port_id_act_handler(&action_item, params);
449 /* Reset the action port set bit */
450 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
451 return BNXT_TF_RC_SUCCESS;
454 /* Function to handle the parsing of RTE Flow item PF Header. */
456 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
457 struct ulp_rte_parser_params *params)
459 uint16_t port_id = 0;
460 uint16_t svif_mask = 0xFFFF;
463 /* Get the implicit port id */
464 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
466 /* perform the conversion from dpdk port to bnxt ifindex */
467 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
470 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
471 return BNXT_TF_RC_ERROR;
474 /* Update the SVIF details */
475 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
478 /* Function to handle the parsing of RTE Flow item VF Header. */
480 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
481 struct ulp_rte_parser_params *params)
483 const struct rte_flow_item_vf *vf_spec = item->spec;
484 const struct rte_flow_item_vf *vf_mask = item->mask;
487 int32_t rc = BNXT_TF_RC_PARSE_ERR;
489 /* Get VF rte_flow_item for Port details */
491 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
495 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
500 /* perform the conversion from VF Func id to bnxt ifindex */
501 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
504 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
507 /* Update the SVIF details */
508 return ulp_rte_parser_svif_set(params, ifindex, mask);
511 /* Function to handle the parsing of RTE Flow item port id Header. */
513 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
514 struct ulp_rte_parser_params *params)
516 const struct rte_flow_item_port_id *port_spec = item->spec;
517 const struct rte_flow_item_port_id *port_mask = item->mask;
519 int32_t rc = BNXT_TF_RC_PARSE_ERR;
523 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
527 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
530 mask = port_mask->id;
532 /* perform the conversion from dpdk port to bnxt ifindex */
533 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
536 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
539 /* Update the SVIF details */
540 return ulp_rte_parser_svif_set(params, ifindex, mask);
/* NOTE(review): lossy extraction — lines keep their original numbering; the
 * 'svif'/'mask' declarations, spec/mask null-check 'if' lines, and call
 * continuations are missing, and '&params' appears mojibake'd as '¶ms'
 * on line 592. Code left byte-identical; comments only added.
 * Purpose: match on a physical port item — ingress only — resolving its
 * SVIF from the port db and stamping it into the SVIF hdr_field slot.
 */
543 /* Function to handle the parsing of RTE Flow item phy port Header. */
545 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
546 struct ulp_rte_parser_params *params)
548 const struct rte_flow_item_phy_port *port_spec = item->spec;
549 const struct rte_flow_item_phy_port *port_mask = item->mask;
551 int32_t rc = BNXT_TF_RC_ERROR;
553 enum bnxt_ulp_direction_type dir;
554 struct ulp_rte_hdr_field *hdr_field;
556 /* Copy the rte_flow_item for phy port into hdr_field */
558 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
562 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
565 mask = port_mask->index;
567 /* Update the match port type */
568 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
569 BNXT_ULP_INTF_TYPE_PHY_PORT);
571 /* Compute the Hw direction */
572 bnxt_ulp_rte_parser_direction_compute(params);
574 /* Direction validation */
575 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
576 if (dir == BNXT_ULP_DIR_EGRESS) {
578 "Parse Err:Phy ports are valid only for ingress\n");
579 return BNXT_TF_RC_PARSE_ERR;
582 /* Get the physical port details from port db */
583 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
586 BNXT_TF_DBG(ERR, "Failed to get port details\n");
587 return BNXT_TF_RC_PARSE_ERR;
590 /* Update the SVIF details */
591 svif = rte_cpu_to_be_16(svif);
/* NOTE(review): '¶ms' below is mojibake of '&params' — left as-is */
592 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
593 memcpy(hdr_field->spec, &svif, sizeof(svif));
594 memcpy(hdr_field->mask, &mask, sizeof(mask));
595 hdr_field->size = sizeof(svif);
596 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
597 rte_be_to_cpu_16(svif));
598 return BNXT_TF_RC_SUCCESS;
601 /* Function to handle the update of proto header based on field values */
603 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
604 uint16_t type, uint32_t in_flag)
606 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
608 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
609 BNXT_ULP_HDR_BIT_I_IPV4);
610 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
612 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
613 BNXT_ULP_HDR_BIT_O_IPV4);
614 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
616 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
618 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
619 BNXT_ULP_HDR_BIT_I_IPV6);
620 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
622 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
623 BNXT_ULP_HDR_BIT_O_IPV6);
624 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
629 /* Internal Function to identify broadcast or multicast packets */
631 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
633 if (rte_is_multicast_ether_addr(eth_addr) ||
634 rte_is_broadcast_ether_addr(eth_addr)) {
636 "No support for bcast or mcast addr offload\n");
/* NOTE(review): lossy extraction — lines keep their original numbering;
 * null-checks for eth_spec/eth_mask, the 'size' declaration, the
 * inner_flag assignment and the if/else braces are among the dropped
 * lines, and '&params'/'&eth_spec'/'&eth_mask' appear mojibake'd as
 * '¶ms'/'ð_spec'/'ð_mask'. Code left byte-identical.
 * Purpose: copy the eth item's dst/src/type spec and mask into hdr_field,
 * reserve the VLAN field slots, and set the inner/outer ETH header bit.
 */
642 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
644 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
645 struct ulp_rte_parser_params *params)
647 const struct rte_flow_item_eth *eth_spec = item->spec;
648 const struct rte_flow_item_eth *eth_mask = item->mask;
649 struct ulp_rte_hdr_field *field;
650 uint32_t idx = params->field_idx;
652 uint16_t eth_type = 0;
653 uint32_t inner_flag = 0;
656 * Copy the rte_flow_item for eth into hdr_field using ethernet
660 size = sizeof(eth_spec->dst.addr_bytes);
661 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
662 eth_spec->dst.addr_bytes,
664 /* Todo: work around to avoid multicast and broadcast addr */
665 if (ulp_rte_parser_is_bcmc_addr(ð_spec->dst))
666 return BNXT_TF_RC_PARSE_ERR;
668 size = sizeof(eth_spec->src.addr_bytes);
669 field = ulp_rte_parser_fld_copy(field,
670 eth_spec->src.addr_bytes,
672 /* Todo: work around to avoid multicast and broadcast addr */
673 if (ulp_rte_parser_is_bcmc_addr(ð_spec->src))
674 return BNXT_TF_RC_PARSE_ERR;
676 field = ulp_rte_parser_fld_copy(field,
678 sizeof(eth_spec->type));
679 eth_type = eth_spec->type;
682 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
683 sizeof(eth_mask->dst.addr_bytes));
684 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
685 sizeof(eth_mask->src.addr_bytes));
686 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
687 sizeof(eth_mask->type));
689 /* Add number of vlan header elements */
690 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
691 params->vlan_idx = params->field_idx;
692 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
/* Any prior outer L2/L3/L4 header means this eth item is the inner one */
694 /* Update the protocol hdr bitmap */
695 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
696 BNXT_ULP_HDR_BIT_O_ETH) ||
697 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
698 BNXT_ULP_HDR_BIT_O_IPV4) ||
699 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
700 BNXT_ULP_HDR_BIT_O_IPV6) ||
701 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
702 BNXT_ULP_HDR_BIT_O_UDP) ||
703 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
704 BNXT_ULP_HDR_BIT_O_TCP)) {
705 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
/* NOTE(review): inner_flag is presumably set to 1 in a dropped line of this
 * branch — TODO confirm.
 */
708 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
710 /* Update the field protocol hdr bitmap */
711 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
713 return BNXT_TF_RC_SUCCESS;
/* NOTE(review): lossy extraction — lines keep their original numbering;
 * null-checks, several fld_copy continuation arguments, the vtag counter
 * increments, and else/brace lines are missing, and '&params' appears
 * mojibake'd as '¶ms' on lines 742 and 794. Code left byte-identical.
 * Purpose: split the VLAN TCI into priority + tag, copy spec/mask into
 * hdr_field (priority mask deliberately ignored), and classify the tag as
 * outer-outer/outer-inner/inner-outer/inner-inner via the vtag counters.
 */
716 /* Function to handle the parsing of RTE Flow item Vlan Header. */
718 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
719 struct ulp_rte_parser_params *params)
721 const struct rte_flow_item_vlan *vlan_spec = item->spec;
722 const struct rte_flow_item_vlan *vlan_mask = item->mask;
723 struct ulp_rte_hdr_field *field;
724 struct ulp_rte_hdr_bitmap *hdr_bit;
725 uint32_t idx = params->vlan_idx;
726 uint16_t vlan_tag, priority;
727 uint32_t outer_vtag_num;
728 uint32_t inner_vtag_num;
729 uint16_t eth_type = 0;
730 uint32_t inner_flag = 0;
733 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* TCI: top 3 bits PCP (priority), low 12 bits VLAN id (see ULP_VLAN_* defs) */
737 vlan_tag = ntohs(vlan_spec->tci);
738 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
739 vlan_tag &= ULP_VLAN_TAG_MASK;
740 vlan_tag = htons(vlan_tag);
742 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
745 field = ulp_rte_parser_fld_copy(field,
748 field = ulp_rte_parser_fld_copy(field,
749 &vlan_spec->inner_type,
750 sizeof(vlan_spec->inner_type));
751 eth_type = vlan_spec->inner_type;
755 vlan_tag = ntohs(vlan_mask->tci);
756 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
760 * the storage for priority and vlan tag is 2 bytes
761 * The mask of priority which is 3 bits if it is all 1's
762 * then make the rest bits 13 bits as 1's
763 * so that it is matched as exact match.
765 if (priority == ULP_VLAN_PRIORITY_MASK)
766 priority |= ~ULP_VLAN_PRIORITY_MASK;
767 if (vlan_tag == ULP_VLAN_TAG_MASK)
768 vlan_tag |= ~ULP_VLAN_TAG_MASK;
769 vlan_tag = htons(vlan_tag);
772 * The priority field is ignored since OVS is setting it as
773 * wild card match and it is not supported. This is a work
774 * around and shall be addressed in the future.
776 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
779 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
781 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
782 sizeof(vlan_mask->inner_type));
784 /* Set the vlan index to new incremented value */
785 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
787 /* Get the outer tag and inner tag counts */
788 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
789 BNXT_ULP_CF_IDX_O_VTAG_NUM);
790 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
791 BNXT_ULP_CF_IDX_I_VTAG_NUM);
793 /* Update the hdr_bitmap of the vlans */
/* NOTE(review): '¶ms' below is mojibake of '&params' — left as-is */
794 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first outer tag (O_ETH only, no tags yet) -> OO_VLAN */
795 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
796 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
798 /* Update the vlan tag num */
800 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
802 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
803 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
804 ULP_BITMAP_SET(params->hdr_bitmap.bits,
805 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Case 2: second outer tag -> OI_VLAN */
806 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
807 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
808 outer_vtag_num == 1) {
809 /* update the vlan tag num */
811 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
813 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
814 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
815 ULP_BITMAP_SET(params->hdr_bitmap.bits,
816 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Case 3: first inner tag (inner eth present) -> IO_VLAN */
817 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
818 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
820 /* update the vlan tag num */
822 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
824 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
825 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
826 ULP_BITMAP_SET(params->hdr_bitmap.bits,
827 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Case 4: second inner tag -> II_VLAN */
829 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
830 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
831 inner_vtag_num == 1) {
832 /* update the vlan tag num */
834 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
836 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
837 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
838 ULP_BITMAP_SET(params->hdr_bitmap.bits,
839 BNXT_ULP_HDR_BIT_II_VLAN);
842 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
843 return BNXT_TF_RC_ERROR;
845 /* Update the field protocol hdr bitmap */
846 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
847 return BNXT_TF_RC_SUCCESS;
850 /* Function to handle the update of proto header based on field values */
852 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
853 uint8_t proto, uint32_t in_flag)
855 if (proto == IPPROTO_UDP) {
857 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
858 BNXT_ULP_HDR_BIT_I_UDP);
859 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
861 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
862 BNXT_ULP_HDR_BIT_O_UDP);
863 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
865 } else if (proto == IPPROTO_TCP) {
867 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
868 BNXT_ULP_HDR_BIT_I_TCP);
869 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
871 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
872 BNXT_ULP_HDR_BIT_O_TCP);
873 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
/* NOTE(review): lossy extraction — lines keep their original numbering;
 * declarations of 'size'/'proto'/'cnt', the ipv4_spec/ipv4_mask null-check
 * 'if' lines, fld_copy size arguments, inner_flag assignment and braces
 * are among the dropped lines, and '&params' appears mojibake'd as
 * '¶ms' on lines 886 and 922. Code left byte-identical.
 * Purpose: copy the full IPv4 header spec and mask field-by-field into
 * hdr_field (TOS mask deliberately ignored), classify the header as
 * inner/outer, and derive the L4 protocol bit from next_proto_id.
 */
878 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
880 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
881 struct ulp_rte_parser_params *params)
883 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
884 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
885 struct ulp_rte_hdr_field *field;
886 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
887 uint32_t idx = params->field_idx;
890 uint32_t inner_flag = 0;
893 /* validate there are no 3rd L3 header */
894 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
896 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
897 return BNXT_TF_RC_ERROR;
900 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
901 BNXT_ULP_HDR_BIT_O_ETH) &&
902 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
903 BNXT_ULP_HDR_BIT_I_ETH)) {
904 /* Since F2 flow does not include eth item, when parser detects
905 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
906 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
907 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
908 * This will allow the parser post processor to update the
909 * t_dmac in hdr_field[o_eth.dmac]
911 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
912 BNXT_ULP_PROTO_HDR_VLAN_NUM);
913 params->field_idx = idx;
917 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
921 size = sizeof(ipv4_spec->hdr.version_ihl);
922 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
923 &ipv4_spec->hdr.version_ihl,
925 size = sizeof(ipv4_spec->hdr.type_of_service);
926 field = ulp_rte_parser_fld_copy(field,
927 &ipv4_spec->hdr.type_of_service,
929 size = sizeof(ipv4_spec->hdr.total_length);
930 field = ulp_rte_parser_fld_copy(field,
931 &ipv4_spec->hdr.total_length,
933 size = sizeof(ipv4_spec->hdr.packet_id);
934 field = ulp_rte_parser_fld_copy(field,
935 &ipv4_spec->hdr.packet_id,
937 size = sizeof(ipv4_spec->hdr.fragment_offset);
938 field = ulp_rte_parser_fld_copy(field,
939 &ipv4_spec->hdr.fragment_offset,
941 size = sizeof(ipv4_spec->hdr.time_to_live);
942 field = ulp_rte_parser_fld_copy(field,
943 &ipv4_spec->hdr.time_to_live,
945 size = sizeof(ipv4_spec->hdr.next_proto_id);
946 field = ulp_rte_parser_fld_copy(field,
947 &ipv4_spec->hdr.next_proto_id,
949 proto = ipv4_spec->hdr.next_proto_id;
950 size = sizeof(ipv4_spec->hdr.hdr_checksum);
951 field = ulp_rte_parser_fld_copy(field,
952 &ipv4_spec->hdr.hdr_checksum,
954 size = sizeof(ipv4_spec->hdr.src_addr);
955 field = ulp_rte_parser_fld_copy(field,
956 &ipv4_spec->hdr.src_addr,
958 size = sizeof(ipv4_spec->hdr.dst_addr);
959 field = ulp_rte_parser_fld_copy(field,
960 &ipv4_spec->hdr.dst_addr,
964 ulp_rte_prsr_mask_copy(params, &idx,
965 &ipv4_mask->hdr.version_ihl,
966 sizeof(ipv4_mask->hdr.version_ihl));
968 * The tos field is ignored since OVS is setting it as wild card
969 * match and it is not supported. This is a work around and
970 * shall be addressed in the future.
972 ulp_rte_prsr_mask_ignore(params, &idx,
973 &ipv4_mask->hdr.type_of_service,
974 sizeof(ipv4_mask->hdr.type_of_service)
977 ulp_rte_prsr_mask_copy(params, &idx,
978 &ipv4_mask->hdr.total_length,
979 sizeof(ipv4_mask->hdr.total_length));
980 ulp_rte_prsr_mask_copy(params, &idx,
981 &ipv4_mask->hdr.packet_id,
982 sizeof(ipv4_mask->hdr.packet_id));
983 ulp_rte_prsr_mask_copy(params, &idx,
984 &ipv4_mask->hdr.fragment_offset,
985 sizeof(ipv4_mask->hdr.fragment_offset));
986 ulp_rte_prsr_mask_copy(params, &idx,
987 &ipv4_mask->hdr.time_to_live,
988 sizeof(ipv4_mask->hdr.time_to_live));
989 ulp_rte_prsr_mask_copy(params, &idx,
990 &ipv4_mask->hdr.next_proto_id,
991 sizeof(ipv4_mask->hdr.next_proto_id));
992 ulp_rte_prsr_mask_copy(params, &idx,
993 &ipv4_mask->hdr.hdr_checksum,
994 sizeof(ipv4_mask->hdr.hdr_checksum));
995 ulp_rte_prsr_mask_copy(params, &idx,
996 &ipv4_mask->hdr.src_addr,
997 sizeof(ipv4_mask->hdr.src_addr));
998 ulp_rte_prsr_mask_copy(params, &idx,
999 &ipv4_mask->hdr.dst_addr,
1000 sizeof(ipv4_mask->hdr.dst_addr));
1002 /* Add the number of ipv4 header elements */
1003 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
/* A prior outer L3 header means this one is the inner header */
1005 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1006 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1007 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1008 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1009 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1012 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1013 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1016 /* Some of the PMD applications may set the protocol field
1017 * in the IPv4 spec but don't set the mask. So, consider
1018 * the mask in the proto value calculation.
1021 proto &= ipv4_mask->hdr.next_proto_id;
1023 /* Update the field protocol hdr bitmap */
1024 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1025 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1026 return BNXT_TF_RC_SUCCESS;
/* NOTE(review): lossy extraction — lines keep their original numbering;
 * declarations of 'size'/'proto'/'cnt', null-check 'if' lines, several
 * fld_copy continuation arguments, inner_flag assignment and braces are
 * among the dropped lines, and '&params' appears mojibake'd as '¶ms'
 * on lines 1037 and 1076. Code left byte-identical.
 * Purpose: split vtc_flow into version/TC/flow-label and copy the IPv6
 * header spec and mask into hdr_field (TC and flow-label masks ignored),
 * classify the header as inner/outer, and derive the L4 bit from 'proto'.
 */
1029 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1031 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1032 struct ulp_rte_parser_params *params)
1034 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1035 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1036 struct ulp_rte_hdr_field *field;
1037 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1038 uint32_t idx = params->field_idx;
1040 uint32_t vtcf, vtcf_mask;
1042 uint32_t inner_flag = 0;
1045 /* validate there are no 3rd L3 header */
1046 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1048 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1049 return BNXT_TF_RC_ERROR;
1052 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1053 BNXT_ULP_HDR_BIT_O_ETH) &&
1054 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1055 BNXT_ULP_HDR_BIT_I_ETH)) {
1056 /* Since F2 flow does not include eth item, when parser detects
1057 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1058 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
1059 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1060 * This will allow the parser post processor to update the
1061 * t_dmac in hdr_field[o_eth.dmac]
1063 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1064 BNXT_ULP_PROTO_HDR_VLAN_NUM);
1065 params->field_idx = idx;
1069 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1073 size = sizeof(ipv6_spec->hdr.vtc_flow);
/* vtc_flow is decomposed into three separate hdr_field slots below */
1075 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1076 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1080 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1081 field = ulp_rte_parser_fld_copy(field,
1085 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1086 field = ulp_rte_parser_fld_copy(field,
1090 size = sizeof(ipv6_spec->hdr.payload_len);
1091 field = ulp_rte_parser_fld_copy(field,
1092 &ipv6_spec->hdr.payload_len,
1094 size = sizeof(ipv6_spec->hdr.proto);
1095 field = ulp_rte_parser_fld_copy(field,
1096 &ipv6_spec->hdr.proto,
1098 proto = ipv6_spec->hdr.proto;
1099 size = sizeof(ipv6_spec->hdr.hop_limits);
1100 field = ulp_rte_parser_fld_copy(field,
1101 &ipv6_spec->hdr.hop_limits,
1103 size = sizeof(ipv6_spec->hdr.src_addr);
1104 field = ulp_rte_parser_fld_copy(field,
1105 &ipv6_spec->hdr.src_addr,
1107 size = sizeof(ipv6_spec->hdr.dst_addr);
1108 field = ulp_rte_parser_fld_copy(field,
1109 &ipv6_spec->hdr.dst_addr,
1113 size = sizeof(ipv6_mask->hdr.vtc_flow);
1115 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1116 ulp_rte_prsr_mask_copy(params, &idx,
1120 * The TC and flow label field are ignored since OVS is
1121 * setting it for match and it is not supported.
1122 * This is a work around and
1123 * shall be addressed in the future.
1125 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1126 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1128 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1129 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1131 ulp_rte_prsr_mask_copy(params, &idx,
1132 &ipv6_mask->hdr.payload_len,
1133 sizeof(ipv6_mask->hdr.payload_len));
1134 ulp_rte_prsr_mask_copy(params, &idx,
1135 &ipv6_mask->hdr.proto,
1136 sizeof(ipv6_mask->hdr.proto));
1137 ulp_rte_prsr_mask_copy(params, &idx,
1138 &ipv6_mask->hdr.hop_limits,
1139 sizeof(ipv6_mask->hdr.hop_limits));
1140 ulp_rte_prsr_mask_copy(params, &idx,
1141 &ipv6_mask->hdr.src_addr,
1142 sizeof(ipv6_mask->hdr.src_addr));
1143 ulp_rte_prsr_mask_copy(params, &idx,
1144 &ipv6_mask->hdr.dst_addr,
1145 sizeof(ipv6_mask->hdr.dst_addr));
1147 /* add number of ipv6 header elements */
1148 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
/* A prior outer L3 header means this one is the inner header */
1150 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1151 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1152 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1153 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1154 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1157 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1158 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1161 /* Some of the PMD applications may set the protocol field
1162 * in the IPv6 spec but don't set the mask. So, consider
1163 * the mask in proto value calculation.
1166 proto &= ipv6_mask->hdr.proto;
1168 /* Update the field protocol hdr bitmap */
1169 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1170 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1172 return BNXT_TF_RC_SUCCESS;
1175 /* Function to handle the update of proto header based on field values */
1177 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1180 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1181 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1182 BNXT_ULP_HDR_BIT_T_VXLAN);
1183 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1187 /* Function to handle the parsing of RTE Flow item UDP Header. */
1189 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1190 struct ulp_rte_parser_params *params)
1192 const struct rte_flow_item_udp *udp_spec = item->spec;
1193 const struct rte_flow_item_udp *udp_mask = item->mask;
1194 struct ulp_rte_hdr_field *field;
1195 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1196 uint32_t idx = params->field_idx;
1198 uint16_t dst_port = 0;
1201 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1203 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1204 return BNXT_TF_RC_ERROR;
1208 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1212 size = sizeof(udp_spec->hdr.src_port);
1213 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1214 &udp_spec->hdr.src_port,
1217 size = sizeof(udp_spec->hdr.dst_port);
1218 field = ulp_rte_parser_fld_copy(field,
1219 &udp_spec->hdr.dst_port,
1221 dst_port = udp_spec->hdr.dst_port;
1222 size = sizeof(udp_spec->hdr.dgram_len);
1223 field = ulp_rte_parser_fld_copy(field,
1224 &udp_spec->hdr.dgram_len,
1226 size = sizeof(udp_spec->hdr.dgram_cksum);
1227 field = ulp_rte_parser_fld_copy(field,
1228 &udp_spec->hdr.dgram_cksum,
1232 ulp_rte_prsr_mask_copy(params, &idx,
1233 &udp_mask->hdr.src_port,
1234 sizeof(udp_mask->hdr.src_port));
1235 ulp_rte_prsr_mask_copy(params, &idx,
1236 &udp_mask->hdr.dst_port,
1237 sizeof(udp_mask->hdr.dst_port));
1238 ulp_rte_prsr_mask_copy(params, &idx,
1239 &udp_mask->hdr.dgram_len,
1240 sizeof(udp_mask->hdr.dgram_len));
1241 ulp_rte_prsr_mask_copy(params, &idx,
1242 &udp_mask->hdr.dgram_cksum,
1243 sizeof(udp_mask->hdr.dgram_cksum));
1246 /* Add number of UDP header elements */
1247 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1249 /* Set the udp header bitmap and computed l4 header bitmaps */
1250 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1251 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1252 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1253 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1255 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1256 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1257 /* Update the field protocol hdr bitmap */
1258 ulp_rte_l4_proto_type_update(params, dst_port);
1260 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1261 return BNXT_TF_RC_SUCCESS;
1264 /* Function to handle the parsing of RTE Flow item TCP Header. */
1266 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1267 struct ulp_rte_parser_params *params)
1269 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1270 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1271 struct ulp_rte_hdr_field *field;
1272 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1273 uint32_t idx = params->field_idx;
1277 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1279 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1280 return BNXT_TF_RC_ERROR;
1284 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1288 size = sizeof(tcp_spec->hdr.src_port);
1289 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1290 &tcp_spec->hdr.src_port,
1292 size = sizeof(tcp_spec->hdr.dst_port);
1293 field = ulp_rte_parser_fld_copy(field,
1294 &tcp_spec->hdr.dst_port,
1296 size = sizeof(tcp_spec->hdr.sent_seq);
1297 field = ulp_rte_parser_fld_copy(field,
1298 &tcp_spec->hdr.sent_seq,
1300 size = sizeof(tcp_spec->hdr.recv_ack);
1301 field = ulp_rte_parser_fld_copy(field,
1302 &tcp_spec->hdr.recv_ack,
1304 size = sizeof(tcp_spec->hdr.data_off);
1305 field = ulp_rte_parser_fld_copy(field,
1306 &tcp_spec->hdr.data_off,
1308 size = sizeof(tcp_spec->hdr.tcp_flags);
1309 field = ulp_rte_parser_fld_copy(field,
1310 &tcp_spec->hdr.tcp_flags,
1312 size = sizeof(tcp_spec->hdr.rx_win);
1313 field = ulp_rte_parser_fld_copy(field,
1314 &tcp_spec->hdr.rx_win,
1316 size = sizeof(tcp_spec->hdr.cksum);
1317 field = ulp_rte_parser_fld_copy(field,
1318 &tcp_spec->hdr.cksum,
1320 size = sizeof(tcp_spec->hdr.tcp_urp);
1321 field = ulp_rte_parser_fld_copy(field,
1322 &tcp_spec->hdr.tcp_urp,
1325 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1329 ulp_rte_prsr_mask_copy(params, &idx,
1330 &tcp_mask->hdr.src_port,
1331 sizeof(tcp_mask->hdr.src_port));
1332 ulp_rte_prsr_mask_copy(params, &idx,
1333 &tcp_mask->hdr.dst_port,
1334 sizeof(tcp_mask->hdr.dst_port));
1335 ulp_rte_prsr_mask_copy(params, &idx,
1336 &tcp_mask->hdr.sent_seq,
1337 sizeof(tcp_mask->hdr.sent_seq));
1338 ulp_rte_prsr_mask_copy(params, &idx,
1339 &tcp_mask->hdr.recv_ack,
1340 sizeof(tcp_mask->hdr.recv_ack));
1341 ulp_rte_prsr_mask_copy(params, &idx,
1342 &tcp_mask->hdr.data_off,
1343 sizeof(tcp_mask->hdr.data_off));
1344 ulp_rte_prsr_mask_copy(params, &idx,
1345 &tcp_mask->hdr.tcp_flags,
1346 sizeof(tcp_mask->hdr.tcp_flags));
1347 ulp_rte_prsr_mask_copy(params, &idx,
1348 &tcp_mask->hdr.rx_win,
1349 sizeof(tcp_mask->hdr.rx_win));
1350 ulp_rte_prsr_mask_copy(params, &idx,
1351 &tcp_mask->hdr.cksum,
1352 sizeof(tcp_mask->hdr.cksum));
1353 ulp_rte_prsr_mask_copy(params, &idx,
1354 &tcp_mask->hdr.tcp_urp,
1355 sizeof(tcp_mask->hdr.tcp_urp));
1357 /* add number of TCP header elements */
1358 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1360 /* Set the udp header bitmap and computed l4 header bitmaps */
1361 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1362 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1363 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1364 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1366 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1367 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1369 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1370 return BNXT_TF_RC_SUCCESS;
1373 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1375 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1376 struct ulp_rte_parser_params *params)
1378 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1379 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1380 struct ulp_rte_hdr_field *field;
1381 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1382 uint32_t idx = params->field_idx;
1386 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1390 size = sizeof(vxlan_spec->flags);
1391 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1394 size = sizeof(vxlan_spec->rsvd0);
1395 field = ulp_rte_parser_fld_copy(field,
1398 size = sizeof(vxlan_spec->vni);
1399 field = ulp_rte_parser_fld_copy(field,
1402 size = sizeof(vxlan_spec->rsvd1);
1403 field = ulp_rte_parser_fld_copy(field,
1408 ulp_rte_prsr_mask_copy(params, &idx,
1410 sizeof(vxlan_mask->flags));
1411 ulp_rte_prsr_mask_copy(params, &idx,
1413 sizeof(vxlan_mask->rsvd0));
1414 ulp_rte_prsr_mask_copy(params, &idx,
1416 sizeof(vxlan_mask->vni));
1417 ulp_rte_prsr_mask_copy(params, &idx,
1419 sizeof(vxlan_mask->rsvd1));
1421 /* Add number of vxlan header elements */
1422 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1424 /* Update the hdr_bitmap with vxlan */
1425 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1426 return BNXT_TF_RC_SUCCESS;
1429 /* Function to handle the parsing of RTE Flow item void Header */
1431 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1432 struct ulp_rte_parser_params *params __rte_unused)
1434 return BNXT_TF_RC_SUCCESS;
1437 /* Function to handle the parsing of RTE Flow action void Header. */
1439 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1440 struct ulp_rte_parser_params *params __rte_unused)
1442 return BNXT_TF_RC_SUCCESS;
1445 /* Function to handle the parsing of RTE Flow action Mark Header. */
1447 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1448 struct ulp_rte_parser_params *param)
1450 const struct rte_flow_action_mark *mark;
1451 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1454 mark = action_item->conf;
1456 mark_id = tfp_cpu_to_be_32(mark->id);
1457 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1458 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1460 /* Update the hdr_bitmap with vxlan */
1461 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1462 return BNXT_TF_RC_SUCCESS;
1464 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1465 return BNXT_TF_RC_ERROR;
1468 /* Function to handle the parsing of RTE Flow action RSS Header. */
1470 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1471 struct ulp_rte_parser_params *param)
1473 const struct rte_flow_action_rss *rss = action_item->conf;
1476 /* Update the hdr_bitmap with vxlan */
1477 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1478 return BNXT_TF_RC_SUCCESS;
1480 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1481 return BNXT_TF_RC_ERROR;
1484 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1486 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1487 struct ulp_rte_parser_params *params)
1489 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1490 const struct rte_flow_item *item;
1491 const struct rte_flow_item_eth *eth_spec;
1492 const struct rte_flow_item_ipv4 *ipv4_spec;
1493 const struct rte_flow_item_ipv6 *ipv6_spec;
1494 struct rte_flow_item_vxlan vxlan_spec;
1495 uint32_t vlan_num = 0, vlan_size = 0;
1496 uint32_t ip_size = 0, ip_type = 0;
1497 uint32_t vxlan_size = 0;
1499 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1500 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1502 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1503 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1505 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1506 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1507 const uint8_t *tmp_buff;
1509 vxlan_encap = action_item->conf;
1511 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1512 return BNXT_TF_RC_ERROR;
1515 item = vxlan_encap->definition;
1517 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1518 return BNXT_TF_RC_ERROR;
1521 if (!ulp_rte_item_skip_void(&item, 0))
1522 return BNXT_TF_RC_ERROR;
1524 /* must have ethernet header */
1525 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1526 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1527 return BNXT_TF_RC_ERROR;
1529 eth_spec = item->spec;
1530 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1531 ulp_encap_buffer_copy(buff,
1532 eth_spec->dst.addr_bytes,
1533 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1534 ULP_BUFFER_ALIGN_8_BYTE);
1536 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1537 ulp_encap_buffer_copy(buff,
1538 eth_spec->src.addr_bytes,
1539 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1540 ULP_BUFFER_ALIGN_8_BYTE);
1542 /* Goto the next item */
1543 if (!ulp_rte_item_skip_void(&item, 1))
1544 return BNXT_TF_RC_ERROR;
1546 /* May have vlan header */
1547 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1549 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1550 ulp_encap_buffer_copy(buff,
1552 sizeof(struct rte_vlan_hdr),
1553 ULP_BUFFER_ALIGN_8_BYTE);
1555 if (!ulp_rte_item_skip_void(&item, 1))
1556 return BNXT_TF_RC_ERROR;
1559 /* may have two vlan headers */
1560 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1562 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1563 sizeof(struct rte_vlan_hdr)],
1565 sizeof(struct rte_vlan_hdr));
1566 if (!ulp_rte_item_skip_void(&item, 1))
1567 return BNXT_TF_RC_ERROR;
1569 /* Update the vlan count and size of more than one */
1571 vlan_size = vlan_num * sizeof(struct rte_vlan_hdr);
1572 vlan_num = tfp_cpu_to_be_32(vlan_num);
1573 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1576 vlan_size = tfp_cpu_to_be_32(vlan_size);
1577 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1582 /* L3 must be IPv4, IPv6 */
1583 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1584 ipv4_spec = item->spec;
1585 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1587 /* copy the ipv4 details */
1588 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1589 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1590 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1591 ulp_encap_buffer_copy(buff,
1593 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1594 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1595 ULP_BUFFER_ALIGN_8_BYTE);
1597 /* Total length being ignored in the ip hdr. */
1598 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1599 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1600 ulp_encap_buffer_copy(buff,
1602 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1603 ULP_BUFFER_ALIGN_8_BYTE);
1604 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1605 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1606 ulp_encap_buffer_copy(buff,
1607 &ipv4_spec->hdr.version_ihl,
1608 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1609 ULP_BUFFER_ALIGN_8_BYTE);
1612 /* Update the dst ip address in ip encap buffer */
1613 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1614 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1615 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1616 ulp_encap_buffer_copy(buff,
1617 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1618 sizeof(ipv4_spec->hdr.dst_addr),
1619 ULP_BUFFER_ALIGN_8_BYTE);
1621 /* Update the src ip address */
1622 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1623 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1624 sizeof(ipv4_spec->hdr.src_addr)];
1625 ulp_encap_buffer_copy(buff,
1626 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1627 sizeof(ipv4_spec->hdr.src_addr),
1628 ULP_BUFFER_ALIGN_8_BYTE);
1630 /* Update the ip size details */
1631 ip_size = tfp_cpu_to_be_32(ip_size);
1632 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1633 &ip_size, sizeof(uint32_t));
1635 /* update the ip type */
1636 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1637 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1638 &ip_type, sizeof(uint32_t));
1640 /* update the computed field to notify it is ipv4 header */
1641 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1644 if (!ulp_rte_item_skip_void(&item, 1))
1645 return BNXT_TF_RC_ERROR;
1646 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1647 ipv6_spec = item->spec;
1648 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1650 /* copy the ipv6 details */
1651 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1652 if (ulp_buffer_is_empty(tmp_buff,
1653 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1654 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1655 ulp_encap_buffer_copy(buff,
1657 sizeof(def_ipv6_hdr),
1658 ULP_BUFFER_ALIGN_8_BYTE);
1660 /* The payload length being ignored in the ip hdr. */
1661 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1662 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1663 ulp_encap_buffer_copy(buff,
1665 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1666 ULP_BUFFER_ALIGN_8_BYTE);
1667 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1668 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1669 BNXT_ULP_ENCAP_IPV6_DO];
1670 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1671 ulp_encap_buffer_copy(buff,
1673 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1674 ULP_BUFFER_ALIGN_8_BYTE);
1676 /* Update the dst ip address in ip encap buffer */
1677 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1678 sizeof(def_ipv6_hdr)];
1679 ulp_encap_buffer_copy(buff,
1680 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1681 sizeof(ipv6_spec->hdr.dst_addr),
1682 ULP_BUFFER_ALIGN_8_BYTE);
1684 /* Update the src ip address */
1685 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1686 ulp_encap_buffer_copy(buff,
1687 (const uint8_t *)ipv6_spec->hdr.src_addr,
1688 sizeof(ipv6_spec->hdr.src_addr),
1689 ULP_BUFFER_ALIGN_16_BYTE);
1691 /* Update the ip size details */
1692 ip_size = tfp_cpu_to_be_32(ip_size);
1693 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1694 &ip_size, sizeof(uint32_t));
1696 /* update the ip type */
1697 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1698 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1699 &ip_type, sizeof(uint32_t));
1701 /* update the computed field to notify it is ipv6 header */
1702 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1705 if (!ulp_rte_item_skip_void(&item, 1))
1706 return BNXT_TF_RC_ERROR;
1708 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1709 return BNXT_TF_RC_ERROR;
1713 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1714 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1715 return BNXT_TF_RC_ERROR;
1717 /* copy the udp details */
1718 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1719 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1720 ULP_BUFFER_ALIGN_8_BYTE);
1722 if (!ulp_rte_item_skip_void(&item, 1))
1723 return BNXT_TF_RC_ERROR;
1726 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1727 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1728 return BNXT_TF_RC_ERROR;
1730 vxlan_size = sizeof(struct rte_vxlan_hdr);
1731 /* copy the vxlan details */
1732 memcpy(&vxlan_spec, item->spec, vxlan_size);
1733 vxlan_spec.flags = 0x08;
1734 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
1735 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1736 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1737 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1739 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1740 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1741 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1742 (const uint8_t *)&vxlan_spec.vni,
1743 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1745 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1746 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1747 &vxlan_size, sizeof(uint32_t));
1749 /* update the hdr_bitmap with vxlan */
1750 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1751 return BNXT_TF_RC_SUCCESS;
1754 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1756 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1758 struct ulp_rte_parser_params *params)
1760 /* update the hdr_bitmap with vxlan */
1761 ULP_BITMAP_SET(params->act_bitmap.bits,
1762 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1763 /* Update computational field with tunnel decap info */
1764 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1765 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1766 return BNXT_TF_RC_SUCCESS;
1769 /* Function to handle the parsing of RTE Flow action drop Header. */
1771 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1772 struct ulp_rte_parser_params *params)
1774 /* Update the hdr_bitmap with drop */
1775 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1776 return BNXT_TF_RC_SUCCESS;
1779 /* Function to handle the parsing of RTE Flow action count. */
1781 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1782 struct ulp_rte_parser_params *params)
1785 const struct rte_flow_action_count *act_count;
1786 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1788 act_count = action_item->conf;
1790 if (act_count->shared) {
1792 "Parse Error:Shared count not supported\n");
1793 return BNXT_TF_RC_PARSE_ERR;
1795 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1797 BNXT_ULP_ACT_PROP_SZ_COUNT);
1800 /* Update the hdr_bitmap with count */
1801 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1802 return BNXT_TF_RC_SUCCESS;
1805 /* Function to handle the parsing of action ports. */
1807 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1810 enum bnxt_ulp_direction_type dir;
1813 struct ulp_rte_act_prop *act = ¶m->act_prop;
1814 enum bnxt_ulp_intf_type port_type;
1817 /* Get the direction */
1818 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1819 if (dir == BNXT_ULP_DIR_EGRESS) {
1820 /* For egress direction, fill vport */
1821 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1822 return BNXT_TF_RC_ERROR;
1825 pid = rte_cpu_to_be_32(pid);
1826 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1827 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1829 /* For ingress direction, fill vnic */
1830 port_type = ULP_COMP_FLD_IDX_RD(param,
1831 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1832 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1833 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1835 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1837 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1839 return BNXT_TF_RC_ERROR;
1842 pid = rte_cpu_to_be_32(pid);
1843 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1844 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1847 /* Update the action port set bit */
1848 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1849 return BNXT_TF_RC_SUCCESS;
1852 /* Function to handle the parsing of RTE Flow action PF. */
1854 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1855 struct ulp_rte_parser_params *params)
1859 enum bnxt_ulp_intf_type intf_type;
1861 /* Get the port id of the current device */
1862 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1864 /* Get the port db ifindex */
1865 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1867 BNXT_TF_DBG(ERR, "Invalid port id\n");
1868 return BNXT_TF_RC_ERROR;
1871 /* Check the port is PF port */
1872 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1873 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1874 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1875 return BNXT_TF_RC_ERROR;
1877 /* Update the action properties */
1878 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1879 return ulp_rte_parser_act_port_set(params, ifindex);
1882 /* Function to handle the parsing of RTE Flow action VF. */
1884 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1885 struct ulp_rte_parser_params *params)
1887 const struct rte_flow_action_vf *vf_action;
1889 enum bnxt_ulp_intf_type intf_type;
1891 vf_action = action_item->conf;
1893 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1894 return BNXT_TF_RC_PARSE_ERR;
1897 if (vf_action->original) {
1898 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1899 return BNXT_TF_RC_PARSE_ERR;
1902 /* Check the port is VF port */
1903 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1905 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1906 return BNXT_TF_RC_ERROR;
1908 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1909 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1910 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1911 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1912 return BNXT_TF_RC_ERROR;
1915 /* Update the action properties */
1916 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1917 return ulp_rte_parser_act_port_set(params, ifindex);
1920 /* Function to handle the parsing of RTE Flow action port_id. */
1922 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1923 struct ulp_rte_parser_params *param)
1925 const struct rte_flow_action_port_id *port_id = act_item->conf;
1927 enum bnxt_ulp_intf_type intf_type;
1931 "ParseErr: Invalid Argument\n");
1932 return BNXT_TF_RC_PARSE_ERR;
1934 if (port_id->original) {
1936 "ParseErr:Portid Original not supported\n");
1937 return BNXT_TF_RC_PARSE_ERR;
1940 /* Get the port db ifindex */
1941 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1943 BNXT_TF_DBG(ERR, "Invalid port id\n");
1944 return BNXT_TF_RC_ERROR;
1947 /* Get the intf type */
1948 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1950 BNXT_TF_DBG(ERR, "Invalid port type\n");
1951 return BNXT_TF_RC_ERROR;
1954 /* Set the action port */
1955 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1956 return ulp_rte_parser_act_port_set(param, ifindex);
1959 /* Function to handle the parsing of RTE Flow action phy_port. */
1961 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1962 struct ulp_rte_parser_params *prm)
1964 const struct rte_flow_action_phy_port *phy_port;
1968 enum bnxt_ulp_direction_type dir;
1970 phy_port = action_item->conf;
1973 "ParseErr: Invalid Argument\n");
1974 return BNXT_TF_RC_PARSE_ERR;
1977 if (phy_port->original) {
1979 "Parse Err:Port Original not supported\n");
1980 return BNXT_TF_RC_PARSE_ERR;
1982 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1983 if (dir != BNXT_ULP_DIR_EGRESS) {
1985 "Parse Err:Phy ports are valid only for egress\n");
1986 return BNXT_TF_RC_PARSE_ERR;
1988 /* Get the physical port details from port db */
1989 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1992 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1997 pid = rte_cpu_to_be_32(pid);
1998 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1999 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2001 /* Update the action port set bit */
2002 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2003 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2004 BNXT_ULP_INTF_TYPE_PHY_PORT);
2005 return BNXT_TF_RC_SUCCESS;
2008 /* Function to handle the parsing of RTE Flow action pop vlan. */
2010 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2011 struct ulp_rte_parser_params *params)
2013 /* Update the act_bitmap with pop */
2014 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
2015 return BNXT_TF_RC_SUCCESS;
2018 /* Function to handle the parsing of RTE Flow action push vlan. */
2020 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2021 struct ulp_rte_parser_params *params)
2023 const struct rte_flow_action_of_push_vlan *push_vlan;
2025 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2027 push_vlan = action_item->conf;
2029 ethertype = push_vlan->ethertype;
2030 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2032 "Parse Err: Ethertype not supported\n");
2033 return BNXT_TF_RC_PARSE_ERR;
2035 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2036 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2037 /* Update the hdr_bitmap with push vlan */
2038 ULP_BITMAP_SET(params->act_bitmap.bits,
2039 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
2040 return BNXT_TF_RC_SUCCESS;
2042 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2043 return BNXT_TF_RC_ERROR;
2046 /* Function to handle the parsing of RTE Flow action set vlan id. */
2048 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2049 struct ulp_rte_parser_params *params)
2051 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2053 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2055 vlan_vid = action_item->conf;
2056 if (vlan_vid && vlan_vid->vlan_vid) {
2057 vid = vlan_vid->vlan_vid;
2058 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2059 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2060 /* Update the hdr_bitmap with vlan vid */
2061 ULP_BITMAP_SET(params->act_bitmap.bits,
2062 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
2063 return BNXT_TF_RC_SUCCESS;
2065 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2066 return BNXT_TF_RC_ERROR;
2069 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2071 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2072 struct ulp_rte_parser_params *params)
2074 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2076 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2078 vlan_pcp = action_item->conf;
2080 pcp = vlan_pcp->vlan_pcp;
2081 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2082 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2083 /* Update the hdr_bitmap with vlan vid */
2084 ULP_BITMAP_SET(params->act_bitmap.bits,
2085 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
2086 return BNXT_TF_RC_SUCCESS;
2088 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2089 return BNXT_TF_RC_ERROR;
2092 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2094 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2095 struct ulp_rte_parser_params *params)
2097 const struct rte_flow_action_set_ipv4 *set_ipv4;
2098 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2100 set_ipv4 = action_item->conf;
2102 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2103 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2104 /* Update the hdr_bitmap with set ipv4 src */
2105 ULP_BITMAP_SET(params->act_bitmap.bits,
2106 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
2107 return BNXT_TF_RC_SUCCESS;
2109 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2110 return BNXT_TF_RC_ERROR;
2113 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2115 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2116 struct ulp_rte_parser_params *params)
2118 const struct rte_flow_action_set_ipv4 *set_ipv4;
2119 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2121 set_ipv4 = action_item->conf;
2123 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2124 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2125 /* Update the hdr_bitmap with set ipv4 dst */
2126 ULP_BITMAP_SET(params->act_bitmap.bits,
2127 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
2128 return BNXT_TF_RC_SUCCESS;
2130 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2131 return BNXT_TF_RC_ERROR;
2134 /* Function to handle the parsing of RTE Flow action set tp src.*/
2136 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2137 struct ulp_rte_parser_params *params)
2139 const struct rte_flow_action_set_tp *set_tp;
2140 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2142 set_tp = action_item->conf;
2144 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2145 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2146 /* Update the hdr_bitmap with set tp src */
2147 ULP_BITMAP_SET(params->act_bitmap.bits,
2148 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
2149 return BNXT_TF_RC_SUCCESS;
2152 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2153 return BNXT_TF_RC_ERROR;
2156 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2158 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2159 struct ulp_rte_parser_params *params)
2161 const struct rte_flow_action_set_tp *set_tp;
2162 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2164 set_tp = action_item->conf;
2166 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2167 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2168 /* Update the hdr_bitmap with set tp dst */
2169 ULP_BITMAP_SET(params->act_bitmap.bits,
2170 BNXT_ULP_ACTION_BIT_SET_TP_DST);
2171 return BNXT_TF_RC_SUCCESS;
2174 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2175 return BNXT_TF_RC_ERROR;
2178 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2180 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2181 struct ulp_rte_parser_params *params)
2183 /* Update the act_bitmap with dec ttl */
2184 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2185 return BNXT_TF_RC_SUCCESS;
2188 /* Function to handle the parsing of RTE Flow action JUMP */
2190 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2191 struct ulp_rte_parser_params *params)
2193 /* Update the act_bitmap with dec ttl */
2194 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
2195 return BNXT_TF_RC_SUCCESS;