1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
20 #include "ulp_template_db_tbl.h"
22 /* Local defines for the parsing functions */
23 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
/* PCP mask as it appears after htons() of the 3-bit priority value */
24 #define ULP_VLAN_PRIORITY_MASK 0x700
25 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
/* IANA-assigned VXLAN UDP destination port */
26 #define ULP_UDP_PORT_VXLAN 4789
28 /* Utility function to skip the void items. */
/* Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries so the
 * caller always lands on a meaningful pattern item (or the END item).
 * NOTE(review): loop body and return are not visible in this view —
 * presumably *item is advanced by 'increment' each iteration; confirm.
 */
30 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
36 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
43 /* Utility function to update the field_bitmap */
45 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
47 enum bnxt_ulp_prsr_action prsr_act)
49 struct ulp_rte_hdr_field *field;
51 field = ¶ms->hdr_field[idx];
52 if (ulp_bitmap_notzero(field->mask, field->size)) {
53 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
54 if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
55 ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
57 if (!ulp_bitmap_is_ones(field->mask, field->size))
58 ULP_COMP_FLD_IDX_WR(params,
59 BNXT_ULP_CF_IDX_WC_MATCH, 1);
61 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
/* Return &((x)->y) when x is non-NULL, else NULL.  The historical
 * "deference" (sic) spelling is kept: the macro is used throughout the
 * parser and renaming it would break every call site. */
65 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
66 /* Utility function to copy field spec and masks items */
68 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
71 const void *spec_buff,
72 const void *mask_buff,
73 enum bnxt_ulp_prsr_action prsr_act)
75 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
77 /* update the field size */
80 /* copy the mask specifications only if mask is not null */
81 if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
82 memcpy(field->mask, mask_buff, size);
83 ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
86 /* copy the protocol specifications only if mask is not null*/
87 if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
88 memcpy(field->spec, spec_buff, size);
90 /* Increment the index */
/* Reserve 'size' consecutive hdr_field slots for a protocol header.
 * Returns the starting slot through *idx and bumps params->field_idx.
 * Fails with an OOB error when the reservation would reach
 * BNXT_ULP_PROTO_HDR_MAX.  (The previous one-line comment was a stale
 * copy-paste from ulp_rte_prsr_fld_mask.)
 */
96 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
100 if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
101 BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
104 *idx = params->field_idx;
105 params->field_idx += size;
110 * Function to handle the parsing of RTE Flows and placing
111 * the RTE flow items into the ulp structures.
/* Walks the pattern list until RTE_FLOW_ITEM_TYPE_END, dispatching each
 * supported item to its registered proto_hdr_func callback from
 * ulp_hdr_info[], then applies the implicit SVIF match for the incoming
 * port.  Unsupported item types abort with BNXT_TF_RC_PARSE_ERR.
 */
114 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
115 struct ulp_rte_parser_params *params)
117 const struct rte_flow_item *item = pattern;
118 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Slots below BNXT_ULP_PROTO_HDR_SVIF_NUM are reserved for the SVIF field */
120 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
122 /* Set the computed flags for no vlan tags before parsing */
123 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
124 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
126 /* Parse all the items in the pattern */
127 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
128 /* get the header information from the flow_hdr_info table */
129 hdr_info = &ulp_hdr_info[item->type];
130 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
132 "Truflow parser does not support type %d\n",
134 return BNXT_TF_RC_PARSE_ERR;
135 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
136 /* call the registered callback handler */
137 if (hdr_info->proto_hdr_func) {
138 if (hdr_info->proto_hdr_func(item, params) !=
139 BNXT_TF_RC_SUCCESS) {
140 return BNXT_TF_RC_ERROR;
146 /* update the implied SVIF */
147 return ulp_rte_parser_implicit_match_port_process(params);
151 * Function to handle the parsing of RTE Flows and placing
152 * the RTE flow actions into the ulp structures.
/* Mirror of bnxt_ulp_rte_parser_hdr_parse for the action list: walks
 * until RTE_FLOW_ACTION_TYPE_END, dispatching each supported action to
 * its proto_act_func callback from ulp_act_info[], then fills in the
 * implicit action port when no explicit port action was given.
 */
155 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
156 struct ulp_rte_parser_params *params)
158 const struct rte_flow_action *action_item = actions;
159 struct bnxt_ulp_rte_act_info *hdr_info;
161 /* Parse all the items in the pattern */
162 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
163 /* get the header information from the flow_hdr_info table */
164 hdr_info = &ulp_act_info[action_item->type];
165 if (hdr_info->act_type ==
166 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
168 "Truflow parser does not support act %u\n",
170 return BNXT_TF_RC_ERROR;
171 } else if (hdr_info->act_type ==
172 BNXT_ULP_ACT_TYPE_SUPPORTED) {
173 /* call the registered callback handler */
174 if (hdr_info->proto_act_func) {
175 if (hdr_info->proto_act_func(action_item,
177 BNXT_TF_RC_SUCCESS) {
178 return BNXT_TF_RC_ERROR;
184 /* update the implied port details */
185 ulp_rte_parser_implicit_act_port_process(params);
186 return BNXT_TF_RC_SUCCESS;
190 * Function to handle the post processing of the computed
191 * fields for the interface.
/* Resolves the PARIF computed fields from the port database once the
 * flow direction and match port type are known: ingress flows get the
 * physical-port PARIF; egress(-side) processing sets VF-func PARIF for
 * VF representors or DRV-func PARIF otherwise, plus the IS_VFREP/IS_PF
 * match flags.  NOTE(review): declarations of 'ifindex' and 'mtype' are
 * not visible in this view — presumably uint32_t and
 * enum bnxt_ulp_intf_type; confirm against the full source.
 */
194 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
197 uint16_t port_id, parif;
199 enum bnxt_ulp_direction_type dir;
201 /* get the direction details */
202 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
204 /* read the port id details */
205 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
206 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
209 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
213 if (dir == BNXT_ULP_DIR_INGRESS) {
215 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
216 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
217 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
220 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
223 /* Get the match port type */
224 mtype = ULP_COMP_FLD_IDX_RD(params,
225 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
226 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
227 ULP_COMP_FLD_IDX_WR(params,
228 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
230 /* Set VF func PARIF */
231 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
232 BNXT_ULP_VF_FUNC_PARIF,
235 "ParseErr:ifindex is not valid\n");
238 ULP_COMP_FLD_IDX_WR(params,
239 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
243 /* Set DRV func PARIF */
244 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
245 BNXT_ULP_DRV_FUNC_PARIF,
248 "ParseErr:ifindex is not valid\n");
251 ULP_COMP_FLD_IDX_WR(params,
252 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
255 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
256 ULP_COMP_FLD_IDX_WR(params,
257 BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
/* Post-processing for a normal (non-tunnel-specific) flow: stamps the
 * direction bit into both header and action bitmaps, derives the
 * VF-to-VF computed flag, translates ACT_BIT_DEC_TTL into the tunnel or
 * plain dec-TTL computed fields, folds hdr_fp_bit into hdr_bitmap,
 * records the flow id, and updates the interface computed fields.
 */
264 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
266 enum bnxt_ulp_intf_type match_port_type, act_port_type;
267 enum bnxt_ulp_direction_type dir;
268 uint32_t act_port_set;
270 /* Get the computed details */
271 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
272 match_port_type = ULP_COMP_FLD_IDX_RD(params,
273 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
274 act_port_type = ULP_COMP_FLD_IDX_RD(params,
275 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
276 act_port_set = ULP_COMP_FLD_IDX_RD(params,
277 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
279 /* set the flow direction in the proto and action header */
280 if (dir == BNXT_ULP_DIR_EGRESS) {
281 ULP_BITMAP_SET(params->hdr_bitmap.bits,
282 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
283 ULP_BITMAP_SET(params->act_bitmap.bits,
284 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
287 /* calculate the VF to VF flag */
288 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
289 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
290 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
292 /* Update the decrement ttl computational fields */
293 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
294 BNXT_ULP_ACT_BIT_DEC_TTL)) {
296 * Check that vxlan proto is included and vxlan decap
297 * action is not set then decrement tunnel ttl.
298 * Similarly add GRE and NVGRE in future.
300 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
301 BNXT_ULP_HDR_BIT_T_VXLAN) &&
302 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
303 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
304 ULP_COMP_FLD_IDX_WR(params,
305 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
307 ULP_COMP_FLD_IDX_WR(params,
308 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
312 /* Merge the hdr_fp_bit into the proto header bit */
313 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
315 /* Update the comp fld fid */
316 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
318 /* Update the computed interface parameters */
319 bnxt_ulp_comp_fld_intf_update(params);
321 /* TBD: Handle the flow rejection scenarios */
326 * Function to handle the post processing of the parsing details
/* Runs normal-flow post processing first, then tunnel-flow post
 * processing; the tunnel-flow result is the overall return code. */
329 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
331 ulp_post_process_normal_flow(params);
332 return ulp_post_process_tun_flow(params);
336 * Function to compute the flow direction based on the match port details
/* Special case first: an ingress-attributed flow whose match port is a
 * VF representor is really egress from the hardware's point of view.
 * Otherwise the direction simply follows the flow attribute.
 * NOTE(review): a 'return' after the VF-rep branch is not visible in
 * this view — presumably present so the attribute branch below is
 * skipped; confirm against the full source.
 */
339 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
341 enum bnxt_ulp_intf_type match_port_type;
343 /* Get the match port type */
344 match_port_type = ULP_COMP_FLD_IDX_RD(params,
345 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
347 /* If ingress flow and matchport is vf rep then dir is egress*/
348 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
349 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
350 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
351 BNXT_ULP_DIR_EGRESS);
353 /* Assign the input direction */
354 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
355 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
356 BNXT_ULP_DIR_INGRESS);
358 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
359 BNXT_ULP_DIR_EGRESS);
363 /* Function to handle the parsing of RTE Flow item PF Header. */
365 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
370 enum bnxt_ulp_direction_type dir;
371 struct ulp_rte_hdr_field *hdr_field;
372 enum bnxt_ulp_svif_type svif_type;
373 enum bnxt_ulp_intf_type port_type;
375 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
376 BNXT_ULP_INVALID_SVIF_VAL) {
378 "SVIF already set,multiple source not support'd\n");
379 return BNXT_TF_RC_ERROR;
382 /* Get port type details */
383 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
384 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
385 BNXT_TF_DBG(ERR, "Invalid port type\n");
386 return BNXT_TF_RC_ERROR;
389 /* Update the match port type */
390 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
392 /* compute the direction */
393 bnxt_ulp_rte_parser_direction_compute(params);
395 /* Get the computed direction */
396 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
397 if (dir == BNXT_ULP_DIR_INGRESS) {
398 svif_type = BNXT_ULP_PHY_PORT_SVIF;
400 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
401 svif_type = BNXT_ULP_VF_FUNC_SVIF;
403 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
405 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
407 svif = rte_cpu_to_be_16(svif);
408 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
409 memcpy(hdr_field->spec, &svif, sizeof(svif));
410 memcpy(hdr_field->mask, &mask, sizeof(mask));
411 hdr_field->size = sizeof(svif);
412 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
413 rte_be_to_cpu_16(svif));
414 return BNXT_TF_RC_SUCCESS;
417 /* Function to handle the parsing of the RTE port id */
/* If no explicit SVIF was matched by the pattern, derive it from the
 * incoming port id (full 0xFFFF mask) via the port database. */
419 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
421 uint16_t port_id = 0;
422 uint16_t svif_mask = 0xFFFF;
424 int32_t rc = BNXT_TF_RC_ERROR;
426 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
427 BNXT_ULP_INVALID_SVIF_VAL)
428 return BNXT_TF_RC_SUCCESS;
430 /* SVIF not set. So get the port id */
431 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
433 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
436 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
440 /* Update the SVIF details */
441 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
445 /* Function to handle the implicit action port id */
/* If the action list never set a destination port, synthesize a
 * port-id action from the incoming port and run it through the normal
 * port-id action handler; the "port is set" flag is then cleared so the
 * implicit port is not treated as an explicit one. */
447 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
449 struct rte_flow_action action_item = {0};
450 struct rte_flow_action_port_id port_id = {0};
452 /* Read the action port set bit */
453 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
454 /* Already set, so just exit */
455 return BNXT_TF_RC_SUCCESS;
457 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
458 action_item.conf = &port_id;
460 /* Update the action port based on incoming port */
461 ulp_rte_port_id_act_handler(&action_item, params);
463 /* Reset the action port set bit */
464 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
465 return BNXT_TF_RC_SUCCESS;
468 /* Function to handle the parsing of RTE Flow item PF Header. */
/* PF item carries no data: match the incoming port itself.  Convert the
 * DPDK port id to a bnxt ifindex and set the SVIF with a full mask. */
470 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
471 struct ulp_rte_parser_params *params)
473 uint16_t port_id = 0;
474 uint16_t svif_mask = 0xFFFF;
477 /* Get the implicit port id */
478 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
480 /* perform the conversion from dpdk port to bnxt ifindex */
481 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
484 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
485 return BNXT_TF_RC_ERROR;
488 /* Update the SVIF details */
489 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
492 /* Function to handle the parsing of RTE Flow item VF Header. */
/* Validates the VF item's spec/mask, converts the VF function id to a
 * bnxt ifindex via the port database, and sets the SVIF match.
 * NOTE(review): the spec/mask validation bodies and the 'mask'
 * declaration are not visible in this view. */
494 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
495 struct ulp_rte_parser_params *params)
497 const struct rte_flow_item_vf *vf_spec = item->spec;
498 const struct rte_flow_item_vf *vf_mask = item->mask;
501 int32_t rc = BNXT_TF_RC_PARSE_ERR;
503 /* Get VF rte_flow_item for Port details */
505 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
509 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
514 /* perform the conversion from VF Func id to bnxt ifindex */
515 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
518 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
521 /* Update the SVIF details */
522 return ulp_rte_parser_svif_set(params, ifindex, mask);
525 /* Function to handle the parsing of RTE Flow item port id Header. */
/* Validates the port-id item's spec/mask, converts the DPDK port id to
 * a bnxt ifindex, and sets the SVIF match using the item's mask. */
527 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
528 struct ulp_rte_parser_params *params)
530 const struct rte_flow_item_port_id *port_spec = item->spec;
531 const struct rte_flow_item_port_id *port_mask = item->mask;
533 int32_t rc = BNXT_TF_RC_PARSE_ERR;
537 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
541 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
544 mask = port_mask->id;
546 /* perform the conversion from dpdk port to bnxt ifindex */
547 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
550 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
553 /* Update the SVIF details */
554 return ulp_rte_parser_svif_set(params, ifindex, mask);
557 /* Function to handle the parsing of RTE Flow item phy port Header. */
/* Matches a physical port: valid only for ingress flows.  Looks up the
 * port's SVIF in the port database and writes it (big-endian) into the
 * reserved SVIF hdr_field slot, recording the host-order value in the
 * SVIF_FLAG computed field. */
559 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
560 struct ulp_rte_parser_params *params)
562 const struct rte_flow_item_phy_port *port_spec = item->spec;
563 const struct rte_flow_item_phy_port *port_mask = item->mask;
565 int32_t rc = BNXT_TF_RC_ERROR;
567 enum bnxt_ulp_direction_type dir;
568 struct ulp_rte_hdr_field *hdr_field;
570 /* Copy the rte_flow_item for phy port into hdr_field */
572 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
576 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
579 mask = port_mask->index;
581 /* Update the match port type */
582 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
583 BNXT_ULP_INTF_TYPE_PHY_PORT);
585 /* Compute the Hw direction */
586 bnxt_ulp_rte_parser_direction_compute(params);
588 /* Direction validation */
589 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
590 if (dir == BNXT_ULP_DIR_EGRESS) {
592 "Parse Err:Phy ports are valid only for ingress\n");
593 return BNXT_TF_RC_PARSE_ERR;
596 /* Get the physical port details from port db */
597 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
600 BNXT_TF_DBG(ERR, "Failed to get port details\n");
601 return BNXT_TF_RC_PARSE_ERR;
604 /* Update the SVIF details */
605 svif = rte_cpu_to_be_16(svif);
/* NOTE(review): "¶ms" below is encoding corruption of "&params" —
 * must be restored for this line to compile. */
606 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
607 memcpy(hdr_field->spec, &svif, sizeof(svif));
608 memcpy(hdr_field->mask, &mask, sizeof(mask));
609 hdr_field->size = sizeof(svif);
610 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
611 rte_be_to_cpu_16(svif));
612 return BNXT_TF_RC_SUCCESS;
615 /* Function to handle the update of proto header based on field values */
/* Translates an Ethertype (big-endian) into the corresponding inner or
 * outer IPv4/IPv6 fast-path header bit and L3 computed flag; in_flag
 * selects the inner variants. */
617 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
618 uint16_t type, uint32_t in_flag)
620 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
622 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
623 BNXT_ULP_HDR_BIT_I_IPV4);
624 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
626 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
627 BNXT_ULP_HDR_BIT_O_IPV4);
628 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
630 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
632 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
633 BNXT_ULP_HDR_BIT_I_IPV6);
634 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
636 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
637 BNXT_ULP_HDR_BIT_O_IPV6);
638 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
643 /* Internal Function to identify broadcast or multicast packets */
/* Returns truthy when the address is multicast or broadcast — such
 * destinations cannot be offloaded and the caller rejects the flow. */
645 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
647 if (rte_is_multicast_ether_addr(eth_addr) ||
648 rte_is_broadcast_ether_addr(eth_addr)) {
650 "No support for bcast or mcast addr offload\n");
656 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/* Rejects bcast/mcast MACs, reserves hdr_field slots for the Ethernet
 * header, copies dst/src/type spec+mask, then marks the header as inner
 * (I_ETH) if an outer L2/L3/L4 header was already seen, else outer
 * (O_ETH).  Finally propagates the Ethertype into the L3 header bits. */
658 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
659 struct ulp_rte_parser_params *params)
661 const struct rte_flow_item_eth *eth_spec = item->spec;
662 const struct rte_flow_item_eth *eth_mask = item->mask;
665 uint16_t eth_type = 0;
666 uint32_t inner_flag = 0;
668 /* Perform validations */
670 /* Todo: work around to avoid multicast and broadcast addr */
671 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
672 return BNXT_TF_RC_PARSE_ERR;
674 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
675 return BNXT_TF_RC_PARSE_ERR;
677 eth_type = eth_spec->type;
680 if (ulp_rte_prsr_fld_size_validate(params, &idx,
681 BNXT_ULP_PROTO_HDR_ETH_NUM)) {
682 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
683 return BNXT_TF_RC_ERROR;
686 * Copy the rte_flow_item for eth into hdr_field using ethernet
689 size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
690 ulp_rte_prsr_fld_mask(params, &idx, size,
691 ulp_deference_struct(eth_spec, dst.addr_bytes),
692 ulp_deference_struct(eth_mask, dst.addr_bytes),
693 ULP_PRSR_ACT_DEFAULT);
695 size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
696 ulp_rte_prsr_fld_mask(params, &idx, size,
697 ulp_deference_struct(eth_spec, src.addr_bytes),
698 ulp_deference_struct(eth_mask, src.addr_bytes),
699 ULP_PRSR_ACT_DEFAULT);
701 size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
/* Ethertype is recorded but ignored for exact-match purposes */
702 ulp_rte_prsr_fld_mask(params, &idx, size,
703 ulp_deference_struct(eth_spec, type),
704 ulp_deference_struct(eth_mask, type),
705 ULP_PRSR_ACT_MATCH_IGNORE);
707 /* Update the protocol hdr bitmap */
708 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
709 BNXT_ULP_HDR_BIT_O_ETH) ||
710 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
711 BNXT_ULP_HDR_BIT_O_IPV4) ||
712 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
713 BNXT_ULP_HDR_BIT_O_IPV6) ||
714 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
715 BNXT_ULP_HDR_BIT_O_UDP) ||
716 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
717 BNXT_ULP_HDR_BIT_O_TCP)) {
718 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
721 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
723 /* Update the field protocol hdr bitmap */
724 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
726 return BNXT_TF_RC_SUCCESS;
729 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/* Splits the TCI into priority (top 3 bits) and VLAN tag (low 12 bits),
 * widens exact-match masks to full 16-bit fields, copies priority/tag/
 * inner-type into hdr_field slots, and classifies the tag as OO/OI/IO/II
 * VLAN based on which Ethernet headers were already seen and how many
 * tags were counted so far. */
731 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
732 struct ulp_rte_parser_params *params)
734 const struct rte_flow_item_vlan *vlan_spec = item->spec;
735 const struct rte_flow_item_vlan *vlan_mask = item->mask;
736 struct ulp_rte_hdr_bitmap *hdr_bit;
738 uint16_t vlan_tag = 0, priority = 0;
739 uint16_t vlan_tag_mask = 0, priority_mask = 0;
740 uint32_t outer_vtag_num;
741 uint32_t inner_vtag_num;
742 uint16_t eth_type = 0;
743 uint32_t inner_flag = 0;
/* TCI handled in host order, stored back in network order */
747 vlan_tag = ntohs(vlan_spec->tci);
748 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
749 vlan_tag &= ULP_VLAN_TAG_MASK;
750 vlan_tag = htons(vlan_tag);
751 eth_type = vlan_spec->inner_type;
755 vlan_tag_mask = ntohs(vlan_mask->tci);
756 priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
757 vlan_tag_mask &= 0xfff;
760 * the storage for priority and vlan tag is 2 bytes
761 * The mask of priority which is 3 bits if it is all 1's
762 * then make the rest bits 13 bits as 1's
763 * so that it is matched as exact match.
765 if (priority_mask == ULP_VLAN_PRIORITY_MASK)
766 priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
767 if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
768 vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
769 vlan_tag_mask = htons(vlan_tag_mask);
772 if (ulp_rte_prsr_fld_size_validate(params, &idx,
773 BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
774 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
775 return BNXT_TF_RC_ERROR;
779 * Copy the rte_flow_item for vlan into hdr_field using Vlan
782 size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
784 * The priority field is ignored since OVS is setting it as
785 * wild card match and it is not supported. This is a work
786 * around and shall be addressed in the future.
788 ulp_rte_prsr_fld_mask(params, &idx, size,
791 ULP_PRSR_ACT_MASK_IGNORE);
793 ulp_rte_prsr_fld_mask(params, &idx, size,
796 ULP_PRSR_ACT_DEFAULT);
798 size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
799 ulp_rte_prsr_fld_mask(params, &idx, size,
800 ulp_deference_struct(vlan_spec, inner_type),
801 ulp_deference_struct(vlan_mask, inner_type),
802 ULP_PRSR_ACT_MATCH_IGNORE);
804 /* Get the outer tag and inner tag counts */
805 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
806 BNXT_ULP_CF_IDX_O_VTAG_NUM);
807 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
808 BNXT_ULP_CF_IDX_I_VTAG_NUM);
810 /* Update the hdr_bitmap of the vlans */
/* NOTE(review): "¶ms" below is encoding corruption of "&params" —
 * must be restored for this line to compile. */
811 hdr_bit = ¶ms->hdr_bitmap;
/* First tag after outer eth only: outer-outer (OO) VLAN */
812 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
813 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
815 /* Update the vlan tag num */
817 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
819 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
820 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
821 ULP_BITMAP_SET(params->hdr_bitmap.bits,
822 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Second tag after outer eth only: outer-inner (OI) VLAN */
823 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
824 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
825 outer_vtag_num == 1) {
826 /* update the vlan tag num */
828 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
830 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
831 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
832 ULP_BITMAP_SET(params->hdr_bitmap.bits,
833 BNXT_ULP_HDR_BIT_OI_VLAN);
/* First tag after inner eth: inner-outer (IO) VLAN */
834 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
835 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
837 /* update the vlan tag num */
839 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
841 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
842 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
843 ULP_BITMAP_SET(params->hdr_bitmap.bits,
844 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Second tag after inner eth: inner-inner (II) VLAN */
846 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
847 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
848 inner_vtag_num == 1) {
849 /* update the vlan tag num */
851 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
853 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
854 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
855 ULP_BITMAP_SET(params->hdr_bitmap.bits,
856 BNXT_ULP_HDR_BIT_II_VLAN);
859 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
860 return BNXT_TF_RC_ERROR;
862 /* Update the field protocol hdr bitmap */
863 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
864 return BNXT_TF_RC_SUCCESS;
867 /* Function to handle the update of proto header based on field values */
/* Translates an L3 next-protocol value into the corresponding inner or
 * outer UDP/TCP fast-path bits, GRE tunnel bit, or ICMP bits, and
 * records the protocol id in the L3 computed fields; in_flag selects
 * the inner variants. */
869 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
870 uint8_t proto, uint32_t in_flag)
872 if (proto == IPPROTO_UDP) {
874 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
875 BNXT_ULP_HDR_BIT_I_UDP);
876 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
878 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
879 BNXT_ULP_HDR_BIT_O_UDP);
880 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
882 } else if (proto == IPPROTO_TCP) {
884 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
885 BNXT_ULP_HDR_BIT_I_TCP);
886 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
888 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
889 BNXT_ULP_HDR_BIT_O_TCP);
890 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
892 } else if (proto == IPPROTO_GRE) {
893 ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
894 } else if (proto == IPPROTO_ICMP) {
/* L3 tunnel present means this ICMP is in the inner headers */
895 if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
896 ULP_BITMAP_SET(param->hdr_bitmap.bits,
897 BNXT_ULP_HDR_BIT_I_ICMP);
899 ULP_BITMAP_SET(param->hdr_bitmap.bits,
900 BNXT_ULP_HDR_BIT_O_ICMP);
904 ULP_COMP_FLD_IDX_WR(param,
905 BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
907 ULP_COMP_FLD_IDX_WR(param,
908 BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
911 ULP_COMP_FLD_IDX_WR(param,
912 BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
914 ULP_COMP_FLD_IDX_WR(param,
915 BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
921 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/* Rejects a third L3 header, compensates the field index for F2 flows
 * that omit the Ethernet item, reserves the IPv4 hdr_field slots,
 * copies each IPv4 header field's spec/mask (ToS and next-proto masks
 * are ignored for matching), classifies the header as inner or outer,
 * and propagates the (masked) next protocol into the L4 header bits. */
923 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
924 struct ulp_rte_parser_params *params)
926 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
927 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
/* NOTE(review): "¶ms" below is encoding corruption of "&params" —
 * must be restored for this line to compile. */
928 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
932 uint32_t inner_flag = 0;
935 /* validate there are no 3rd L3 header */
936 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
938 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
939 return BNXT_TF_RC_ERROR;
942 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
943 BNXT_ULP_HDR_BIT_O_ETH) &&
944 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
945 BNXT_ULP_HDR_BIT_I_ETH)) {
946 /* Since F2 flow does not include eth item, when parser detects
947 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
948 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
949 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
950 * This will allow the parser post processor to update the
951 * t_dmac in hdr_field[o_eth.dmac]
953 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
954 BNXT_ULP_PROTO_HDR_VLAN_NUM);
955 params->field_idx = idx;
958 if (ulp_rte_prsr_fld_size_validate(params, &idx,
959 BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
960 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
961 return BNXT_TF_RC_ERROR;
965 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
968 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
969 ulp_rte_prsr_fld_mask(params, &idx, size,
970 ulp_deference_struct(ipv4_spec, hdr.version_ihl),
971 ulp_deference_struct(ipv4_mask, hdr.version_ihl),
972 ULP_PRSR_ACT_DEFAULT);
975 * The tos field is ignored since OVS is setting it as wild card
976 * match and it is not supported. This is a work around and
977 * shall be addressed in the future.
979 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
980 ulp_rte_prsr_fld_mask(params, &idx, size,
981 ulp_deference_struct(ipv4_spec,
982 hdr.type_of_service),
983 ulp_deference_struct(ipv4_mask,
984 hdr.type_of_service),
985 ULP_PRSR_ACT_MASK_IGNORE);
987 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
988 ulp_rte_prsr_fld_mask(params, &idx, size,
989 ulp_deference_struct(ipv4_spec, hdr.total_length),
990 ulp_deference_struct(ipv4_mask, hdr.total_length),
991 ULP_PRSR_ACT_DEFAULT);
993 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
994 ulp_rte_prsr_fld_mask(params, &idx, size,
995 ulp_deference_struct(ipv4_spec, hdr.packet_id),
996 ulp_deference_struct(ipv4_mask, hdr.packet_id),
997 ULP_PRSR_ACT_DEFAULT);
999 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
1000 ulp_rte_prsr_fld_mask(params, &idx, size,
1001 ulp_deference_struct(ipv4_spec,
1002 hdr.fragment_offset),
1003 ulp_deference_struct(ipv4_mask,
1004 hdr.fragment_offset),
1005 ULP_PRSR_ACT_DEFAULT);
1007 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
1008 ulp_rte_prsr_fld_mask(params, &idx, size,
1009 ulp_deference_struct(ipv4_spec, hdr.time_to_live),
1010 ulp_deference_struct(ipv4_mask, hdr.time_to_live),
1011 ULP_PRSR_ACT_DEFAULT);
1013 /* Ignore proto for matching templates */
1014 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
1015 ulp_rte_prsr_fld_mask(params, &idx, size,
1016 ulp_deference_struct(ipv4_spec,
1018 ulp_deference_struct(ipv4_mask,
1020 ULP_PRSR_ACT_MATCH_IGNORE);
1022 proto = ipv4_spec->hdr.next_proto_id;
1024 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
1025 ulp_rte_prsr_fld_mask(params, &idx, size,
1026 ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
1027 ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
1028 ULP_PRSR_ACT_DEFAULT);
1030 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
1031 ulp_rte_prsr_fld_mask(params, &idx, size,
1032 ulp_deference_struct(ipv4_spec, hdr.src_addr),
1033 ulp_deference_struct(ipv4_mask, hdr.src_addr),
1034 ULP_PRSR_ACT_DEFAULT);
1036 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
1037 ulp_rte_prsr_fld_mask(params, &idx, size,
1038 ulp_deference_struct(ipv4_spec, hdr.dst_addr),
1039 ulp_deference_struct(ipv4_mask, hdr.dst_addr),
1040 ULP_PRSR_ACT_DEFAULT);
1042 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1043 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1044 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1045 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1046 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1049 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1050 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1053 /* Some of the PMD applications may set the protocol field
1054 * in the IPv4 spec but don't set the mask. So, consider
1055 * the mask in the proto value calculation.
1058 proto &= ipv4_mask->hdr.next_proto_id;
1060 /* Update the field protocol hdr bitmap */
1061 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1062 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1063 return BNXT_TF_RC_SUCCESS;
1066 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * ulp_rte_ipv6_hdr_handler - copy an RTE_FLOW_ITEM_TYPE_IPV6 item's
 * spec/mask fields into params->hdr_field and update the header bitmaps.
 *
 * @item:   the IPv6 flow item (spec/mask may be NULL).
 * @params: parser state; hdr_field, hdr_bitmap and computed fields updated.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 *
 * NOTE(review): this numbered listing has gaps (e.g. the `cnt == 2` guard
 * and several brace/declaration lines are missing) — compare against the
 * original source before editing.
 */
1068 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1069 struct ulp_rte_parser_params *params)
1071 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1072 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1073 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
/* vtc_flow is split into version / traffic-class / flow-label sub-fields */
1076 uint32_t ver_spec = 0, ver_mask = 0;
1077 uint32_t tc_spec = 0, tc_mask = 0;
1078 uint32_t lab_spec = 0, lab_mask = 0;
1080 uint32_t inner_flag = 0;
1083 /* validate there are no 3rd L3 header */
1084 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1086 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1087 return BNXT_TF_RC_ERROR;
1090 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1091 BNXT_ULP_HDR_BIT_O_ETH) &&
1092 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1093 BNXT_ULP_HDR_BIT_I_ETH)) {
1094 /* Since F2 flow does not include eth item, when parser detects
1095 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1096 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
1097 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1098 * This will allow the parser post processor to update the
1099 * t_dmac in hdr_field[o_eth.dmac]
1101 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1102 BNXT_ULP_PROTO_HDR_VLAN_NUM);
1103 params->field_idx = idx;
/* Ensure enough hdr_field slots remain for all IPv6 sub-fields */
1106 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1107 BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
1108 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1109 return BNXT_TF_RC_ERROR;
1113 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* Decompose vtc_flow only when a spec is present (guard line missing
 * from this listing — presumably `if (ipv6_spec)`; confirm). */
1117 ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1118 tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1119 lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1120 proto = ipv6_spec->hdr.proto;
1124 ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1125 tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1126 lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1128 /* Some of the PMD applications may set the protocol field
1129 * in the IPv6 spec but don't set the mask. So, consider
1130 * the mask in proto value calculation.
1132 proto &= ipv6_mask->hdr.proto;
1135 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
1136 ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
1137 ULP_PRSR_ACT_DEFAULT);
1139 * The TC and flow label field are ignored since OVS is
1140 * setting it for match and it is not supported.
1141 * This is a work around and
1142 * shall be addressed in the future.
1144 ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
1145 ULP_PRSR_ACT_MASK_IGNORE);
1146 ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
1147 ULP_PRSR_ACT_MASK_IGNORE);
1149 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
1150 ulp_rte_prsr_fld_mask(params, &idx, size,
1151 ulp_deference_struct(ipv6_spec, hdr.payload_len),
1152 ulp_deference_struct(ipv6_mask, hdr.payload_len),
1153 ULP_PRSR_ACT_DEFAULT);
1155 /* Ignore proto for template matching */
1156 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
1157 ulp_rte_prsr_fld_mask(params, &idx, size,
1158 ulp_deference_struct(ipv6_spec, hdr.proto),
1159 ulp_deference_struct(ipv6_mask, hdr.proto),
1160 ULP_PRSR_ACT_MATCH_IGNORE);
1162 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
1163 ulp_rte_prsr_fld_mask(params, &idx, size,
1164 ulp_deference_struct(ipv6_spec, hdr.hop_limits),
1165 ulp_deference_struct(ipv6_mask, hdr.hop_limits),
1166 ULP_PRSR_ACT_DEFAULT);
1168 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
1169 ulp_rte_prsr_fld_mask(params, &idx, size,
1170 ulp_deference_struct(ipv6_spec, hdr.src_addr),
1171 ulp_deference_struct(ipv6_mask, hdr.src_addr),
1172 ULP_PRSR_ACT_DEFAULT);
1174 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
1175 ulp_rte_prsr_fld_mask(params, &idx, size,
1176 ulp_deference_struct(ipv6_spec, hdr.dst_addr),
1177 ulp_deference_struct(ipv6_mask, hdr.dst_addr),
1178 ULP_PRSR_ACT_DEFAULT);
1180 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
/* If an outer L3 header was already seen, this one is the inner header */
1181 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1182 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1183 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1184 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
/* else branch (line missing from listing): mark as outer IPv6 */
1187 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1188 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1191 /* Update the field protocol hdr bitmap */
1192 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1193 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1195 return BNXT_TF_RC_SUCCESS;
1198 /* Function to handle the update of proto header based on field values */
/*
 * ulp_rte_l4_proto_type_update - derive tunnel hints from the L4 dst port.
 *
 * If the (big-endian) destination port equals the VXLAN port (4789),
 * the fast-path T_VXLAN bit is set; if either a VXLAN or GRE tunnel bit
 * is already present in hdr_bitmap, the L3_TUN computed field is set.
 *
 * NOTE(review): the second parameter line (presumably `uint16_t dst_port`)
 * is missing from this listing — confirm against the original source.
 */
1200 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1203 	if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1204 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1205 BNXT_ULP_HDR_BIT_T_VXLAN);
1207 if (ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1208 BNXT_ULP_HDR_BIT_T_VXLAN) ||
1209 ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1210 BNXT_ULP_HDR_BIT_T_GRE))
1211 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1214 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * ulp_rte_udp_hdr_handler - copy an RTE_FLOW_ITEM_TYPE_UDP item's
 * spec/mask into params->hdr_field, update L4 bitmaps/computed fields,
 * and let the dst port drive tunnel detection.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 *
 * NOTE(review): numbered listing with gaps (the `cnt == 2` guard, the
 * `if (udp_spec)` guard and else-brace lines are missing) — confirm
 * against the original source.
 */
1216 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1217 struct ulp_rte_parser_params *params)
1219 const struct rte_flow_item_udp *udp_spec = item->spec;
1220 const struct rte_flow_item_udp *udp_mask = item->mask;
1221 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1224 uint16_t dport = 0, sport = 0;
1227 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1229 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1230 return BNXT_TF_RC_ERROR;
/* Ports kept in network byte order here; converted where consumed */
1234 sport = udp_spec->hdr.src_port;
1235 dport = udp_spec->hdr.dst_port;
1238 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1239 BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1240 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1241 return BNXT_TF_RC_ERROR;
1245 * Copy the rte_flow_item for udp into hdr_field using udp
1248 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1249 ulp_rte_prsr_fld_mask(params, &idx, size,
1250 ulp_deference_struct(udp_spec, hdr.src_port),
1251 ulp_deference_struct(udp_mask, hdr.src_port),
1252 ULP_PRSR_ACT_DEFAULT);
1254 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1255 ulp_rte_prsr_fld_mask(params, &idx, size,
1256 ulp_deference_struct(udp_spec, hdr.dst_port),
1257 ulp_deference_struct(udp_mask, hdr.dst_port),
1258 ULP_PRSR_ACT_DEFAULT);
1260 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1261 ulp_rte_prsr_fld_mask(params, &idx, size,
1262 ulp_deference_struct(udp_spec, hdr.dgram_len),
1263 ulp_deference_struct(udp_mask, hdr.dgram_len),
1264 ULP_PRSR_ACT_DEFAULT);
1266 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1267 ulp_rte_prsr_fld_mask(params, &idx, size,
1268 ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1269 ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1270 ULP_PRSR_ACT_DEFAULT);
1272 /* Set the udp header bitmap and computed l4 header bitmaps */
/* An outer L4 already present => this UDP header is the inner one */
1273 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1274 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1275 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1276 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1277 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1278 (uint32_t)rte_be_to_cpu_16(sport));
1279 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1280 (uint32_t)rte_be_to_cpu_16(dport));
1281 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1283 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
/* Record full-byte port matches only when the app supplied a mask */
1285 if (udp_mask && udp_mask->hdr.src_port)
1286 ULP_COMP_FLD_IDX_WR(params,
1287 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1289 if (udp_mask && udp_mask->hdr.dst_port)
1290 ULP_COMP_FLD_IDX_WR(params,
1291 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
/* else branch (brace line missing from listing): outer UDP header */
1294 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1295 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1296 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1297 (uint32_t)rte_be_to_cpu_16(sport));
1298 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1299 (uint32_t)rte_be_to_cpu_16(dport));
1300 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1302 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1304 if (udp_mask && udp_mask->hdr.src_port)
1305 ULP_COMP_FLD_IDX_WR(params,
1306 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1308 if (udp_mask && udp_mask->hdr.dst_port)
1309 ULP_COMP_FLD_IDX_WR(params,
1310 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1313 /* Update the field protocol hdr bitmap */
1314 ulp_rte_l4_proto_type_update(params, dport);
1316 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1317 return BNXT_TF_RC_SUCCESS;
1320 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * ulp_rte_tcp_hdr_handler - copy an RTE_FLOW_ITEM_TYPE_TCP item's
 * spec/mask into params->hdr_field and update L4 bitmaps/computed fields.
 * Mirrors ulp_rte_udp_hdr_handler but covers all TCP header fields and
 * does not feed tunnel detection.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 *
 * NOTE(review): numbered listing with gaps (the `cnt == 2` guard, the
 * `if (tcp_spec)` guard and else-brace lines are missing) — confirm
 * against the original source.
 */
1322 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1323 struct ulp_rte_parser_params *params)
1325 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1326 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1327 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1329 uint16_t dport = 0, sport = 0;
1333 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1335 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1336 return BNXT_TF_RC_ERROR;
/* Ports kept in network byte order here; converted where consumed */
1340 sport = tcp_spec->hdr.src_port;
1341 dport = tcp_spec->hdr.dst_port;
1344 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1345 BNXT_ULP_PROTO_HDR_TCP_NUM)) {
1346 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1347 return BNXT_TF_RC_ERROR;
1351 * Copy the rte_flow_item for tcp into hdr_field using tcp
1354 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1355 ulp_rte_prsr_fld_mask(params, &idx, size,
1356 ulp_deference_struct(tcp_spec, hdr.src_port),
1357 ulp_deference_struct(tcp_mask, hdr.src_port),
1358 ULP_PRSR_ACT_DEFAULT);
1360 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1361 ulp_rte_prsr_fld_mask(params, &idx, size,
1362 ulp_deference_struct(tcp_spec, hdr.dst_port),
1363 ulp_deference_struct(tcp_mask, hdr.dst_port),
1364 ULP_PRSR_ACT_DEFAULT);
1366 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1367 ulp_rte_prsr_fld_mask(params, &idx, size,
1368 ulp_deference_struct(tcp_spec, hdr.sent_seq),
1369 ulp_deference_struct(tcp_mask, hdr.sent_seq),
1370 ULP_PRSR_ACT_DEFAULT);
1372 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1373 ulp_rte_prsr_fld_mask(params, &idx, size,
1374 ulp_deference_struct(tcp_spec, hdr.recv_ack),
1375 ulp_deference_struct(tcp_mask, hdr.recv_ack),
1376 ULP_PRSR_ACT_DEFAULT);
1378 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1379 ulp_rte_prsr_fld_mask(params, &idx, size,
1380 ulp_deference_struct(tcp_spec, hdr.data_off),
1381 ulp_deference_struct(tcp_mask, hdr.data_off),
1382 ULP_PRSR_ACT_DEFAULT);
1384 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1385 ulp_rte_prsr_fld_mask(params, &idx, size,
1386 ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1387 ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1388 ULP_PRSR_ACT_DEFAULT);
1390 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1391 ulp_rte_prsr_fld_mask(params, &idx, size,
1392 ulp_deference_struct(tcp_spec, hdr.rx_win),
1393 ulp_deference_struct(tcp_mask, hdr.rx_win),
1394 ULP_PRSR_ACT_DEFAULT);
1396 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1397 ulp_rte_prsr_fld_mask(params, &idx, size,
1398 ulp_deference_struct(tcp_spec, hdr.cksum),
1399 ulp_deference_struct(tcp_mask, hdr.cksum),
1400 ULP_PRSR_ACT_DEFAULT);
1402 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1403 ulp_rte_prsr_fld_mask(params, &idx, size,
1404 ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1405 ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1406 ULP_PRSR_ACT_DEFAULT);
1408 /* Set the tcp header bitmap and computed l4 header bitmaps */
/* An outer L4 already present => this TCP header is the inner one */
1409 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1410 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1411 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1412 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1413 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1414 (uint32_t)rte_be_to_cpu_16(sport));
1415 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1416 (uint32_t)rte_be_to_cpu_16(dport));
1417 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1419 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
/* Record full-byte port matches only when the app supplied a mask */
1421 if (tcp_mask && tcp_mask->hdr.src_port)
1422 ULP_COMP_FLD_IDX_WR(params,
1423 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1425 if (tcp_mask && tcp_mask->hdr.dst_port)
1426 ULP_COMP_FLD_IDX_WR(params,
1427 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
/* else branch (brace line missing from listing): outer TCP header */
1430 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1431 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1432 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1433 (uint32_t)rte_be_to_cpu_16(sport));
1434 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1435 (uint32_t)rte_be_to_cpu_16(dport));
1436 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1438 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1440 if (tcp_mask && tcp_mask->hdr.src_port)
1441 ULP_COMP_FLD_IDX_WR(params,
1442 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1444 if (tcp_mask && tcp_mask->hdr.dst_port)
1445 ULP_COMP_FLD_IDX_WR(params,
1446 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1449 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1450 return BNXT_TF_RC_SUCCESS;
1453 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * ulp_rte_vxlan_hdr_handler - copy an RTE_FLOW_ITEM_TYPE_VXLAN item's
 * flags/rsvd0/vni/rsvd1 fields into params->hdr_field, set the T_VXLAN
 * tunnel bit, and refresh tunnel computed fields (dst port 0 here: the
 * bitmap just set drives ulp_rte_l4_proto_type_update).
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 *
 * NOTE(review): numbered listing with gaps (brace/decl lines missing).
 */
1455 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1456 struct ulp_rte_parser_params *params)
1458 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1459 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1460 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1464 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1465 BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1466 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1467 return BNXT_TF_RC_ERROR;
1471 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1474 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1475 ulp_rte_prsr_fld_mask(params, &idx, size,
1476 ulp_deference_struct(vxlan_spec, flags),
1477 ulp_deference_struct(vxlan_mask, flags),
1478 ULP_PRSR_ACT_DEFAULT);
1480 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1481 ulp_rte_prsr_fld_mask(params, &idx, size,
1482 ulp_deference_struct(vxlan_spec, rsvd0),
1483 ulp_deference_struct(vxlan_mask, rsvd0),
1484 ULP_PRSR_ACT_DEFAULT);
1486 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1487 ulp_rte_prsr_fld_mask(params, &idx, size,
1488 ulp_deference_struct(vxlan_spec, vni),
1489 ulp_deference_struct(vxlan_mask, vni),
1490 ULP_PRSR_ACT_DEFAULT);
1492 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1493 ulp_rte_prsr_fld_mask(params, &idx, size,
1494 ulp_deference_struct(vxlan_spec, rsvd1),
1495 ulp_deference_struct(vxlan_mask, rsvd1),
1496 ULP_PRSR_ACT_DEFAULT);
1498 /* Update the hdr_bitmap with vxlan */
1499 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1500 ulp_rte_l4_proto_type_update(params, 0);
1501 return BNXT_TF_RC_SUCCESS;
1504 /* Function to handle the parsing of RTE Flow item GRE Header. */
/*
 * ulp_rte_gre_hdr_handler - copy an RTE_FLOW_ITEM_TYPE_GRE item's
 * c_rsvd0_ver/protocol fields into params->hdr_field, set the T_GRE
 * tunnel bit and refresh tunnel computed fields.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 *
 * NOTE(review): numbered listing with gaps (brace/decl lines missing).
 */
1506 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1507 struct ulp_rte_parser_params *params)
1509 const struct rte_flow_item_gre *gre_spec = item->spec;
1510 const struct rte_flow_item_gre *gre_mask = item->mask;
1511 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1515 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1516 BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1517 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1518 return BNXT_TF_RC_ERROR;
1521 size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1522 ulp_rte_prsr_fld_mask(params, &idx, size,
1523 ulp_deference_struct(gre_spec, c_rsvd0_ver),
1524 ulp_deference_struct(gre_mask, c_rsvd0_ver),
1525 ULP_PRSR_ACT_DEFAULT);
1527 size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1528 ulp_rte_prsr_fld_mask(params, &idx, size,
1529 ulp_deference_struct(gre_spec, protocol),
1530 ulp_deference_struct(gre_mask, protocol),
1531 ULP_PRSR_ACT_DEFAULT);
1533 /* Update the hdr_bitmap with GRE */
1534 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1535 ulp_rte_l4_proto_type_update(params, 0);
1536 return BNXT_TF_RC_SUCCESS;
1539 /* Function to handle the parsing of RTE Flow item ANY. */
/* No-op handler: the ANY item adds no match constraints, so nothing is
 * recorded in the parser state; always succeeds.
 */
1541 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1542 struct ulp_rte_parser_params *params __rte_unused)
1544 return BNXT_TF_RC_SUCCESS;
1547 /* Function to handle the parsing of RTE Flow item ICMP Header. */
/*
 * ulp_rte_icmp_hdr_handler - copy an RTE_FLOW_ITEM_TYPE_ICMP item's
 * type/code/cksum/ident/seq fields into params->hdr_field and mark the
 * header as inner (when inside a tunnel) or outer ICMP.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 *
 * NOTE(review): numbered listing with gaps (brace/decl lines missing).
 */
1549 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1550 struct ulp_rte_parser_params *params)
1552 const struct rte_flow_item_icmp *icmp_spec = item->spec;
1553 const struct rte_flow_item_icmp *icmp_mask = item->mask;
1554 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1558 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1559 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1560 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1561 return BNXT_TF_RC_ERROR;
1564 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1565 ulp_rte_prsr_fld_mask(params, &idx, size,
1566 ulp_deference_struct(icmp_spec, hdr.icmp_type),
1567 ulp_deference_struct(icmp_mask, hdr.icmp_type),
1568 ULP_PRSR_ACT_DEFAULT);
1570 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1571 ulp_rte_prsr_fld_mask(params, &idx, size,
1572 ulp_deference_struct(icmp_spec, hdr.icmp_code),
1573 ulp_deference_struct(icmp_mask, hdr.icmp_code),
1574 ULP_PRSR_ACT_DEFAULT);
1576 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1577 ulp_rte_prsr_fld_mask(params, &idx, size,
1578 ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1579 ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1580 ULP_PRSR_ACT_DEFAULT);
1582 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1583 ulp_rte_prsr_fld_mask(params, &idx, size,
1584 ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1585 ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1586 ULP_PRSR_ACT_DEFAULT);
1588 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1589 ulp_rte_prsr_fld_mask(params, &idx, size,
1590 ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1591 ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1592 ULP_PRSR_ACT_DEFAULT);
1594 /* Update the hdr_bitmap with ICMP */
/* Inside a tunnel the ICMP header is inner; otherwise outer */
1595 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1596 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1598 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1599 return BNXT_TF_RC_SUCCESS;
1602 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
/*
 * ulp_rte_icmp6_hdr_handler - copy an RTE_FLOW_ITEM_TYPE_ICMP6 item's
 * type/code/checksum fields into params->hdr_field. Rejects the item
 * when the outer L3 is IPv4 (ICMPv6 over IPv4 is invalid), then marks
 * the header as inner or outer ICMP.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 *
 * NOTE(review): numbered listing with gaps (brace/decl lines missing).
 */
1604 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1605 struct ulp_rte_parser_params *params)
1607 const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1608 const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1609 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1613 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1614 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1615 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1616 return BNXT_TF_RC_ERROR;
1619 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1620 ulp_rte_prsr_fld_mask(params, &idx, size,
1621 ulp_deference_struct(icmp_spec, type),
1622 ulp_deference_struct(icmp_mask, type),
1623 ULP_PRSR_ACT_DEFAULT);
1625 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1626 ulp_rte_prsr_fld_mask(params, &idx, size,
1627 ulp_deference_struct(icmp_spec, code),
1628 ulp_deference_struct(icmp_mask, code),
1629 ULP_PRSR_ACT_DEFAULT);
1631 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
1632 ulp_rte_prsr_fld_mask(params, &idx, size,
1633 ulp_deference_struct(icmp_spec, checksum),
1634 ulp_deference_struct(icmp_mask, checksum),
1635 ULP_PRSR_ACT_DEFAULT);
/* ICMPv6 cannot follow an outer IPv4 header */
1637 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
1638 BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
1639 return BNXT_TF_RC_ERROR;
1642 /* Update the hdr_bitmap with ICMP */
1643 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1644 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP)
1646 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1647 return BNXT_TF_RC_SUCCESS;
1650 /* Function to handle the parsing of RTE Flow item void Header */
/* No-op handler: VOID items carry no match data; always succeeds. */
1652 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1653 struct ulp_rte_parser_params *params __rte_unused)
1655 return BNXT_TF_RC_SUCCESS;
1658 /* Function to handle the parsing of RTE Flow action void Header. */
/* No-op handler: VOID actions have no effect on the flow; always succeeds. */
1660 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1661 struct ulp_rte_parser_params *params __rte_unused)
1663 return BNXT_TF_RC_SUCCESS;
1666 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * ulp_rte_mark_act_handler - record a MARK action: store the mark id
 * (big-endian) in the action properties and set ACT_BIT_MARK.
 * Returns BNXT_TF_RC_ERROR when the action conf is missing.
 *
 * NOTE(review): numbered listing with gaps — the `if (mark)` guard and
 * the `uint32_t mark_id` declaration lines are missing; the trailing
 * error return is the null-conf path. Confirm against original source.
 */
1668 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1669 struct ulp_rte_parser_params *param)
1671 const struct rte_flow_action_mark *mark;
1672 struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1675 mark = action_item->conf;
1677 mark_id = tfp_cpu_to_be_32(mark->id);
1678 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1679 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1681 /* Update the act_bitmap with mark */
1682 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1683 return BNXT_TF_RC_SUCCESS;
1685 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1686 return BNXT_TF_RC_ERROR;
1689 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * ulp_rte_rss_act_handler - record an RSS action: copy types, level,
 * key length and key into the action properties and set ACT_BIT_RSS.
 * Rejects a missing conf or a key longer than the property buffer.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1691 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1692 struct ulp_rte_parser_params *param)
1694 const struct rte_flow_action_rss *rss;
1695 struct ulp_rte_act_prop *ap = &param->act_prop;
1697 if (action_item == NULL || action_item->conf == NULL) {
1698 BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1699 return BNXT_TF_RC_ERROR;
1702 rss = action_item->conf;
1703 /* Copy the rss into the specific action properties */
1704 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1705 BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1706 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1707 BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1708 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1709 &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
/* Bound-check the caller-supplied key before copying it */
1711 if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1712 BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1713 return BNXT_TF_RC_ERROR;
1715 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1718 /* set the RSS action header bit */
1719 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1721 return BNXT_TF_RC_SUCCESS;
1724 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * ulp_rte_vxlan_encap_act_handler - parse the item chain inside a
 * RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action (ETH [VLAN [VLAN]] IPV4|IPV6
 * UDP VXLAN) and build the encapsulation record in params->act_prop,
 * then set ACT_BIT_VXLAN_ENCAP.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR on any malformed or
 * missing item.
 *
 * NOTE(review): numbered listing with gaps — e.g. the `uint8_t *buff`
 * declaration, several null-check/else lines and array-initializer
 * continuation lines are missing. Confirm against the original source
 * before modifying.
 */
1726 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1727 struct ulp_rte_parser_params *params)
1729 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1730 const struct rte_flow_item *item;
1731 const struct rte_flow_item_eth *eth_spec;
1732 const struct rte_flow_item_ipv4 *ipv4_spec;
1733 const struct rte_flow_item_ipv6 *ipv6_spec;
1734 struct rte_flow_item_vxlan vxlan_spec;
1735 uint32_t vlan_num = 0, vlan_size = 0;
1736 uint32_t ip_size = 0, ip_type = 0;
1737 uint32_t vxlan_size = 0;
1739 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1740 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1742 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1743 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1745 struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1746 struct ulp_rte_act_prop *ap = &params->act_prop;
1747 const uint8_t *tmp_buff;
/* Validate action conf and its item chain before walking it */
1749 vxlan_encap = action_item->conf;
1751 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1752 return BNXT_TF_RC_ERROR;
1755 item = vxlan_encap->definition;
1757 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1758 return BNXT_TF_RC_ERROR;
1761 if (!ulp_rte_item_skip_void(&item, 0))
1762 return BNXT_TF_RC_ERROR;
1764 /* must have ethernet header */
1765 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1766 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1767 return BNXT_TF_RC_ERROR;
/* Copy outer DMAC/SMAC into the encap L2 properties */
1769 eth_spec = item->spec;
1770 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1771 ulp_encap_buffer_copy(buff,
1772 eth_spec->dst.addr_bytes,
1773 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1774 ULP_BUFFER_ALIGN_8_BYTE);
1776 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1777 ulp_encap_buffer_copy(buff,
1778 eth_spec->src.addr_bytes,
1779 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1780 ULP_BUFFER_ALIGN_8_BYTE);
1782 /* Goto the next item */
1783 if (!ulp_rte_item_skip_void(&item, 1))
1784 return BNXT_TF_RC_ERROR;
1786 /* May have vlan header */
1787 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1789 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1790 ulp_encap_buffer_copy(buff,
1792 sizeof(struct rte_flow_item_vlan),
1793 ULP_BUFFER_ALIGN_8_BYTE);
1795 if (!ulp_rte_item_skip_void(&item, 1))
1796 return BNXT_TF_RC_ERROR;
1799 /* may have two vlan headers */
1800 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1802 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1803 sizeof(struct rte_flow_item_vlan)],
1805 sizeof(struct rte_flow_item_vlan));
1806 if (!ulp_rte_item_skip_void(&item, 1))
1807 return BNXT_TF_RC_ERROR;
1809 /* Update the vlan count and size of more than one */
/* Both values stored big-endian in the action properties */
1811 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1812 vlan_num = tfp_cpu_to_be_32(vlan_num);
1813 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1816 vlan_size = tfp_cpu_to_be_32(vlan_size);
1817 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1822 /* L3 must be IPv4, IPv6 */
1823 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1824 ipv4_spec = item->spec;
1825 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1827 /* copy the ipv4 details */
/* Empty ver/hlen/TOS in the spec => fall back to def_ipv4_hdr */
1828 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1829 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1830 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1831 ulp_encap_buffer_copy(buff,
1833 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1834 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1835 ULP_BUFFER_ALIGN_8_BYTE);
1837 /* Total length being ignored in the ip hdr. */
1838 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1839 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1840 ulp_encap_buffer_copy(buff,
1842 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1843 ULP_BUFFER_ALIGN_8_BYTE);
1844 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1845 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1846 ulp_encap_buffer_copy(buff,
1847 &ipv4_spec->hdr.version_ihl,
1848 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1849 ULP_BUFFER_ALIGN_8_BYTE);
1852 /* Update the dst ip address in ip encap buffer */
1853 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1854 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1855 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1856 ulp_encap_buffer_copy(buff,
1857 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1858 sizeof(ipv4_spec->hdr.dst_addr),
1859 ULP_BUFFER_ALIGN_8_BYTE);
1861 /* Update the src ip address */
1862 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1863 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1864 sizeof(ipv4_spec->hdr.src_addr)];
1865 ulp_encap_buffer_copy(buff,
1866 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1867 sizeof(ipv4_spec->hdr.src_addr),
1868 ULP_BUFFER_ALIGN_8_BYTE);
1870 /* Update the ip size details */
1871 ip_size = tfp_cpu_to_be_32(ip_size);
1872 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1873 &ip_size, sizeof(uint32_t));
1875 /* update the ip type */
1876 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1877 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1878 &ip_type, sizeof(uint32_t));
1880 /* update the computed field to notify it is ipv4 header */
1881 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1884 if (!ulp_rte_item_skip_void(&item, 1))
1885 return BNXT_TF_RC_ERROR;
1886 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1887 ipv6_spec = item->spec;
1888 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1890 /* copy the ipv6 details */
/* Empty vtc_flow in the spec => fall back to def_ipv6_hdr */
1891 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1892 if (ulp_buffer_is_empty(tmp_buff,
1893 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1894 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1895 ulp_encap_buffer_copy(buff,
1897 sizeof(def_ipv6_hdr),
1898 ULP_BUFFER_ALIGN_8_BYTE);
1900 /* The payload length being ignored in the ip hdr. */
1901 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1902 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1903 ulp_encap_buffer_copy(buff,
1905 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1906 ULP_BUFFER_ALIGN_8_BYTE);
1907 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1908 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1909 BNXT_ULP_ENCAP_IPV6_DO];
1910 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1911 ulp_encap_buffer_copy(buff,
1913 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1914 ULP_BUFFER_ALIGN_8_BYTE);
1916 /* Update the dst ip address in ip encap buffer */
1917 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1918 sizeof(def_ipv6_hdr)];
1919 ulp_encap_buffer_copy(buff,
1920 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1921 sizeof(ipv6_spec->hdr.dst_addr),
1922 ULP_BUFFER_ALIGN_8_BYTE);
1924 /* Update the src ip address */
1925 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1926 ulp_encap_buffer_copy(buff,
1927 (const uint8_t *)ipv6_spec->hdr.src_addr,
1928 sizeof(ipv6_spec->hdr.src_addr),
1929 ULP_BUFFER_ALIGN_16_BYTE);
1931 /* Update the ip size details */
1932 ip_size = tfp_cpu_to_be_32(ip_size);
1933 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1934 &ip_size, sizeof(uint32_t));
1936 /* update the ip type */
1937 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1938 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1939 &ip_type, sizeof(uint32_t));
1941 /* update the computed field to notify it is ipv6 header */
1942 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1945 if (!ulp_rte_item_skip_void(&item, 1))
1946 return BNXT_TF_RC_ERROR;
/* else: neither IPv4 nor IPv6 — reject */
1948 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1949 return BNXT_TF_RC_ERROR;
1953 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1954 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1955 return BNXT_TF_RC_ERROR;
1957 /* copy the udp details */
1958 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1959 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1960 ULP_BUFFER_ALIGN_8_BYTE);
1962 if (!ulp_rte_item_skip_void(&item, 1))
1963 return BNXT_TF_RC_ERROR;
1966 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1967 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1968 return BNXT_TF_RC_ERROR;
1970 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1971 /* copy the vxlan details */
/* Force the VXLAN 'I' flag (VNI valid) regardless of app input */
1972 memcpy(&vxlan_spec, item->spec, vxlan_size);
1973 vxlan_spec.flags = 0x08;
1974 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
/* IPv4 path copies in one shot; the other path splits the copy in two
 * halves (flags+rsvd0, then vni+rsvd1) with separate alignment. */
1975 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1976 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1977 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1979 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1980 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1981 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1982 (const uint8_t *)&vxlan_spec.vni,
1983 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1985 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1986 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1987 &vxlan_size, sizeof(uint32_t));
1989 /* update the hdr_bitmap with vxlan */
1990 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
1991 return BNXT_TF_RC_SUCCESS;
1994 /* Function to handle the parsing of RTE Flow action vxlan_decap Header */
/*
 * ulp_rte_vxlan_decap_act_handler - record a VXLAN_DECAP action: set
 * ACT_BIT_VXLAN_DECAP and the tunnel/tunnel-decap computed fields.
 * The action itself carries no configuration; always succeeds.
 */
1996 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1998 struct ulp_rte_parser_params *params)
2000 /* update the act_bitmap with vxlan decap */
2001 ULP_BITMAP_SET(params->act_bitmap.bits,
2002 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2003 /* Update computational field with tunnel decap info */
2004 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2005 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
2006 return BNXT_TF_RC_SUCCESS;
2009 /* Function to handle the parsing of RTE Flow action drop Header. */
/* Record a DROP action: set ACT_BIT_DROP; no configuration to parse. */
2011 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2012 struct ulp_rte_parser_params *params)
2014 /* Update the act_bitmap with drop */
2015 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2016 return BNXT_TF_RC_SUCCESS;
2019 /* Function to handle the parsing of RTE Flow action count. */
2021 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2022 struct ulp_rte_parser_params *params)
2024 const struct rte_flow_action_count *act_count;
2025 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
2027 act_count = action_item->conf;
2029 if (act_count->shared) {
2031 "Parse Error:Shared count not supported\n");
2032 return BNXT_TF_RC_PARSE_ERR;
2034 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2036 BNXT_ULP_ACT_PROP_SZ_COUNT);
2039 /* Update the hdr_bitmap with count */
2040 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2041 return BNXT_TF_RC_SUCCESS;
2044 /* Function to handle the parsing of action ports. */
2046 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2049 enum bnxt_ulp_direction_type dir;
2052 struct ulp_rte_act_prop *act = ¶m->act_prop;
2053 enum bnxt_ulp_intf_type port_type;
2056 /* Get the direction */
2057 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
2058 if (dir == BNXT_ULP_DIR_EGRESS) {
2059 /* For egress direction, fill vport */
2060 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2061 return BNXT_TF_RC_ERROR;
2064 pid = rte_cpu_to_be_32(pid);
2065 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2066 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2068 /* For ingress direction, fill vnic */
2069 port_type = ULP_COMP_FLD_IDX_RD(param,
2070 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2071 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2072 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2074 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2076 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2078 return BNXT_TF_RC_ERROR;
2081 pid = rte_cpu_to_be_32(pid);
2082 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2083 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2086 /* Update the action port set bit */
2087 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2088 return BNXT_TF_RC_SUCCESS;
2091 /* Function to handle the parsing of RTE Flow action PF. */
2093 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2094 struct ulp_rte_parser_params *params)
2098 enum bnxt_ulp_intf_type intf_type;
2100 /* Get the port id of the current device */
2101 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2103 /* Get the port db ifindex */
2104 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2106 BNXT_TF_DBG(ERR, "Invalid port id\n");
2107 return BNXT_TF_RC_ERROR;
2110 /* Check the port is PF port */
2111 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2112 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2113 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2114 return BNXT_TF_RC_ERROR;
2116 /* Update the action properties */
2117 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2118 return ulp_rte_parser_act_port_set(params, ifindex);
2121 /* Function to handle the parsing of RTE Flow action VF. */
2123 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2124 struct ulp_rte_parser_params *params)
2126 const struct rte_flow_action_vf *vf_action;
2127 enum bnxt_ulp_intf_type intf_type;
2131 vf_action = action_item->conf;
2133 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2134 return BNXT_TF_RC_PARSE_ERR;
2137 if (vf_action->original) {
2138 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2139 return BNXT_TF_RC_PARSE_ERR;
2142 bp = bnxt_pmd_get_bp(params->port_id);
2144 BNXT_TF_DBG(ERR, "Invalid bp\n");
2145 return BNXT_TF_RC_ERROR;
2148 /* vf_action->id is a logical number which in this case is an
2149 * offset from the first VF. So, to get the absolute VF id, the
2150 * offset must be added to the absolute first vf id of that port.
2152 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2156 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2157 return BNXT_TF_RC_ERROR;
2159 /* Check the port is VF port */
2160 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2161 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2162 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2163 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2164 return BNXT_TF_RC_ERROR;
2167 /* Update the action properties */
2168 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2169 return ulp_rte_parser_act_port_set(params, ifindex);
2172 /* Function to handle the parsing of RTE Flow action port_id. */
2174 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
2175 struct ulp_rte_parser_params *param)
2177 const struct rte_flow_action_port_id *port_id = act_item->conf;
2179 enum bnxt_ulp_intf_type intf_type;
2183 "ParseErr: Invalid Argument\n");
2184 return BNXT_TF_RC_PARSE_ERR;
2186 if (port_id->original) {
2188 "ParseErr:Portid Original not supported\n");
2189 return BNXT_TF_RC_PARSE_ERR;
2192 /* Get the port db ifindex */
2193 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2195 BNXT_TF_DBG(ERR, "Invalid port id\n");
2196 return BNXT_TF_RC_ERROR;
2199 /* Get the intf type */
2200 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2202 BNXT_TF_DBG(ERR, "Invalid port type\n");
2203 return BNXT_TF_RC_ERROR;
2206 /* Set the action port */
2207 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2208 return ulp_rte_parser_act_port_set(param, ifindex);
2211 /* Function to handle the parsing of RTE Flow action phy_port. */
2213 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2214 struct ulp_rte_parser_params *prm)
2216 const struct rte_flow_action_phy_port *phy_port;
2220 enum bnxt_ulp_direction_type dir;
2222 phy_port = action_item->conf;
2225 "ParseErr: Invalid Argument\n");
2226 return BNXT_TF_RC_PARSE_ERR;
2229 if (phy_port->original) {
2231 "Parse Err:Port Original not supported\n");
2232 return BNXT_TF_RC_PARSE_ERR;
2234 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2235 if (dir != BNXT_ULP_DIR_EGRESS) {
2237 "Parse Err:Phy ports are valid only for egress\n");
2238 return BNXT_TF_RC_PARSE_ERR;
2240 /* Get the physical port details from port db */
2241 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2244 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2249 pid = rte_cpu_to_be_32(pid);
2250 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2251 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2253 /* Update the action port set bit */
2254 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2255 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2256 BNXT_ULP_INTF_TYPE_PHY_PORT);
2257 return BNXT_TF_RC_SUCCESS;
2260 /* Function to handle the parsing of RTE Flow action pop vlan. */
2262 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2263 struct ulp_rte_parser_params *params)
2265 /* Update the act_bitmap with pop */
2266 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2267 return BNXT_TF_RC_SUCCESS;
2270 /* Function to handle the parsing of RTE Flow action push vlan. */
2272 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2273 struct ulp_rte_parser_params *params)
2275 const struct rte_flow_action_of_push_vlan *push_vlan;
2277 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2279 push_vlan = action_item->conf;
2281 ethertype = push_vlan->ethertype;
2282 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2284 "Parse Err: Ethertype not supported\n");
2285 return BNXT_TF_RC_PARSE_ERR;
2287 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2288 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2289 /* Update the hdr_bitmap with push vlan */
2290 ULP_BITMAP_SET(params->act_bitmap.bits,
2291 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2292 return BNXT_TF_RC_SUCCESS;
2294 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2295 return BNXT_TF_RC_ERROR;
2298 /* Function to handle the parsing of RTE Flow action set vlan id. */
2300 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2301 struct ulp_rte_parser_params *params)
2303 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2305 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2307 vlan_vid = action_item->conf;
2308 if (vlan_vid && vlan_vid->vlan_vid) {
2309 vid = vlan_vid->vlan_vid;
2310 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2311 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2312 /* Update the hdr_bitmap with vlan vid */
2313 ULP_BITMAP_SET(params->act_bitmap.bits,
2314 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2315 return BNXT_TF_RC_SUCCESS;
2317 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2318 return BNXT_TF_RC_ERROR;
2321 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2323 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2324 struct ulp_rte_parser_params *params)
2326 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2328 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2330 vlan_pcp = action_item->conf;
2332 pcp = vlan_pcp->vlan_pcp;
2333 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2334 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2335 /* Update the hdr_bitmap with vlan vid */
2336 ULP_BITMAP_SET(params->act_bitmap.bits,
2337 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2338 return BNXT_TF_RC_SUCCESS;
2340 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2341 return BNXT_TF_RC_ERROR;
2344 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2346 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2347 struct ulp_rte_parser_params *params)
2349 const struct rte_flow_action_set_ipv4 *set_ipv4;
2350 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2352 set_ipv4 = action_item->conf;
2354 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2355 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2356 /* Update the hdr_bitmap with set ipv4 src */
2357 ULP_BITMAP_SET(params->act_bitmap.bits,
2358 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2359 return BNXT_TF_RC_SUCCESS;
2361 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2362 return BNXT_TF_RC_ERROR;
2365 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2367 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2368 struct ulp_rte_parser_params *params)
2370 const struct rte_flow_action_set_ipv4 *set_ipv4;
2371 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2373 set_ipv4 = action_item->conf;
2375 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2376 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2377 /* Update the hdr_bitmap with set ipv4 dst */
2378 ULP_BITMAP_SET(params->act_bitmap.bits,
2379 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2380 return BNXT_TF_RC_SUCCESS;
2382 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2383 return BNXT_TF_RC_ERROR;
2386 /* Function to handle the parsing of RTE Flow action set tp src.*/
2388 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2389 struct ulp_rte_parser_params *params)
2391 const struct rte_flow_action_set_tp *set_tp;
2392 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2394 set_tp = action_item->conf;
2396 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2397 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2398 /* Update the hdr_bitmap with set tp src */
2399 ULP_BITMAP_SET(params->act_bitmap.bits,
2400 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2401 return BNXT_TF_RC_SUCCESS;
2404 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2405 return BNXT_TF_RC_ERROR;
2408 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2410 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2411 struct ulp_rte_parser_params *params)
2413 const struct rte_flow_action_set_tp *set_tp;
2414 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2416 set_tp = action_item->conf;
2418 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2419 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2420 /* Update the hdr_bitmap with set tp dst */
2421 ULP_BITMAP_SET(params->act_bitmap.bits,
2422 BNXT_ULP_ACT_BIT_SET_TP_DST);
2423 return BNXT_TF_RC_SUCCESS;
2426 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2427 return BNXT_TF_RC_ERROR;
2430 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2432 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2433 struct ulp_rte_parser_params *params)
2435 /* Update the act_bitmap with dec ttl */
2436 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2437 return BNXT_TF_RC_SUCCESS;
2440 /* Function to handle the parsing of RTE Flow action JUMP */
2442 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2443 struct ulp_rte_parser_params *params)
2445 /* Update the act_bitmap with dec ttl */
2446 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2447 return BNXT_TF_RC_SUCCESS;
2451 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2452 struct ulp_rte_parser_params *params)
2454 const struct rte_flow_action_sample *sample;
2457 sample = action_item->conf;
2459 /* if SAMPLE bit is set it means this sample action is nested within the
2460 * actions of another sample action; this is not allowed
2462 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2463 BNXT_ULP_ACT_BIT_SAMPLE))
2464 return BNXT_TF_RC_ERROR;
2466 /* a sample action is only allowed as a shared action */
2467 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2468 BNXT_ULP_ACT_BIT_SHARED))
2469 return BNXT_TF_RC_ERROR;
2471 /* only a ratio of 1 i.e. 100% is supported */
2472 if (sample->ratio != 1)
2473 return BNXT_TF_RC_ERROR;
2475 if (!sample->actions)
2476 return BNXT_TF_RC_ERROR;
2478 /* parse the nested actions for a sample action */
2479 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2480 if (ret == BNXT_TF_RC_SUCCESS)
2481 /* Update the act_bitmap with sample */
2482 ULP_BITMAP_SET(params->act_bitmap.bits,
2483 BNXT_ULP_ACT_BIT_SAMPLE);