1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
20 #include "ulp_template_db_tbl.h"
22 /* Local defines for the parsing functions */
23 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
24 #define ULP_VLAN_PRIORITY_MASK 0x700
25 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
26 #define ULP_UDP_PORT_VXLAN 4789
28 /* Utility function to skip the void items. */
30 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
36 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
43 /* Utility function to update the field_bitmap */
45 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
47 enum bnxt_ulp_prsr_action prsr_act)
49 struct ulp_rte_hdr_field *field;
51 field = ¶ms->hdr_field[idx];
52 if (ulp_bitmap_notzero(field->mask, field->size)) {
53 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
54 if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
55 ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
57 if (!ulp_bitmap_is_ones(field->mask, field->size))
58 ULP_COMP_FLD_IDX_WR(params,
59 BNXT_ULP_CF_IDX_WC_MATCH, 1);
61 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
65 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
66 /* Utility function to copy field spec and masks items */
68 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
71 const void *spec_buff,
72 const void *mask_buff,
73 enum bnxt_ulp_prsr_action prsr_act)
75 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
77 /* update the field size */
80 /* copy the mask specifications only if mask is not null */
81 if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
82 memcpy(field->mask, mask_buff, size);
83 ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
86 /* copy the protocol specifications only if mask is not null*/
87 if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
88 memcpy(field->spec, spec_buff, size);
90 /* Increment the index */
94 /* Utility function to copy field spec and masks items */
96 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
100 if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
101 BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
104 *idx = params->field_idx;
105 params->field_idx += size;
110 * Function to handle the parsing of RTE Flows and placing
111 * the RTE flow items into the ulp structures.
114 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
115 struct ulp_rte_parser_params *params)
117 const struct rte_flow_item *item = pattern;
118 struct bnxt_ulp_rte_hdr_info *hdr_info;
120 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
122 /* Set the computed flags for no vlan tags before parsing */
123 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
124 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
126 /* Parse all the items in the pattern */
127 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
128 if (item->type >= (uint32_t)
129 BNXT_RTE_FLOW_ITEM_TYPE_END) {
131 (uint32_t)BNXT_RTE_FLOW_ITEM_TYPE_LAST)
132 goto hdr_parser_error;
133 /* get the header information */
134 hdr_info = &ulp_vendor_hdr_info[item->type -
135 BNXT_RTE_FLOW_ITEM_TYPE_END];
137 if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
138 goto hdr_parser_error;
139 hdr_info = &ulp_hdr_info[item->type];
141 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
142 goto hdr_parser_error;
143 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
144 /* call the registered callback handler */
145 if (hdr_info->proto_hdr_func) {
146 if (hdr_info->proto_hdr_func(item, params) !=
147 BNXT_TF_RC_SUCCESS) {
148 return BNXT_TF_RC_ERROR;
154 /* update the implied SVIF */
155 return ulp_rte_parser_implicit_match_port_process(params);
158 BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
160 return BNXT_TF_RC_PARSE_ERR;
164 * Function to handle the parsing of RTE Flows and placing
165 * the RTE flow actions into the ulp structures.
168 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
169 struct ulp_rte_parser_params *params)
171 const struct rte_flow_action *action_item = actions;
172 struct bnxt_ulp_rte_act_info *hdr_info;
174 /* Parse all the items in the pattern */
175 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
176 if (action_item->type >=
177 (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_END) {
178 if (action_item->type >=
179 (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_LAST)
180 goto act_parser_error;
181 /* get the header information from bnxt actinfo table */
182 hdr_info = &ulp_vendor_act_info[action_item->type -
183 BNXT_RTE_FLOW_ACTION_TYPE_END];
185 if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
186 goto act_parser_error;
187 /* get the header information from the act info table */
188 hdr_info = &ulp_act_info[action_item->type];
190 if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
191 goto act_parser_error;
192 } else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
193 /* call the registered callback handler */
194 if (hdr_info->proto_act_func) {
195 if (hdr_info->proto_act_func(action_item,
197 BNXT_TF_RC_SUCCESS) {
198 return BNXT_TF_RC_ERROR;
204 /* update the implied port details */
205 ulp_rte_parser_implicit_act_port_process(params);
206 return BNXT_TF_RC_SUCCESS;
209 BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
211 return BNXT_TF_RC_ERROR;
215 * Function to handle the post processing of the computed
216 * fields for the interface.
219 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
222 uint16_t port_id, parif;
224 enum bnxt_ulp_direction_type dir;
226 /* get the direction details */
227 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
229 /* read the port id details */
230 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
231 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
234 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
238 if (dir == BNXT_ULP_DIR_INGRESS) {
240 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
241 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
242 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
245 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
248 /* Get the match port type */
249 mtype = ULP_COMP_FLD_IDX_RD(params,
250 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
251 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
252 ULP_COMP_FLD_IDX_WR(params,
253 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
255 /* Set VF func PARIF */
256 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
257 BNXT_ULP_VF_FUNC_PARIF,
260 "ParseErr:ifindex is not valid\n");
263 ULP_COMP_FLD_IDX_WR(params,
264 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
268 /* Set DRV func PARIF */
269 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
270 BNXT_ULP_DRV_FUNC_PARIF,
273 "ParseErr:ifindex is not valid\n");
276 ULP_COMP_FLD_IDX_WR(params,
277 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
280 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
281 ULP_COMP_FLD_IDX_WR(params,
282 BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
289 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
291 enum bnxt_ulp_intf_type match_port_type, act_port_type;
292 enum bnxt_ulp_direction_type dir;
293 uint32_t act_port_set;
295 /* Get the computed details */
296 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
297 match_port_type = ULP_COMP_FLD_IDX_RD(params,
298 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
299 act_port_type = ULP_COMP_FLD_IDX_RD(params,
300 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
301 act_port_set = ULP_COMP_FLD_IDX_RD(params,
302 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
304 /* set the flow direction in the proto and action header */
305 if (dir == BNXT_ULP_DIR_EGRESS) {
306 ULP_BITMAP_SET(params->hdr_bitmap.bits,
307 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
308 ULP_BITMAP_SET(params->act_bitmap.bits,
309 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
312 /* calculate the VF to VF flag */
313 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
314 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
315 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
317 /* Update the decrement ttl computational fields */
318 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
319 BNXT_ULP_ACT_BIT_DEC_TTL)) {
321 * Check that vxlan proto is included and vxlan decap
322 * action is not set then decrement tunnel ttl.
323 * Similarly add GRE and NVGRE in future.
325 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
326 BNXT_ULP_HDR_BIT_T_VXLAN) &&
327 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
328 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
329 ULP_COMP_FLD_IDX_WR(params,
330 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
332 ULP_COMP_FLD_IDX_WR(params,
333 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
337 /* Merge the hdr_fp_bit into the proto header bit */
338 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
340 /* Update the comp fld fid */
341 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
343 /* Update the computed interface parameters */
344 bnxt_ulp_comp_fld_intf_update(params);
346 /* TBD: Handle the flow rejection scenarios */
/*
 * Function to handle the post processing of the parsing details
 */
void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	ulp_post_process_normal_flow(params);
}
360 * Function to compute the flow direction based on the match port details
363 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
365 enum bnxt_ulp_intf_type match_port_type;
367 /* Get the match port type */
368 match_port_type = ULP_COMP_FLD_IDX_RD(params,
369 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
371 /* If ingress flow and matchport is vf rep then dir is egress*/
372 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
373 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
374 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
375 BNXT_ULP_DIR_EGRESS);
377 /* Assign the input direction */
378 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
379 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
380 BNXT_ULP_DIR_INGRESS);
382 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
383 BNXT_ULP_DIR_EGRESS);
387 /* Function to handle the parsing of RTE Flow item PF Header. */
389 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
394 enum bnxt_ulp_direction_type dir;
395 struct ulp_rte_hdr_field *hdr_field;
396 enum bnxt_ulp_svif_type svif_type;
397 enum bnxt_ulp_intf_type port_type;
399 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
400 BNXT_ULP_INVALID_SVIF_VAL) {
402 "SVIF already set,multiple source not support'd\n");
403 return BNXT_TF_RC_ERROR;
406 /* Get port type details */
407 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
408 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
409 BNXT_TF_DBG(ERR, "Invalid port type\n");
410 return BNXT_TF_RC_ERROR;
413 /* Update the match port type */
414 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
416 /* compute the direction */
417 bnxt_ulp_rte_parser_direction_compute(params);
419 /* Get the computed direction */
420 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
421 if (dir == BNXT_ULP_DIR_INGRESS) {
422 svif_type = BNXT_ULP_PHY_PORT_SVIF;
424 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
425 svif_type = BNXT_ULP_VF_FUNC_SVIF;
427 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
429 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
431 svif = rte_cpu_to_be_16(svif);
432 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
433 memcpy(hdr_field->spec, &svif, sizeof(svif));
434 memcpy(hdr_field->mask, &mask, sizeof(mask));
435 hdr_field->size = sizeof(svif);
436 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
437 rte_be_to_cpu_16(svif));
438 return BNXT_TF_RC_SUCCESS;
441 /* Function to handle the parsing of the RTE port id */
443 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
445 uint16_t port_id = 0;
446 uint16_t svif_mask = 0xFFFF;
448 int32_t rc = BNXT_TF_RC_ERROR;
450 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
451 BNXT_ULP_INVALID_SVIF_VAL)
452 return BNXT_TF_RC_SUCCESS;
454 /* SVIF not set. So get the port id */
455 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
457 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
460 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
464 /* Update the SVIF details */
465 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
469 /* Function to handle the implicit action port id */
471 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
473 struct rte_flow_action action_item = {0};
474 struct rte_flow_action_port_id port_id = {0};
476 /* Read the action port set bit */
477 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
478 /* Already set, so just exit */
479 return BNXT_TF_RC_SUCCESS;
481 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
482 action_item.conf = &port_id;
484 /* Update the action port based on incoming port */
485 ulp_rte_port_id_act_handler(&action_item, params);
487 /* Reset the action port set bit */
488 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
489 return BNXT_TF_RC_SUCCESS;
492 /* Function to handle the parsing of RTE Flow item PF Header. */
494 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
495 struct ulp_rte_parser_params *params)
497 uint16_t port_id = 0;
498 uint16_t svif_mask = 0xFFFF;
501 /* Get the implicit port id */
502 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
504 /* perform the conversion from dpdk port to bnxt ifindex */
505 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
508 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
509 return BNXT_TF_RC_ERROR;
512 /* Update the SVIF details */
513 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
516 /* Function to handle the parsing of RTE Flow item VF Header. */
518 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
519 struct ulp_rte_parser_params *params)
521 const struct rte_flow_item_vf *vf_spec = item->spec;
522 const struct rte_flow_item_vf *vf_mask = item->mask;
525 int32_t rc = BNXT_TF_RC_PARSE_ERR;
527 /* Get VF rte_flow_item for Port details */
529 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
533 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
538 /* perform the conversion from VF Func id to bnxt ifindex */
539 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
542 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
545 /* Update the SVIF details */
546 return ulp_rte_parser_svif_set(params, ifindex, mask);
549 /* Function to handle the parsing of RTE Flow item port id Header. */
551 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
552 struct ulp_rte_parser_params *params)
554 const struct rte_flow_item_port_id *port_spec = item->spec;
555 const struct rte_flow_item_port_id *port_mask = item->mask;
557 int32_t rc = BNXT_TF_RC_PARSE_ERR;
561 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
565 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
568 mask = port_mask->id;
570 /* perform the conversion from dpdk port to bnxt ifindex */
571 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
574 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
577 /* Update the SVIF details */
578 return ulp_rte_parser_svif_set(params, ifindex, mask);
581 /* Function to handle the parsing of RTE Flow item phy port Header. */
583 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
584 struct ulp_rte_parser_params *params)
586 const struct rte_flow_item_phy_port *port_spec = item->spec;
587 const struct rte_flow_item_phy_port *port_mask = item->mask;
589 int32_t rc = BNXT_TF_RC_ERROR;
591 enum bnxt_ulp_direction_type dir;
592 struct ulp_rte_hdr_field *hdr_field;
594 /* Copy the rte_flow_item for phy port into hdr_field */
596 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
600 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
603 mask = port_mask->index;
605 /* Update the match port type */
606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
607 BNXT_ULP_INTF_TYPE_PHY_PORT);
609 /* Compute the Hw direction */
610 bnxt_ulp_rte_parser_direction_compute(params);
612 /* Direction validation */
613 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
614 if (dir == BNXT_ULP_DIR_EGRESS) {
616 "Parse Err:Phy ports are valid only for ingress\n");
617 return BNXT_TF_RC_PARSE_ERR;
620 /* Get the physical port details from port db */
621 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
624 BNXT_TF_DBG(ERR, "Failed to get port details\n");
625 return BNXT_TF_RC_PARSE_ERR;
628 /* Update the SVIF details */
629 svif = rte_cpu_to_be_16(svif);
630 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
631 memcpy(hdr_field->spec, &svif, sizeof(svif));
632 memcpy(hdr_field->mask, &mask, sizeof(mask));
633 hdr_field->size = sizeof(svif);
634 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
635 rte_be_to_cpu_16(svif));
636 return BNXT_TF_RC_SUCCESS;
639 /* Function to handle the update of proto header based on field values */
641 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
642 uint16_t type, uint32_t in_flag)
644 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
646 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
647 BNXT_ULP_HDR_BIT_I_IPV4);
648 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
650 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
651 BNXT_ULP_HDR_BIT_O_IPV4);
652 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
654 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
656 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
657 BNXT_ULP_HDR_BIT_I_IPV6);
658 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
660 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
661 BNXT_ULP_HDR_BIT_O_IPV6);
662 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
667 /* Internal Function to identify broadcast or multicast packets */
669 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
671 if (rte_is_multicast_ether_addr(eth_addr) ||
672 rte_is_broadcast_ether_addr(eth_addr)) {
674 "No support for bcast or mcast addr offload\n");
680 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
682 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
683 struct ulp_rte_parser_params *params)
685 const struct rte_flow_item_eth *eth_spec = item->spec;
686 const struct rte_flow_item_eth *eth_mask = item->mask;
687 uint32_t idx = 0, dmac_idx = 0;
689 uint16_t eth_type = 0;
690 uint32_t inner_flag = 0;
692 /* Perform validations */
694 /* Todo: work around to avoid multicast and broadcast addr */
695 if (ulp_rte_parser_is_bcmc_addr(ð_spec->dst))
696 return BNXT_TF_RC_PARSE_ERR;
698 if (ulp_rte_parser_is_bcmc_addr(ð_spec->src))
699 return BNXT_TF_RC_PARSE_ERR;
701 eth_type = eth_spec->type;
704 if (ulp_rte_prsr_fld_size_validate(params, &idx,
705 BNXT_ULP_PROTO_HDR_ETH_NUM)) {
706 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
707 return BNXT_TF_RC_ERROR;
710 * Copy the rte_flow_item for eth into hdr_field using ethernet
714 size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
715 ulp_rte_prsr_fld_mask(params, &idx, size,
716 ulp_deference_struct(eth_spec, dst.addr_bytes),
717 ulp_deference_struct(eth_mask, dst.addr_bytes),
718 ULP_PRSR_ACT_DEFAULT);
720 size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
721 ulp_rte_prsr_fld_mask(params, &idx, size,
722 ulp_deference_struct(eth_spec, src.addr_bytes),
723 ulp_deference_struct(eth_mask, src.addr_bytes),
724 ULP_PRSR_ACT_DEFAULT);
726 size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
727 ulp_rte_prsr_fld_mask(params, &idx, size,
728 ulp_deference_struct(eth_spec, type),
729 ulp_deference_struct(eth_mask, type),
730 ULP_PRSR_ACT_MATCH_IGNORE);
732 /* Update the protocol hdr bitmap */
733 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
734 BNXT_ULP_HDR_BIT_O_ETH) ||
735 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
736 BNXT_ULP_HDR_BIT_O_IPV4) ||
737 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
738 BNXT_ULP_HDR_BIT_O_IPV6) ||
739 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
740 BNXT_ULP_HDR_BIT_O_UDP) ||
741 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
742 BNXT_ULP_HDR_BIT_O_TCP)) {
743 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
746 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
747 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
750 /* Update the field protocol hdr bitmap */
751 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
753 return BNXT_TF_RC_SUCCESS;
756 /* Function to handle the parsing of RTE Flow item Vlan Header. */
758 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
759 struct ulp_rte_parser_params *params)
761 const struct rte_flow_item_vlan *vlan_spec = item->spec;
762 const struct rte_flow_item_vlan *vlan_mask = item->mask;
763 struct ulp_rte_hdr_bitmap *hdr_bit;
765 uint16_t vlan_tag = 0, priority = 0;
766 uint16_t vlan_tag_mask = 0, priority_mask = 0;
767 uint32_t outer_vtag_num;
768 uint32_t inner_vtag_num;
769 uint16_t eth_type = 0;
770 uint32_t inner_flag = 0;
774 vlan_tag = ntohs(vlan_spec->tci);
775 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
776 vlan_tag &= ULP_VLAN_TAG_MASK;
777 vlan_tag = htons(vlan_tag);
778 eth_type = vlan_spec->inner_type;
782 vlan_tag_mask = ntohs(vlan_mask->tci);
783 priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
784 vlan_tag_mask &= 0xfff;
787 * the storage for priority and vlan tag is 2 bytes
788 * The mask of priority which is 3 bits if it is all 1's
789 * then make the rest bits 13 bits as 1's
790 * so that it is matched as exact match.
792 if (priority_mask == ULP_VLAN_PRIORITY_MASK)
793 priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
794 if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
795 vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
796 vlan_tag_mask = htons(vlan_tag_mask);
799 if (ulp_rte_prsr_fld_size_validate(params, &idx,
800 BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
801 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
802 return BNXT_TF_RC_ERROR;
806 * Copy the rte_flow_item for vlan into hdr_field using Vlan
809 size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
811 * The priority field is ignored since OVS is setting it as
812 * wild card match and it is not supported. This is a work
813 * around and shall be addressed in the future.
815 ulp_rte_prsr_fld_mask(params, &idx, size,
818 ULP_PRSR_ACT_MASK_IGNORE);
820 ulp_rte_prsr_fld_mask(params, &idx, size,
823 ULP_PRSR_ACT_DEFAULT);
825 size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
826 ulp_rte_prsr_fld_mask(params, &idx, size,
827 ulp_deference_struct(vlan_spec, inner_type),
828 ulp_deference_struct(vlan_mask, inner_type),
829 ULP_PRSR_ACT_MATCH_IGNORE);
831 /* Get the outer tag and inner tag counts */
832 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
833 BNXT_ULP_CF_IDX_O_VTAG_NUM);
834 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
835 BNXT_ULP_CF_IDX_I_VTAG_NUM);
837 /* Update the hdr_bitmap of the vlans */
838 hdr_bit = ¶ms->hdr_bitmap;
839 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
840 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
842 /* Update the vlan tag num */
844 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
846 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
847 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
848 ULP_BITMAP_SET(params->hdr_bitmap.bits,
849 BNXT_ULP_HDR_BIT_OO_VLAN);
850 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
851 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
852 outer_vtag_num == 1) {
853 /* update the vlan tag num */
855 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
857 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
858 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
859 ULP_BITMAP_SET(params->hdr_bitmap.bits,
860 BNXT_ULP_HDR_BIT_OI_VLAN);
861 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
862 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
864 /* update the vlan tag num */
866 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
868 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
869 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
870 ULP_BITMAP_SET(params->hdr_bitmap.bits,
871 BNXT_ULP_HDR_BIT_IO_VLAN);
873 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
874 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
875 inner_vtag_num == 1) {
876 /* update the vlan tag num */
878 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
880 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
881 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
882 ULP_BITMAP_SET(params->hdr_bitmap.bits,
883 BNXT_ULP_HDR_BIT_II_VLAN);
886 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
887 return BNXT_TF_RC_ERROR;
889 /* Update the field protocol hdr bitmap */
890 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
891 return BNXT_TF_RC_SUCCESS;
894 /* Function to handle the update of proto header based on field values */
896 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
897 uint8_t proto, uint32_t in_flag)
899 if (proto == IPPROTO_UDP) {
901 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
902 BNXT_ULP_HDR_BIT_I_UDP);
903 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
905 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
906 BNXT_ULP_HDR_BIT_O_UDP);
907 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
909 } else if (proto == IPPROTO_TCP) {
911 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
912 BNXT_ULP_HDR_BIT_I_TCP);
913 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
915 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
916 BNXT_ULP_HDR_BIT_O_TCP);
917 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
919 } else if (proto == IPPROTO_GRE) {
920 ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
921 } else if (proto == IPPROTO_ICMP) {
922 if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
923 ULP_BITMAP_SET(param->hdr_bitmap.bits,
924 BNXT_ULP_HDR_BIT_I_ICMP);
926 ULP_BITMAP_SET(param->hdr_bitmap.bits,
927 BNXT_ULP_HDR_BIT_O_ICMP);
931 ULP_COMP_FLD_IDX_WR(param,
932 BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
934 ULP_COMP_FLD_IDX_WR(param,
935 BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
938 ULP_COMP_FLD_IDX_WR(param,
939 BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
941 ULP_COMP_FLD_IDX_WR(param,
942 BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
948 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
950 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
951 struct ulp_rte_parser_params *params)
953 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
954 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
955 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
956 uint32_t idx = 0, dip_idx = 0;
959 uint32_t inner_flag = 0;
962 /* validate there are no 3rd L3 header */
963 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
965 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
966 return BNXT_TF_RC_ERROR;
969 if (ulp_rte_prsr_fld_size_validate(params, &idx,
970 BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
971 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
972 return BNXT_TF_RC_ERROR;
976 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
979 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
980 ulp_rte_prsr_fld_mask(params, &idx, size,
981 ulp_deference_struct(ipv4_spec, hdr.version_ihl),
982 ulp_deference_struct(ipv4_mask, hdr.version_ihl),
983 ULP_PRSR_ACT_DEFAULT);
986 * The tos field is ignored since OVS is setting it as wild card
987 * match and it is not supported. This is a work around and
988 * shall be addressed in the future.
990 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
991 ulp_rte_prsr_fld_mask(params, &idx, size,
992 ulp_deference_struct(ipv4_spec,
993 hdr.type_of_service),
994 ulp_deference_struct(ipv4_mask,
995 hdr.type_of_service),
996 ULP_PRSR_ACT_MASK_IGNORE);
998 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
999 ulp_rte_prsr_fld_mask(params, &idx, size,
1000 ulp_deference_struct(ipv4_spec, hdr.total_length),
1001 ulp_deference_struct(ipv4_mask, hdr.total_length),
1002 ULP_PRSR_ACT_DEFAULT);
1004 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
1005 ulp_rte_prsr_fld_mask(params, &idx, size,
1006 ulp_deference_struct(ipv4_spec, hdr.packet_id),
1007 ulp_deference_struct(ipv4_mask, hdr.packet_id),
1008 ULP_PRSR_ACT_DEFAULT);
1010 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
1011 ulp_rte_prsr_fld_mask(params, &idx, size,
1012 ulp_deference_struct(ipv4_spec,
1013 hdr.fragment_offset),
1014 ulp_deference_struct(ipv4_mask,
1015 hdr.fragment_offset),
1016 ULP_PRSR_ACT_DEFAULT);
1018 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
1019 ulp_rte_prsr_fld_mask(params, &idx, size,
1020 ulp_deference_struct(ipv4_spec, hdr.time_to_live),
1021 ulp_deference_struct(ipv4_mask, hdr.time_to_live),
1022 ULP_PRSR_ACT_DEFAULT);
1024 /* Ignore proto for matching templates */
1025 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
1026 ulp_rte_prsr_fld_mask(params, &idx, size,
1027 ulp_deference_struct(ipv4_spec,
1029 ulp_deference_struct(ipv4_mask,
1031 ULP_PRSR_ACT_MATCH_IGNORE);
1033 proto = ipv4_spec->hdr.next_proto_id;
1035 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
1036 ulp_rte_prsr_fld_mask(params, &idx, size,
1037 ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
1038 ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
1039 ULP_PRSR_ACT_DEFAULT);
1041 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
1042 ulp_rte_prsr_fld_mask(params, &idx, size,
1043 ulp_deference_struct(ipv4_spec, hdr.src_addr),
1044 ulp_deference_struct(ipv4_mask, hdr.src_addr),
1045 ULP_PRSR_ACT_DEFAULT);
1048 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
1049 ulp_rte_prsr_fld_mask(params, &idx, size,
1050 ulp_deference_struct(ipv4_spec, hdr.dst_addr),
1051 ulp_deference_struct(ipv4_mask, hdr.dst_addr),
1052 ULP_PRSR_ACT_DEFAULT);
1054 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1055 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1056 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1057 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1058 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1061 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1062 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1063 /* Update the tunnel offload dest ip offset */
1064 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1068 /* Some of the PMD applications may set the protocol field
1069 * in the IPv4 spec but don't set the mask. So, consider
1070 * the mask in the proto value calculation.
1073 proto &= ipv4_mask->hdr.next_proto_id;
1075 /* Update the field protocol hdr bitmap */
1076 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1077 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1078 return BNXT_TF_RC_SUCCESS;
1081 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * Parse an RTE_FLOW_ITEM_TYPE_IPV6 spec/mask pair into params->hdr_field.
 * Splits hdr.vtc_flow into version / traffic-class / flow-label sub-fields,
 * copies the remaining IPv6 header fields, then marks the inner or outer
 * IPv6 bit depending on whether an outer L3 header was already seen.
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L3 header
 * is present or the field-size validation fails.
 */
1083 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1084 struct ulp_rte_parser_params *params)
1086 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1087 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
/* NOTE(review): "¶ms" below is mojibake for "&params" — fix source encoding */
1088 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1089 uint32_t idx = 0, dip_idx = 0;
1091 uint32_t ver_spec = 0, ver_mask = 0;
1092 uint32_t tc_spec = 0, tc_mask = 0;
1093 uint32_t lab_spec = 0, lab_mask = 0;
1095 uint32_t inner_flag = 0;
1098 /* validate there are no 3rd L3 header */
1099 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1101 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1102 return BNXT_TF_RC_ERROR;
/* Ensure hdr_field has room for all IPv6 sub-fields before copying */
1105 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1106 BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
1107 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1108 return BNXT_TF_RC_ERROR;
1112 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* vtc_flow is decomposed into three separately-matched sub-fields */
1116 ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1117 tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1118 lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1119 proto = ipv6_spec->hdr.proto;
1123 ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1124 tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1125 lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1127 /* Some of the PMD applications may set the protocol field
1128 * in the IPv6 spec but don't set the mask. So, consider
1129 * the mask in proto value calculation.
1131 proto &= ipv6_mask->hdr.proto;
1134 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
1135 ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
1136 ULP_PRSR_ACT_DEFAULT);
1138 * The TC and flow label field are ignored since OVS is
1139 * setting it for match and it is not supported.
1140 * This is a work around and
1141 * shall be addressed in the future.
1143 ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
1144 ULP_PRSR_ACT_MASK_IGNORE);
1145 ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
1146 ULP_PRSR_ACT_MASK_IGNORE);
1148 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
1149 ulp_rte_prsr_fld_mask(params, &idx, size,
1150 ulp_deference_struct(ipv6_spec, hdr.payload_len),
1151 ulp_deference_struct(ipv6_mask, hdr.payload_len),
1152 ULP_PRSR_ACT_DEFAULT);
1154 /* Ignore proto for template matching */
1155 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
1156 ulp_rte_prsr_fld_mask(params, &idx, size,
1157 ulp_deference_struct(ipv6_spec, hdr.proto),
1158 ulp_deference_struct(ipv6_mask, hdr.proto),
1159 ULP_PRSR_ACT_MATCH_IGNORE);
1161 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
1162 ulp_rte_prsr_fld_mask(params, &idx, size,
1163 ulp_deference_struct(ipv6_spec, hdr.hop_limits),
1164 ulp_deference_struct(ipv6_mask, hdr.hop_limits),
1165 ULP_PRSR_ACT_DEFAULT);
1167 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
1168 ulp_rte_prsr_fld_mask(params, &idx, size,
1169 ulp_deference_struct(ipv6_spec, hdr.src_addr),
1170 ulp_deference_struct(ipv6_mask, hdr.src_addr),
1171 ULP_PRSR_ACT_DEFAULT);
1174 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
1175 ulp_rte_prsr_fld_mask(params, &idx, size,
1176 ulp_deference_struct(ipv6_spec, hdr.dst_addr),
1177 ulp_deference_struct(ipv6_mask, hdr.dst_addr),
1178 ULP_PRSR_ACT_DEFAULT);
1180 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
/* An outer L3 already parsed means this header is the inner one */
1181 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1182 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1183 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1184 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1187 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1188 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1189 /* Update the tunnel offload dest ip offset */
1190 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1194 /* Update the field protocol hdr bitmap */
1195 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1196 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1198 return BNXT_TF_RC_SUCCESS;
1201 /* Function to handle the update of proto header based on field values */
/*
 * Classify the L4 destination port: a VXLAN well-known port (4789, big
 * endian compare) sets the fast-path VXLAN header bit and the L3-tunnel
 * computed field; otherwise an already-set VXLAN/GRE tunnel bit alone
 * forces the L3-tunnel computed field on.
 */
1203 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1206 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1207 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1208 BNXT_ULP_HDR_BIT_T_VXLAN);
1209 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
/* Tunnel flag is also required when a VXLAN/GRE item was parsed earlier */
1212 if (ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1213 BNXT_ULP_HDR_BIT_T_VXLAN) ||
1214 ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1215 BNXT_ULP_HDR_BIT_T_GRE))
1216 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1219 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Parse an RTE_FLOW_ITEM_TYPE_UDP spec/mask pair into params->hdr_field,
 * record src/dst ports (host order) in the inner or outer L4 computed
 * fields, and feed the dest port to ulp_rte_l4_proto_type_update() for
 * VXLAN tunnel detection. Errors out on a third L4 header.
 */
1221 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1222 struct ulp_rte_parser_params *params)
1224 const struct rte_flow_item_udp *udp_spec = item->spec;
1225 const struct rte_flow_item_udp *udp_mask = item->mask;
/* NOTE(review): "¶ms" below is mojibake for "&params" — fix source encoding */
1226 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1229 uint16_t dport = 0, sport = 0;
1232 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1234 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1235 return BNXT_TF_RC_ERROR;
/* Ports kept in network byte order here; converted on CF writes below */
1239 sport = udp_spec->hdr.src_port;
1240 dport = udp_spec->hdr.dst_port;
1243 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1244 BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1245 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1246 return BNXT_TF_RC_ERROR;
1250 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1253 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1254 ulp_rte_prsr_fld_mask(params, &idx, size,
1255 ulp_deference_struct(udp_spec, hdr.src_port),
1256 ulp_deference_struct(udp_mask, hdr.src_port),
1257 ULP_PRSR_ACT_DEFAULT);
1259 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1260 ulp_rte_prsr_fld_mask(params, &idx, size,
1261 ulp_deference_struct(udp_spec, hdr.dst_port),
1262 ulp_deference_struct(udp_mask, hdr.dst_port),
1263 ULP_PRSR_ACT_DEFAULT);
1265 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1266 ulp_rte_prsr_fld_mask(params, &idx, size,
1267 ulp_deference_struct(udp_spec, hdr.dgram_len),
1268 ulp_deference_struct(udp_mask, hdr.dgram_len),
1269 ULP_PRSR_ACT_DEFAULT);
1271 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1272 ulp_rte_prsr_fld_mask(params, &idx, size,
1273 ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1274 ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1275 ULP_PRSR_ACT_DEFAULT);
1277 /* Set the udp header bitmap and computed l4 header bitmaps */
/* Outer L4 already present -> this UDP header is the inner one */
1278 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1279 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1280 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1281 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1282 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1283 (uint32_t)rte_be_to_cpu_16(sport));
1284 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1285 (uint32_t)rte_be_to_cpu_16(dport));
1286 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1288 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
/* Field-bitmap port CFs are only written when the mask selects the port */
1290 if (udp_mask && udp_mask->hdr.src_port)
1291 ULP_COMP_FLD_IDX_WR(params,
1292 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1294 if (udp_mask && udp_mask->hdr.dst_port)
1295 ULP_COMP_FLD_IDX_WR(params,
1296 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1299 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1300 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1301 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1302 (uint32_t)rte_be_to_cpu_16(sport));
1303 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1304 (uint32_t)rte_be_to_cpu_16(dport));
1305 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1307 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1309 if (udp_mask && udp_mask->hdr.src_port)
1310 ULP_COMP_FLD_IDX_WR(params,
1311 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1313 if (udp_mask && udp_mask->hdr.dst_port)
1314 ULP_COMP_FLD_IDX_WR(params,
1315 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1318 /* Update the field protocol hdr bitmap */
/* dport is still big-endian here; the helper compares in big-endian too */
1319 ulp_rte_l4_proto_type_update(params, dport);
1321 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1322 return BNXT_TF_RC_SUCCESS;
1325 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Parse an RTE_FLOW_ITEM_TYPE_TCP spec/mask pair into params->hdr_field
 * (ports, seq/ack, data offset, flags, window, checksum, urgent pointer)
 * and record src/dst ports in the inner or outer L4 computed fields.
 * Mirrors ulp_rte_udp_hdr_handler() except no tunnel-port detection.
 * Errors out on a third L4 header.
 */
1327 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1328 struct ulp_rte_parser_params *params)
1330 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1331 const struct rte_flow_item_tcp *tcp_mask = item->mask;
/* NOTE(review): "¶ms" below is mojibake for "&params" — fix source encoding */
1332 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1334 uint16_t dport = 0, sport = 0;
1338 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1340 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1341 return BNXT_TF_RC_ERROR;
/* Ports kept in network byte order; converted on CF writes below */
1345 sport = tcp_spec->hdr.src_port;
1346 dport = tcp_spec->hdr.dst_port;
1349 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1350 BNXT_ULP_PROTO_HDR_TCP_NUM)) {
1351 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1352 return BNXT_TF_RC_ERROR;
1356 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1359 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1360 ulp_rte_prsr_fld_mask(params, &idx, size,
1361 ulp_deference_struct(tcp_spec, hdr.src_port),
1362 ulp_deference_struct(tcp_mask, hdr.src_port),
1363 ULP_PRSR_ACT_DEFAULT);
1365 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1366 ulp_rte_prsr_fld_mask(params, &idx, size,
1367 ulp_deference_struct(tcp_spec, hdr.dst_port),
1368 ulp_deference_struct(tcp_mask, hdr.dst_port),
1369 ULP_PRSR_ACT_DEFAULT);
1371 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1372 ulp_rte_prsr_fld_mask(params, &idx, size,
1373 ulp_deference_struct(tcp_spec, hdr.sent_seq),
1374 ulp_deference_struct(tcp_mask, hdr.sent_seq),
1375 ULP_PRSR_ACT_DEFAULT);
1377 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1378 ulp_rte_prsr_fld_mask(params, &idx, size,
1379 ulp_deference_struct(tcp_spec, hdr.recv_ack),
1380 ulp_deference_struct(tcp_mask, hdr.recv_ack),
1381 ULP_PRSR_ACT_DEFAULT);
1383 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1384 ulp_rte_prsr_fld_mask(params, &idx, size,
1385 ulp_deference_struct(tcp_spec, hdr.data_off),
1386 ulp_deference_struct(tcp_mask, hdr.data_off),
1387 ULP_PRSR_ACT_DEFAULT);
1389 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1390 ulp_rte_prsr_fld_mask(params, &idx, size,
1391 ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1392 ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1393 ULP_PRSR_ACT_DEFAULT);
1395 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1396 ulp_rte_prsr_fld_mask(params, &idx, size,
1397 ulp_deference_struct(tcp_spec, hdr.rx_win),
1398 ulp_deference_struct(tcp_mask, hdr.rx_win),
1399 ULP_PRSR_ACT_DEFAULT);
1401 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1402 ulp_rte_prsr_fld_mask(params, &idx, size,
1403 ulp_deference_struct(tcp_spec, hdr.cksum),
1404 ulp_deference_struct(tcp_mask, hdr.cksum),
1405 ULP_PRSR_ACT_DEFAULT);
1407 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1408 ulp_rte_prsr_fld_mask(params, &idx, size,
1409 ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1410 ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1411 ULP_PRSR_ACT_DEFAULT);
1413 /* Set the udp header bitmap and computed l4 header bitmaps */
/* Outer L4 already present -> this TCP header is the inner one */
1414 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1415 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1416 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1417 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1418 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1419 (uint32_t)rte_be_to_cpu_16(sport));
1420 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1421 (uint32_t)rte_be_to_cpu_16(dport));
1422 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1424 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
/* Field-bitmap port CFs only written when the mask selects the port */
1426 if (tcp_mask && tcp_mask->hdr.src_port)
1427 ULP_COMP_FLD_IDX_WR(params,
1428 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1430 if (tcp_mask && tcp_mask->hdr.dst_port)
1431 ULP_COMP_FLD_IDX_WR(params,
1432 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1435 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1436 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1437 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1438 (uint32_t)rte_be_to_cpu_16(sport));
1439 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1440 (uint32_t)rte_be_to_cpu_16(dport));
1441 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1443 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1445 if (tcp_mask && tcp_mask->hdr.src_port)
1446 ULP_COMP_FLD_IDX_WR(params,
1447 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1449 if (tcp_mask && tcp_mask->hdr.dst_port)
1450 ULP_COMP_FLD_IDX_WR(params,
1451 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1454 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1455 return BNXT_TF_RC_SUCCESS;
1458 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Parse an RTE_FLOW_ITEM_TYPE_VXLAN spec/mask pair (flags, rsvd0, vni,
 * rsvd1) into params->hdr_field, set the VXLAN tunnel header bit, and
 * refresh the L3-tunnel computed field via ulp_rte_l4_proto_type_update
 * (port 0 -> only the already-set tunnel bit matters).
 */
1460 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1461 struct ulp_rte_parser_params *params)
1463 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1464 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
/* NOTE(review): "¶ms" below is mojibake for "&params" — fix source encoding */
1465 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1469 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1470 BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1471 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1472 return BNXT_TF_RC_ERROR;
1476 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1479 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1480 ulp_rte_prsr_fld_mask(params, &idx, size,
1481 ulp_deference_struct(vxlan_spec, flags),
1482 ulp_deference_struct(vxlan_mask, flags),
1483 ULP_PRSR_ACT_DEFAULT);
1485 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1486 ulp_rte_prsr_fld_mask(params, &idx, size,
1487 ulp_deference_struct(vxlan_spec, rsvd0),
1488 ulp_deference_struct(vxlan_mask, rsvd0),
1489 ULP_PRSR_ACT_DEFAULT);
1491 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1492 ulp_rte_prsr_fld_mask(params, &idx, size,
1493 ulp_deference_struct(vxlan_spec, vni),
1494 ulp_deference_struct(vxlan_mask, vni),
1495 ULP_PRSR_ACT_DEFAULT);
1497 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1498 ulp_rte_prsr_fld_mask(params, &idx, size,
1499 ulp_deference_struct(vxlan_spec, rsvd1),
1500 ulp_deference_struct(vxlan_mask, rsvd1),
1501 ULP_PRSR_ACT_DEFAULT);
1503 /* Update the hdr_bitmap with vxlan */
1504 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1505 ulp_rte_l4_proto_type_update(params, 0);
1506 return BNXT_TF_RC_SUCCESS;
1509 /* Function to handle the parsing of RTE Flow item GRE Header. */
/*
 * Parse an RTE_FLOW_ITEM_TYPE_GRE spec/mask pair (c_rsvd0_ver, protocol)
 * into params->hdr_field, set the GRE tunnel header bit, and refresh the
 * L3-tunnel computed field via ulp_rte_l4_proto_type_update(params, 0).
 */
1511 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1512 struct ulp_rte_parser_params *params)
1514 const struct rte_flow_item_gre *gre_spec = item->spec;
1515 const struct rte_flow_item_gre *gre_mask = item->mask;
/* NOTE(review): "¶ms" below is mojibake for "&params" — fix source encoding */
1516 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1520 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1521 BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1522 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1523 return BNXT_TF_RC_ERROR;
1526 size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1527 ulp_rte_prsr_fld_mask(params, &idx, size,
1528 ulp_deference_struct(gre_spec, c_rsvd0_ver),
1529 ulp_deference_struct(gre_mask, c_rsvd0_ver),
1530 ULP_PRSR_ACT_DEFAULT);
1532 size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1533 ulp_rte_prsr_fld_mask(params, &idx, size,
1534 ulp_deference_struct(gre_spec, protocol),
1535 ulp_deference_struct(gre_mask, protocol),
1536 ULP_PRSR_ACT_DEFAULT);
1538 /* Update the hdr_bitmap with GRE */
1539 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1540 ulp_rte_l4_proto_type_update(params, 0);
1541 return BNXT_TF_RC_SUCCESS;
1544 /* Function to handle the parsing of RTE Flow item ANY. */
/* ANY matches everything, so no fields or bitmaps need updating. */
1546 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1547 struct ulp_rte_parser_params *params __rte_unused)
1549 return BNXT_TF_RC_SUCCESS;
1552 /* Function to handle the parsing of RTE Flow item ICMP Header. */
/*
 * Parse an RTE_FLOW_ITEM_TYPE_ICMP spec/mask pair (type, code, cksum,
 * ident, seq_nb) into params->hdr_field, then set the inner ICMP bit
 * when inside a tunnel (L3_TUN computed field set) or the outer bit
 * otherwise.
 */
1554 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1555 struct ulp_rte_parser_params *params)
1557 const struct rte_flow_item_icmp *icmp_spec = item->spec;
1558 const struct rte_flow_item_icmp *icmp_mask = item->mask;
/* NOTE(review): "¶ms" below is mojibake for "&params" — fix source encoding */
1559 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1563 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1564 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1565 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1566 return BNXT_TF_RC_ERROR;
1569 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1570 ulp_rte_prsr_fld_mask(params, &idx, size,
1571 ulp_deference_struct(icmp_spec, hdr.icmp_type),
1572 ulp_deference_struct(icmp_mask, hdr.icmp_type),
1573 ULP_PRSR_ACT_DEFAULT);
1575 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1576 ulp_rte_prsr_fld_mask(params, &idx, size,
1577 ulp_deference_struct(icmp_spec, hdr.icmp_code),
1578 ulp_deference_struct(icmp_mask, hdr.icmp_code),
1579 ULP_PRSR_ACT_DEFAULT);
1581 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1582 ulp_rte_prsr_fld_mask(params, &idx, size,
1583 ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1584 ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1585 ULP_PRSR_ACT_DEFAULT);
1587 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1588 ulp_rte_prsr_fld_mask(params, &idx, size,
1589 ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1590 ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1591 ULP_PRSR_ACT_DEFAULT);
1593 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1594 ulp_rte_prsr_fld_mask(params, &idx, size,
1595 ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1596 ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1597 ULP_PRSR_ACT_DEFAULT);
1599 /* Update the hdr_bitmap with ICMP */
1600 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1601 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1603 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1604 return BNXT_TF_RC_SUCCESS;
1607 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
/*
 * Parse an RTE_FLOW_ITEM_TYPE_ICMP6 spec/mask pair (type, code, checksum)
 * into params->hdr_field. Rejects the item when an outer IPv4 header was
 * parsed (ICMPv6 over IPv4 is invalid), then sets the inner or outer ICMP
 * bit depending on the L3-tunnel computed field.
 */
1609 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1610 struct ulp_rte_parser_params *params)
1612 const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1613 const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
/* NOTE(review): "¶ms" below is mojibake for "&params" — fix source encoding */
1614 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1618 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1619 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1620 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1621 return BNXT_TF_RC_ERROR;
1624 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1625 ulp_rte_prsr_fld_mask(params, &idx, size,
1626 ulp_deference_struct(icmp_spec, type),
1627 ulp_deference_struct(icmp_mask, type),
1628 ULP_PRSR_ACT_DEFAULT);
1630 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1631 ulp_rte_prsr_fld_mask(params, &idx, size,
1632 ulp_deference_struct(icmp_spec, code),
1633 ulp_deference_struct(icmp_mask, code),
1634 ULP_PRSR_ACT_DEFAULT);
1636 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
1637 ulp_rte_prsr_fld_mask(params, &idx, size,
1638 ulp_deference_struct(icmp_spec, checksum),
1639 ulp_deference_struct(icmp_mask, checksum),
1640 ULP_PRSR_ACT_DEFAULT);
/* ICMPv6 cannot follow an outer IPv4 header */
1642 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
1643 BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
1644 return BNXT_TF_RC_ERROR;
1647 /* Update the hdr_bitmap with ICMP */
1648 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1649 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1651 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1652 return BNXT_TF_RC_SUCCESS;
1655 /* Function to handle the parsing of RTE Flow item void Header */
/* VOID items carry no match data; nothing to parse. */
1657 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1658 struct ulp_rte_parser_params *params __rte_unused)
1660 return BNXT_TF_RC_SUCCESS;
1663 /* Function to handle the parsing of RTE Flow action void Header. */
/* VOID actions are no-ops; accept and continue. */
1665 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1666 struct ulp_rte_parser_params *params __rte_unused)
1668 return BNXT_TF_RC_SUCCESS;
1671 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Copy the MARK action's 32-bit id (converted to big endian) into the
 * action-property table and set the MARK action bit. Returns an error
 * when the action configuration is missing.
 */
1673 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1674 struct ulp_rte_parser_params *param)
1676 const struct rte_flow_action_mark *mark;
/* NOTE(review): "¶m" below is mojibake for "&param" — fix source encoding */
1677 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1680 mark = action_item->conf;
1682 mark_id = tfp_cpu_to_be_32(mark->id);
1683 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1684 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1686 /* Update the hdr_bitmap with vxlan */
1687 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1688 return BNXT_TF_RC_SUCCESS;
1690 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1691 return BNXT_TF_RC_ERROR;
1694 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Copy the RSS action configuration (types, level, key_len, key) into the
 * action-property table and set the RSS action bit. Validates that the
 * action config exists and that the key fits the property buffer.
 */
1696 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1697 struct ulp_rte_parser_params *param)
1699 const struct rte_flow_action_rss *rss;
/* NOTE(review): "¶m" below is mojibake for "&param" — fix source encoding */
1700 struct ulp_rte_act_prop *ap = ¶m->act_prop;
1702 if (action_item == NULL || action_item->conf == NULL) {
1703 BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1704 return BNXT_TF_RC_ERROR;
1707 rss = action_item->conf;
1708 /* Copy the rss into the specific action properties */
1709 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1710 BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1711 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1712 BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1713 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1714 &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
/* Bound-check the caller-supplied key before copying it */
1716 if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1717 BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1718 return BNXT_TF_RC_ERROR;
1720 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1723 /* set the RSS action header bit */
1724 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1726 return BNXT_TF_RC_SUCCESS;
1729 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walk the vxlan_encap action's item chain (ETH [VLAN [VLAN]] IPV4|IPV6
 * UDP VXLAN) and serialize each header into the encap action-property
 * buffers (L2 MACs, VLAN tags, IP header, UDP, VXLAN). Records sizes and
 * the L3 type, then sets the VXLAN_ENCAP action bit. Any missing or
 * out-of-order item aborts with BNXT_TF_RC_ERROR.
 */
1731 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1732 struct ulp_rte_parser_params *params)
1734 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1735 const struct rte_flow_item *item;
1736 const struct rte_flow_item_eth *eth_spec;
1737 const struct rte_flow_item_ipv4 *ipv4_spec;
1738 const struct rte_flow_item_ipv6 *ipv6_spec;
1739 struct rte_flow_item_vxlan vxlan_spec;
1740 uint32_t vlan_num = 0, vlan_size = 0;
1741 uint32_t ip_size = 0, ip_type = 0;
1742 uint32_t vxlan_size = 0;
1744 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1745 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1747 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1748 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
/* NOTE(review): "¶ms" below is mojibake for "&params" — fix source encoding */
1750 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1751 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1752 const uint8_t *tmp_buff;
1754 vxlan_encap = action_item->conf;
1756 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1757 return BNXT_TF_RC_ERROR;
1760 item = vxlan_encap->definition;
1762 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1763 return BNXT_TF_RC_ERROR;
1766 if (!ulp_rte_item_skip_void(&item, 0))
1767 return BNXT_TF_RC_ERROR;
1769 /* must have ethernet header */
1770 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1771 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1772 return BNXT_TF_RC_ERROR;
1774 eth_spec = item->spec;
1775 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1776 ulp_encap_buffer_copy(buff,
1777 eth_spec->dst.addr_bytes,
1778 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1779 ULP_BUFFER_ALIGN_8_BYTE);
1781 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1782 ulp_encap_buffer_copy(buff,
1783 eth_spec->src.addr_bytes,
1784 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1785 ULP_BUFFER_ALIGN_8_BYTE);
1787 /* Goto the next item */
1788 if (!ulp_rte_item_skip_void(&item, 1))
1789 return BNXT_TF_RC_ERROR;
1791 /* May have vlan header */
1792 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1794 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1795 ulp_encap_buffer_copy(buff,
1797 sizeof(struct rte_flow_item_vlan),
1798 ULP_BUFFER_ALIGN_8_BYTE);
1800 if (!ulp_rte_item_skip_void(&item, 1))
1801 return BNXT_TF_RC_ERROR;
1804 /* may have two vlan headers */
1805 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1807 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1808 sizeof(struct rte_flow_item_vlan)],
1810 sizeof(struct rte_flow_item_vlan));
1811 if (!ulp_rte_item_skip_void(&item, 1))
1812 return BNXT_TF_RC_ERROR;
1814 /* Update the vlan count and size of more than one */
1816 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1817 vlan_num = tfp_cpu_to_be_32(vlan_num);
1818 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1821 vlan_size = tfp_cpu_to_be_32(vlan_size);
1822 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1827 /* L3 must be IPv4, IPv6 */
1828 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1829 ipv4_spec = item->spec;
1830 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1832 /* copy the ipv4 details */
/* Empty ver/hlen+TOS in the spec -> fall back to the default header bytes */
1833 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1834 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1835 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1836 ulp_encap_buffer_copy(buff,
1838 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1839 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1840 ULP_BUFFER_ALIGN_8_BYTE);
1842 /* Total length being ignored in the ip hdr. */
1843 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1844 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1845 ulp_encap_buffer_copy(buff,
1847 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1848 ULP_BUFFER_ALIGN_8_BYTE);
1849 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1850 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1851 ulp_encap_buffer_copy(buff,
1852 &ipv4_spec->hdr.version_ihl,
1853 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1854 ULP_BUFFER_ALIGN_8_BYTE);
1857 /* Update the dst ip address in ip encap buffer */
1858 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1859 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1860 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1861 ulp_encap_buffer_copy(buff,
1862 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1863 sizeof(ipv4_spec->hdr.dst_addr),
1864 ULP_BUFFER_ALIGN_8_BYTE);
1866 /* Update the src ip address */
1867 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1868 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1869 sizeof(ipv4_spec->hdr.src_addr)];
1870 ulp_encap_buffer_copy(buff,
1871 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1872 sizeof(ipv4_spec->hdr.src_addr),
1873 ULP_BUFFER_ALIGN_8_BYTE);
1875 /* Update the ip size details */
1876 ip_size = tfp_cpu_to_be_32(ip_size);
1877 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1878 &ip_size, sizeof(uint32_t));
1880 /* update the ip type */
1881 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1882 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1883 &ip_type, sizeof(uint32_t));
1885 /* update the computed field to notify it is ipv4 header */
1886 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1889 if (!ulp_rte_item_skip_void(&item, 1))
1890 return BNXT_TF_RC_ERROR;
1891 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1892 ipv6_spec = item->spec;
1893 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1895 /* copy the ipv6 details */
/* Empty vtc_flow in the spec -> fall back to the default IPv6 header */
1896 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1897 if (ulp_buffer_is_empty(tmp_buff,
1898 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1899 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1900 ulp_encap_buffer_copy(buff,
1902 sizeof(def_ipv6_hdr),
1903 ULP_BUFFER_ALIGN_8_BYTE);
1905 /* The payload length being ignored in the ip hdr. */
1906 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1907 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1908 ulp_encap_buffer_copy(buff,
1910 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1911 ULP_BUFFER_ALIGN_8_BYTE);
1912 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1913 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1914 BNXT_ULP_ENCAP_IPV6_DO];
1915 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1916 ulp_encap_buffer_copy(buff,
1918 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1919 ULP_BUFFER_ALIGN_8_BYTE);
1921 /* Update the dst ip address in ip encap buffer */
1922 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1923 sizeof(def_ipv6_hdr)];
1924 ulp_encap_buffer_copy(buff,
1925 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1926 sizeof(ipv6_spec->hdr.dst_addr),
1927 ULP_BUFFER_ALIGN_8_BYTE);
1929 /* Update the src ip address */
1930 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1931 ulp_encap_buffer_copy(buff,
1932 (const uint8_t *)ipv6_spec->hdr.src_addr,
1933 sizeof(ipv6_spec->hdr.src_addr),
1934 ULP_BUFFER_ALIGN_16_BYTE);
1936 /* Update the ip size details */
1937 ip_size = tfp_cpu_to_be_32(ip_size);
1938 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1939 &ip_size, sizeof(uint32_t));
1941 /* update the ip type */
1942 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1943 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1944 &ip_type, sizeof(uint32_t));
1946 /* update the computed field to notify it is ipv6 header */
1947 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1950 if (!ulp_rte_item_skip_void(&item, 1))
1951 return BNXT_TF_RC_ERROR;
1953 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1954 return BNXT_TF_RC_ERROR;
1958 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1959 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1960 return BNXT_TF_RC_ERROR;
1962 /* copy the udp details */
1963 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1964 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1965 ULP_BUFFER_ALIGN_8_BYTE);
1967 if (!ulp_rte_item_skip_void(&item, 1))
1968 return BNXT_TF_RC_ERROR;
1971 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1972 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1973 return BNXT_TF_RC_ERROR;
1975 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1976 /* copy the vxlan details */
1977 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* 0x08 = VXLAN "I" flag: VNI field is valid (RFC 7348) */
1978 vxlan_spec.flags = 0x08;
1979 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
/* IPv6 encap splits the vxlan copy into two half-size aligned chunks */
1980 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1981 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1982 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1984 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1985 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1986 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1987 (const uint8_t *)&vxlan_spec.vni,
1988 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1990 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1991 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1992 &vxlan_size, sizeof(uint32_t));
1994 /* update the hdr_bitmap with vxlan */
1995 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
1996 return BNXT_TF_RC_SUCCESS;
1999 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
/*
 * Handle RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: set the VXLAN_DECAP action
 * bit and mark the tunnel/tunnel-decap computed fields. The action
 * carries no configuration, so nothing else is parsed.
 * (Note: the original one-line comment above says "encap"; this handler
 * is for decap.)
 */
2001 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
2003 struct ulp_rte_parser_params *params)
2005 /* update the hdr_bitmap with vxlan */
2006 ULP_BITMAP_SET(params->act_bitmap.bits,
2007 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2008 /* Update computational field with tunnel decap info */
2009 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2010 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
2011 return BNXT_TF_RC_SUCCESS;
2014 /* Function to handle the parsing of RTE Flow action drop Header. */
/* DROP has no configuration; just set the action bit. */
2016 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2017 struct ulp_rte_parser_params *params)
2019 /* Update the hdr_bitmap with drop */
2020 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2021 return BNXT_TF_RC_SUCCESS;
2024 /* Function to handle the parsing of RTE Flow action count. */
/*
 * Handle RTE_FLOW_ACTION_TYPE_COUNT: reject shared counters (not
 * supported, returns BNXT_TF_RC_PARSE_ERR), copy the counter config into
 * the action-property table and set the COUNT action bit.
 */
2026 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2027 struct ulp_rte_parser_params *params)
2029 const struct rte_flow_action_count *act_count;
/* NOTE(review): "¶ms" below is mojibake for "&params" — fix source encoding */
2030 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
2032 act_count = action_item->conf;
2034 if (act_count->shared) {
2036 "Parse Error:Shared count not supported\n");
2037 return BNXT_TF_RC_PARSE_ERR;
2039 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2041 BNXT_ULP_ACT_PROP_SZ_COUNT);
2044 /* Update the hdr_bitmap with count */
2045 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2046 return BNXT_TF_RC_SUCCESS;
2049 /* Function to handle the parsing of action ports. */
2051 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2054 enum bnxt_ulp_direction_type dir;
2057 struct ulp_rte_act_prop *act = ¶m->act_prop;
2058 enum bnxt_ulp_intf_type port_type;
2061 /* Get the direction */
2062 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
2063 if (dir == BNXT_ULP_DIR_EGRESS) {
2064 /* For egress direction, fill vport */
2065 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2066 return BNXT_TF_RC_ERROR;
2069 pid = rte_cpu_to_be_32(pid);
2070 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2071 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2073 /* For ingress direction, fill vnic */
2074 port_type = ULP_COMP_FLD_IDX_RD(param,
2075 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2076 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2077 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2079 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2081 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2083 return BNXT_TF_RC_ERROR;
2086 pid = rte_cpu_to_be_32(pid);
2087 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2088 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2091 /* Update the action port set bit */
2092 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2093 return BNXT_TF_RC_SUCCESS;
2096 /* Function to handle the parsing of RTE Flow action PF. */
2098 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2099 struct ulp_rte_parser_params *params)
2103 enum bnxt_ulp_intf_type intf_type;
2105 /* Get the port id of the current device */
2106 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2108 /* Get the port db ifindex */
2109 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2111 BNXT_TF_DBG(ERR, "Invalid port id\n");
2112 return BNXT_TF_RC_ERROR;
2115 /* Check the port is PF port */
2116 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2117 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2118 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2119 return BNXT_TF_RC_ERROR;
2121 /* Update the action properties */
2122 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2123 return ulp_rte_parser_act_port_set(params, ifindex);
2126 /* Function to handle the parsing of RTE Flow action VF. */
2128 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2129 struct ulp_rte_parser_params *params)
2131 const struct rte_flow_action_vf *vf_action;
2132 enum bnxt_ulp_intf_type intf_type;
2136 vf_action = action_item->conf;
2138 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2139 return BNXT_TF_RC_PARSE_ERR;
2142 if (vf_action->original) {
2143 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2144 return BNXT_TF_RC_PARSE_ERR;
2147 bp = bnxt_pmd_get_bp(params->port_id);
2149 BNXT_TF_DBG(ERR, "Invalid bp\n");
2150 return BNXT_TF_RC_ERROR;
2153 /* vf_action->id is a logical number which in this case is an
2154 * offset from the first VF. So, to get the absolute VF id, the
2155 * offset must be added to the absolute first vf id of that port.
2157 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2161 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2162 return BNXT_TF_RC_ERROR;
2164 /* Check the port is VF port */
2165 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2166 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2167 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2168 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2169 return BNXT_TF_RC_ERROR;
2172 /* Update the action properties */
2173 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2174 return ulp_rte_parser_act_port_set(params, ifindex);
2177 /* Function to handle the parsing of RTE Flow action port_id. */
2179 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
2180 struct ulp_rte_parser_params *param)
2182 const struct rte_flow_action_port_id *port_id = act_item->conf;
2184 enum bnxt_ulp_intf_type intf_type;
2188 "ParseErr: Invalid Argument\n");
2189 return BNXT_TF_RC_PARSE_ERR;
2191 if (port_id->original) {
2193 "ParseErr:Portid Original not supported\n");
2194 return BNXT_TF_RC_PARSE_ERR;
2197 /* Get the port db ifindex */
2198 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2200 BNXT_TF_DBG(ERR, "Invalid port id\n");
2201 return BNXT_TF_RC_ERROR;
2204 /* Get the intf type */
2205 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2207 BNXT_TF_DBG(ERR, "Invalid port type\n");
2208 return BNXT_TF_RC_ERROR;
2211 /* Set the action port */
2212 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2213 return ulp_rte_parser_act_port_set(param, ifindex);
2216 /* Function to handle the parsing of RTE Flow action phy_port. */
2218 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2219 struct ulp_rte_parser_params *prm)
2221 const struct rte_flow_action_phy_port *phy_port;
2225 enum bnxt_ulp_direction_type dir;
2227 phy_port = action_item->conf;
2230 "ParseErr: Invalid Argument\n");
2231 return BNXT_TF_RC_PARSE_ERR;
2234 if (phy_port->original) {
2236 "Parse Err:Port Original not supported\n");
2237 return BNXT_TF_RC_PARSE_ERR;
2239 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2240 if (dir != BNXT_ULP_DIR_EGRESS) {
2242 "Parse Err:Phy ports are valid only for egress\n");
2243 return BNXT_TF_RC_PARSE_ERR;
2245 /* Get the physical port details from port db */
2246 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2249 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2254 pid = rte_cpu_to_be_32(pid);
2255 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2256 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2258 /* Update the action port set bit */
2259 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2260 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2261 BNXT_ULP_INTF_TYPE_PHY_PORT);
2262 return BNXT_TF_RC_SUCCESS;
2265 /* Function to handle the parsing of RTE Flow action pop vlan. */
2267 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2268 struct ulp_rte_parser_params *params)
2270 /* Update the act_bitmap with pop */
2271 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2272 return BNXT_TF_RC_SUCCESS;
2275 /* Function to handle the parsing of RTE Flow action push vlan. */
2277 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2278 struct ulp_rte_parser_params *params)
2280 const struct rte_flow_action_of_push_vlan *push_vlan;
2282 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2284 push_vlan = action_item->conf;
2286 ethertype = push_vlan->ethertype;
2287 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2289 "Parse Err: Ethertype not supported\n");
2290 return BNXT_TF_RC_PARSE_ERR;
2292 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2293 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2294 /* Update the hdr_bitmap with push vlan */
2295 ULP_BITMAP_SET(params->act_bitmap.bits,
2296 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2297 return BNXT_TF_RC_SUCCESS;
2299 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2300 return BNXT_TF_RC_ERROR;
2303 /* Function to handle the parsing of RTE Flow action set vlan id. */
2305 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2306 struct ulp_rte_parser_params *params)
2308 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2310 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2312 vlan_vid = action_item->conf;
2313 if (vlan_vid && vlan_vid->vlan_vid) {
2314 vid = vlan_vid->vlan_vid;
2315 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2316 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2317 /* Update the hdr_bitmap with vlan vid */
2318 ULP_BITMAP_SET(params->act_bitmap.bits,
2319 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2320 return BNXT_TF_RC_SUCCESS;
2322 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2323 return BNXT_TF_RC_ERROR;
2326 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2328 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2329 struct ulp_rte_parser_params *params)
2331 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2333 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2335 vlan_pcp = action_item->conf;
2337 pcp = vlan_pcp->vlan_pcp;
2338 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2339 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2340 /* Update the hdr_bitmap with vlan vid */
2341 ULP_BITMAP_SET(params->act_bitmap.bits,
2342 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2343 return BNXT_TF_RC_SUCCESS;
2345 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2346 return BNXT_TF_RC_ERROR;
2349 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2351 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2352 struct ulp_rte_parser_params *params)
2354 const struct rte_flow_action_set_ipv4 *set_ipv4;
2355 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2357 set_ipv4 = action_item->conf;
2359 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2360 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2361 /* Update the hdr_bitmap with set ipv4 src */
2362 ULP_BITMAP_SET(params->act_bitmap.bits,
2363 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2364 return BNXT_TF_RC_SUCCESS;
2366 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2367 return BNXT_TF_RC_ERROR;
2370 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2372 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2373 struct ulp_rte_parser_params *params)
2375 const struct rte_flow_action_set_ipv4 *set_ipv4;
2376 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2378 set_ipv4 = action_item->conf;
2380 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2381 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2382 /* Update the hdr_bitmap with set ipv4 dst */
2383 ULP_BITMAP_SET(params->act_bitmap.bits,
2384 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2385 return BNXT_TF_RC_SUCCESS;
2387 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2388 return BNXT_TF_RC_ERROR;
2391 /* Function to handle the parsing of RTE Flow action set tp src.*/
2393 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2394 struct ulp_rte_parser_params *params)
2396 const struct rte_flow_action_set_tp *set_tp;
2397 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2399 set_tp = action_item->conf;
2401 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2402 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2403 /* Update the hdr_bitmap with set tp src */
2404 ULP_BITMAP_SET(params->act_bitmap.bits,
2405 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2406 return BNXT_TF_RC_SUCCESS;
2409 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2410 return BNXT_TF_RC_ERROR;
2413 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2415 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2416 struct ulp_rte_parser_params *params)
2418 const struct rte_flow_action_set_tp *set_tp;
2419 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2421 set_tp = action_item->conf;
2423 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2424 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2425 /* Update the hdr_bitmap with set tp dst */
2426 ULP_BITMAP_SET(params->act_bitmap.bits,
2427 BNXT_ULP_ACT_BIT_SET_TP_DST);
2428 return BNXT_TF_RC_SUCCESS;
2431 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2432 return BNXT_TF_RC_ERROR;
2435 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2437 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2438 struct ulp_rte_parser_params *params)
2440 /* Update the act_bitmap with dec ttl */
2441 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2442 return BNXT_TF_RC_SUCCESS;
2445 /* Function to handle the parsing of RTE Flow action JUMP */
2447 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2448 struct ulp_rte_parser_params *params)
2450 /* Update the act_bitmap with dec ttl */
2451 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2452 return BNXT_TF_RC_SUCCESS;
2456 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2457 struct ulp_rte_parser_params *params)
2459 const struct rte_flow_action_sample *sample;
2462 sample = action_item->conf;
2464 /* if SAMPLE bit is set it means this sample action is nested within the
2465 * actions of another sample action; this is not allowed
2467 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2468 BNXT_ULP_ACT_BIT_SAMPLE))
2469 return BNXT_TF_RC_ERROR;
2471 /* a sample action is only allowed as a shared action */
2472 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2473 BNXT_ULP_ACT_BIT_SHARED))
2474 return BNXT_TF_RC_ERROR;
2476 /* only a ratio of 1 i.e. 100% is supported */
2477 if (sample->ratio != 1)
2478 return BNXT_TF_RC_ERROR;
2480 if (!sample->actions)
2481 return BNXT_TF_RC_ERROR;
2483 /* parse the nested actions for a sample action */
2484 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2485 if (ret == BNXT_TF_RC_SUCCESS)
2486 /* Update the act_bitmap with sample */
2487 ULP_BITMAP_SET(params->act_bitmap.bits,
2488 BNXT_ULP_ACT_BIT_SAMPLE);
2493 /* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
2495 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
2496 struct ulp_rte_parser_params *params)
2498 /* Set the F1 flow header bit */
2499 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
2500 return ulp_rte_vxlan_decap_act_handler(action_item, params);
2503 /* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
2505 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
2506 struct ulp_rte_parser_params *params)
2509 /* Set the F2 flow header bit */
2510 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
2511 return ulp_rte_vxlan_decap_act_handler(NULL, params);