1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
19 #include "ulp_template_db_tbl.h"
21 /* Local defines for the parsing functions */
22 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
/* NOTE(review): 0x700 is the 3-bit PCP value as it appears after the
 * htons() of (tci >> 13) done in the vlan handler — confirm on big-endian.
 */
23 #define ULP_VLAN_PRIORITY_MASK 0x700
24 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
25 #define ULP_UDP_PORT_VXLAN 4789 /* IANA-assigned VXLAN UDP destination port */
27 /* Utility function to skip the void items. */
/* Advances *item past RTE_FLOW_ITEM_TYPE_VOID entries in the pattern list.
 * NOTE(review): 'increment' presumably selects whether to step past the first
 * item before scanning — body is partially out of view, confirm in full file.
 */
29 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
35 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
42 /* Utility function to update the field_bitmap */
/* Marks hdr_field[idx] in the field bitmap when its mask is non-zero;
 * also flags wildcard matching when the mask is not all-ones.
 */
44 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
46 enum bnxt_ulp_prsr_action prsr_act)
48 struct ulp_rte_hdr_field *field;
/* fix: '&params' had been corrupted to a pilcrow character ("&para" HTML
 * entity mangling); restored.
 */
50 field = &params->hdr_field[idx];
51 if (ulp_bitmap_notzero(field->mask, field->size)) {
52 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
53 if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
54 ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
/* Not an exact (all-ones) mask => record that wildcard match is needed */
56 if (!ulp_bitmap_is_ones(field->mask, field->size))
57 ULP_COMP_FLD_IDX_WR(params,
58 BNXT_ULP_CF_IDX_WC_MATCH, 1);
60 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
/* Return &(x)->y when x is non-NULL, else NULL. ("deference" is a historic
 * typo for "dereference"; the name is kept since callers depend on it.)
 */
64 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
65 /* Utility function to copy field spec and masks items */
/* Copies mask_buff/spec_buff into hdr_field[*idx] (mask first, spec only
 * when the mask is non-zero) and updates the field bitmap accordingly.
 */
67 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
70 const void *spec_buff,
71 const void *mask_buff,
72 enum bnxt_ulp_prsr_action prsr_act)
/* fix: '&params' had been corrupted to a pilcrow character ("&para" HTML
 * entity mangling); restored.
 */
74 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
76 /* update the field size */
79 /* copy the mask specifications only if mask is not null */
80 if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
81 memcpy(field->mask, mask_buff, size);
82 ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
85 /* copy the protocol specifications only if mask is not null*/
86 if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
87 memcpy(field->spec, spec_buff, size);
89 /* Increment the index */
93 /* Utility function to validate there is room for 'size' more header fields,
 * returning the current field index through *idx and advancing field_idx.
 * (Original comment was copy-pasted from the copy helper above.)
 */
95 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
/* NOTE(review): on the OOB path *idx is printed before this function assigns
 * it — presumably the caller initialized it; verify in the full file.
 */
99 if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
100 BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
103 *idx = params->field_idx;
104 params->field_idx += size;
109 * Function to handle the parsing of RTE Flows and placing
110 * the RTE flow items into the ulp structures.
/* Walks the pattern until RTE_FLOW_ITEM_TYPE_END, dispatching each item to
 * its registered handler from the ulp_hdr_info table; finishes by applying
 * the implicit source-port (SVIF) match.
 */
113 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
114 struct ulp_rte_parser_params *params)
116 const struct rte_flow_item *item = pattern;
117 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading SVIF header-field slots before item parsing */
119 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
121 /* Set the computed flags for no vlan tags before parsing */
122 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
123 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
125 /* Parse all the items in the pattern */
126 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
127 /* get the header information from the flow_hdr_info table */
128 hdr_info = &ulp_hdr_info[item->type];
129 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
131 "Truflow parser does not support type %d\n",
133 return BNXT_TF_RC_PARSE_ERR;
134 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
135 /* call the registered callback handler */
136 if (hdr_info->proto_hdr_func) {
137 if (hdr_info->proto_hdr_func(item, params) !=
138 BNXT_TF_RC_SUCCESS) {
139 return BNXT_TF_RC_ERROR;
145 /* update the implied SVIF */
146 return ulp_rte_parser_implicit_match_port_process(params);
150 * Function to handle the parsing of RTE Flows and placing
151 * the RTE flow actions into the ulp structures.
/* Mirrors hdr_parse for the action list: dispatches each action through the
 * ulp_act_info table, then applies the implicit destination-port action.
 */
154 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
155 struct ulp_rte_parser_params *params)
157 const struct rte_flow_action *action_item = actions;
158 struct bnxt_ulp_rte_act_info *hdr_info;
160 /* Parse all the items in the pattern */
161 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
162 /* get the header information from the flow_hdr_info table */
163 hdr_info = &ulp_act_info[action_item->type];
164 if (hdr_info->act_type ==
165 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
167 "Truflow parser does not support act %u\n",
169 return BNXT_TF_RC_ERROR;
170 } else if (hdr_info->act_type ==
171 BNXT_ULP_ACT_TYPE_SUPPORTED) {
172 /* call the registered callback handler */
173 if (hdr_info->proto_act_func) {
174 if (hdr_info->proto_act_func(action_item,
176 BNXT_TF_RC_SUCCESS) {
177 return BNXT_TF_RC_ERROR;
183 /* update the implied port details */
184 ulp_rte_parser_implicit_act_port_process(params);
185 return BNXT_TF_RC_SUCCESS;
189 * Function to handle the post processing of the computed
190 * fields for the interface.
/* Derives the PARIF computed fields (phy-port, VF-func, drv-func) from the
 * incoming port via the port DB, keyed on the computed flow direction and
 * the match port type.
 */
193 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
196 uint16_t port_id, parif;
198 enum bnxt_ulp_direction_type dir;
200 /* get the direction details */
201 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
203 /* read the port id details */
204 port_id = ULP_COMP_FLD_IDX_RD(params,
205 BNXT_ULP_CF_IDX_INCOMING_IF);
206 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
209 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
/* Ingress: record the physical port PARIF */
213 if (dir == BNXT_ULP_DIR_INGRESS) {
215 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
216 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
217 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
220 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
223 /* Get the match port type */
224 mtype = ULP_COMP_FLD_IDX_RD(params,
225 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
226 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
227 ULP_COMP_FLD_IDX_WR(params,
228 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
230 /* Set VF func PARIF */
231 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
232 BNXT_ULP_VF_FUNC_PARIF,
235 "ParseErr:ifindex is not valid\n");
238 ULP_COMP_FLD_IDX_WR(params,
239 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
243 /* Set DRV func PARIF */
244 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
245 BNXT_ULP_DRV_FUNC_PARIF,
248 "ParseErr:ifindex is not valid\n");
251 ULP_COMP_FLD_IDX_WR(params,
252 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
255 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
256 ULP_COMP_FLD_IDX_WR(params,
257 BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
/* Post-processing for a normal (non-tunnel) flow: stamps the direction into
 * the header/action bitmaps, computes VF-to-VF and dec-TTL computed fields,
 * merges the fast-path header bits, and records the flow id (fid).
 */
264 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
266 enum bnxt_ulp_intf_type match_port_type, act_port_type;
267 enum bnxt_ulp_direction_type dir;
268 uint32_t act_port_set;
270 /* Get the computed details */
271 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
272 match_port_type = ULP_COMP_FLD_IDX_RD(params,
273 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
274 act_port_type = ULP_COMP_FLD_IDX_RD(params,
275 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
276 act_port_set = ULP_COMP_FLD_IDX_RD(params,
277 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
279 /* set the flow direction in the proto and action header */
280 if (dir == BNXT_ULP_DIR_EGRESS) {
281 ULP_BITMAP_SET(params->hdr_bitmap.bits,
282 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
283 ULP_BITMAP_SET(params->act_bitmap.bits,
284 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
287 /* calculate the VF to VF flag */
288 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
289 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
290 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
292 /* Update the decrement ttl computational fields */
293 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
294 BNXT_ULP_ACT_BIT_DEC_TTL)) {
296 * Check that vxlan proto is included and vxlan decap
297 * action is not set then decrement tunnel ttl.
298 * Similarly add GRE and NVGRE in future.
300 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
301 BNXT_ULP_HDR_BIT_T_VXLAN) &&
302 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
303 BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
304 ULP_COMP_FLD_IDX_WR(params,
305 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
307 ULP_COMP_FLD_IDX_WR(params,
308 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
312 /* Merge the hdr_fp_bit into the proto header bit */
313 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
315 /* Update the comp fld fid */
316 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
318 /* Update the computed interface parameters */
319 bnxt_ulp_comp_fld_intf_update(params);
321 /* TBD: Handle the flow rejection scenarios */
326 * Function to handle the post processing of the parsing details
/* Runs normal-flow post processing, then tunnel-flow post processing; the
 * tunnel step's return code is the overall result.
 */
329 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
331 ulp_post_process_normal_flow(params);
332 return ulp_post_process_tun_flow(params);
336 * Function to compute the flow direction based on the match port details
/* An ingress-attributed flow that matches on a VF representor is treated as
 * egress from the hardware's point of view; otherwise the flow attribute's
 * direction is used as-is.
 */
339 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
341 enum bnxt_ulp_intf_type match_port_type;
343 /* Get the match port type */
344 match_port_type = ULP_COMP_FLD_IDX_RD(params,
345 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
347 /* If ingress flow and matchport is vf rep then dir is egress*/
348 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
349 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
350 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
351 BNXT_ULP_DIR_EGRESS);
353 /* Assign the input direction */
354 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
355 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
356 BNXT_ULP_DIR_INGRESS);
358 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
359 BNXT_ULP_DIR_EGRESS);
363 /* Function to handle the parsing of RTE Flow item PF Header. */
/* Resolves the SVIF for 'ifindex' (type chosen by direction/port type),
 * stores spec/mask into the reserved SVIF header field, and records the
 * SVIF computed-field flag. Fails if an SVIF was already set.
 */
365 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
370 enum bnxt_ulp_direction_type dir;
371 struct ulp_rte_hdr_field *hdr_field;
372 enum bnxt_ulp_svif_type svif_type;
373 enum bnxt_ulp_intf_type port_type;
375 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
376 BNXT_ULP_INVALID_SVIF_VAL) {
378 "SVIF already set,multiple source not support'd\n");
379 return BNXT_TF_RC_ERROR;
382 /* Get port type details */
383 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
384 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
385 BNXT_TF_DBG(ERR, "Invalid port type\n");
386 return BNXT_TF_RC_ERROR;
389 /* Update the match port type */
390 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
392 /* compute the direction */
393 bnxt_ulp_rte_parser_direction_compute(params);
395 /* Get the computed direction */
396 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
397 if (dir == BNXT_ULP_DIR_INGRESS) {
398 svif_type = BNXT_ULP_PHY_PORT_SVIF;
400 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
401 svif_type = BNXT_ULP_VF_FUNC_SVIF;
403 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
405 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* SVIF is stored big-endian in the header field */
407 svif = rte_cpu_to_be_16(svif);
/* fix: '&params' had been corrupted to a pilcrow character ("&para" HTML
 * entity mangling); restored.
 */
408 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
409 memcpy(hdr_field->spec, &svif, sizeof(svif));
410 memcpy(hdr_field->mask, &mask, sizeof(mask));
411 hdr_field->size = sizeof(svif);
412 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
413 rte_be_to_cpu_16(svif));
414 return BNXT_TF_RC_SUCCESS;
417 /* Function to handle the parsing of the RTE port id */
/* If no explicit SVIF item was parsed, derives it from the incoming dpdk
 * port id with a full (0xFFFF) mask.
 */
419 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
421 uint16_t port_id = 0;
422 uint16_t svif_mask = 0xFFFF;
424 int32_t rc = BNXT_TF_RC_ERROR;
426 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
427 BNXT_ULP_INVALID_SVIF_VAL)
428 return BNXT_TF_RC_SUCCESS;
430 /* SVIF not set. So get the port id */
431 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
433 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
436 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
440 /* Update the SVIF details */
441 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
445 /* Function to handle the implicit action port id */
/* When no explicit port action was given, synthesizes a port_id action from
 * the incoming interface, runs it through the normal port-id action handler,
 * then clears the "port set" flag so the implicit choice is not sticky.
 */
447 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
449 struct rte_flow_action action_item = {0};
450 struct rte_flow_action_port_id port_id = {0};
452 /* Read the action port set bit */
453 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
454 /* Already set, so just exit */
455 return BNXT_TF_RC_SUCCESS;
457 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
458 action_item.conf = &port_id;
460 /* Update the action port based on incoming port */
461 ulp_rte_port_id_act_handler(&action_item, params);
463 /* Reset the action port set bit */
464 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
465 return BNXT_TF_RC_SUCCESS;
468 /* Function to handle the parsing of RTE Flow item PF Header. */
/* PF item carries no spec/mask of interest; match is on the implicit
 * incoming port, translated to a bnxt ifindex, with a full SVIF mask.
 */
470 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
471 struct ulp_rte_parser_params *params)
473 uint16_t port_id = 0;
474 uint16_t svif_mask = 0xFFFF;
477 /* Get the implicit port id */
478 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
480 /* perform the conversion from dpdk port to bnxt ifindex */
481 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
484 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
485 return BNXT_TF_RC_ERROR;
488 /* Update the SVIF details */
489 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
492 /* Function to handle the parsing of RTE Flow item VF Header. */
/* Validates the VF item's spec/mask, maps the VF function id to a bnxt
 * ifindex through the port DB, then programs the SVIF match.
 */
494 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
495 struct ulp_rte_parser_params *params)
497 const struct rte_flow_item_vf *vf_spec = item->spec;
498 const struct rte_flow_item_vf *vf_mask = item->mask;
501 int32_t rc = BNXT_TF_RC_PARSE_ERR;
503 /* Get VF rte_flow_item for Port details */
505 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
509 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
514 /* perform the conversion from VF Func id to bnxt ifindex */
515 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
518 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
521 /* Update the SVIF details */
522 return ulp_rte_parser_svif_set(params, ifindex, mask);
525 /* Function to handle the parsing of RTE Flow item port id Header. */
/* Validates the port_id item, converts the dpdk port to a bnxt ifindex,
 * then programs the SVIF match using the item's mask.
 */
527 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
528 struct ulp_rte_parser_params *params)
530 const struct rte_flow_item_port_id *port_spec = item->spec;
531 const struct rte_flow_item_port_id *port_mask = item->mask;
533 int32_t rc = BNXT_TF_RC_PARSE_ERR;
537 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
541 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
544 mask = port_mask->id;
546 /* perform the conversion from dpdk port to bnxt ifindex */
547 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
550 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
553 /* Update the SVIF details */
554 return ulp_rte_parser_svif_set(params, ifindex, mask);
557 /* Function to handle the parsing of RTE Flow item phy port Header. */
/* Physical-port items are only valid on ingress; fetches the port's SVIF
 * from the port DB and writes spec/mask into the SVIF header field.
 */
559 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
560 struct ulp_rte_parser_params *params)
562 const struct rte_flow_item_phy_port *port_spec = item->spec;
563 const struct rte_flow_item_phy_port *port_mask = item->mask;
565 int32_t rc = BNXT_TF_RC_ERROR;
567 enum bnxt_ulp_direction_type dir;
568 struct ulp_rte_hdr_field *hdr_field;
570 /* Copy the rte_flow_item for phy port into hdr_field */
572 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
576 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
579 mask = port_mask->index;
581 /* Update the match port type */
582 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
583 BNXT_ULP_INTF_TYPE_PHY_PORT);
585 /* Compute the Hw direction */
586 bnxt_ulp_rte_parser_direction_compute(params);
588 /* Direction validation */
589 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
590 if (dir == BNXT_ULP_DIR_EGRESS) {
592 "Parse Err:Phy ports are valid only for ingress\n");
593 return BNXT_TF_RC_PARSE_ERR;
596 /* Get the physical port details from port db */
597 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
600 BNXT_TF_DBG(ERR, "Failed to get port details\n");
601 return BNXT_TF_RC_PARSE_ERR;
604 /* Update the SVIF details */
605 svif = rte_cpu_to_be_16(svif);
/* fix: '&params' had been corrupted to a pilcrow character ("&para" HTML
 * entity mangling); restored.
 */
606 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
607 memcpy(hdr_field->spec, &svif, sizeof(svif));
608 memcpy(hdr_field->mask, &mask, sizeof(mask));
609 hdr_field->size = sizeof(svif);
610 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
611 rte_be_to_cpu_16(svif));
612 return BNXT_TF_RC_SUCCESS;
615 /* Function to handle the update of proto header based on field values */
/* Translates an ethertype (big-endian) into the inner/outer IPv4/IPv6
 * fast-path header bits and L3 computed flags, keyed by in_flag.
 */
617 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
618 uint16_t type, uint32_t in_flag)
620 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
622 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
623 BNXT_ULP_HDR_BIT_I_IPV4);
624 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
626 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
627 BNXT_ULP_HDR_BIT_O_IPV4);
628 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
630 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
632 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
633 BNXT_ULP_HDR_BIT_I_IPV6);
634 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
636 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
637 BNXT_ULP_HDR_BIT_O_IPV6);
638 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
643 /* Internal Function to identify broadcast or multicast packets */
/* Returns non-zero (reject) for multicast/broadcast MACs — offload of such
 * addresses is not supported, so callers fail the parse.
 */
645 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
647 if (rte_is_multicast_ether_addr(eth_addr) ||
648 rte_is_broadcast_ether_addr(eth_addr)) {
650 "No support for bcast or mcast addr offload\n");
656 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/* Copies dst/src MAC and ethertype into header fields, decides whether this
 * is the inner or outer ethernet header from the bitmap state, and feeds the
 * ethertype into the L2 proto-type update.
 */
658 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
659 struct ulp_rte_parser_params *params)
661 const struct rte_flow_item_eth *eth_spec = item->spec;
662 const struct rte_flow_item_eth *eth_mask = item->mask;
665 uint16_t eth_type = 0;
666 uint32_t inner_flag = 0;
668 /* Perform validations */
670 /* Todo: work around to avoid multicast and broadcast addr */
/* fix: '&eth_spec' had been corrupted to an eth character ("&eth" HTML
 * entity mangling) on the next two checks; restored.
 */
671 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
672 return BNXT_TF_RC_PARSE_ERR;
674 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
675 return BNXT_TF_RC_PARSE_ERR;
677 eth_type = eth_spec->type;
680 if (ulp_rte_prsr_fld_size_validate(params, &idx,
681 BNXT_ULP_PROTO_HDR_ETH_NUM)) {
682 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
683 return BNXT_TF_RC_ERROR;
686 * Copy the rte_flow_item for eth into hdr_field using ethernet
689 size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
690 ulp_rte_prsr_fld_mask(params, &idx, size,
691 ulp_deference_struct(eth_spec, dst.addr_bytes),
692 ulp_deference_struct(eth_mask, dst.addr_bytes),
693 ULP_PRSR_ACT_DEFAULT);
695 size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
696 ulp_rte_prsr_fld_mask(params, &idx, size,
697 ulp_deference_struct(eth_spec, src.addr_bytes),
698 ulp_deference_struct(eth_mask, src.addr_bytes),
699 ULP_PRSR_ACT_DEFAULT);
701 size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
702 ulp_rte_prsr_fld_mask(params, &idx, size,
703 ulp_deference_struct(eth_spec, type),
704 ulp_deference_struct(eth_mask, type),
705 ULP_PRSR_ACT_MATCH_IGNORE);
707 /* Update the protocol hdr bitmap */
/* Any prior outer-L2/L3/L4 bit means this ethernet header is the inner one */
708 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
709 BNXT_ULP_HDR_BIT_O_ETH) ||
710 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
711 BNXT_ULP_HDR_BIT_O_IPV4) ||
712 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
713 BNXT_ULP_HDR_BIT_O_IPV6) ||
714 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
715 BNXT_ULP_HDR_BIT_O_UDP) ||
716 ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
717 BNXT_ULP_HDR_BIT_O_TCP)) {
718 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
721 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
723 /* Update the field protocol hdr bitmap */
724 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
726 return BNXT_TF_RC_SUCCESS;
729 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/* Splits the TCI into priority and tag, normalizes the masks, copies the
 * fields, and classifies the tag as OO/OI/IO/II VLAN based on which ethernet
 * headers and how many tags were already seen.
 */
731 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
732 struct ulp_rte_parser_params *params)
734 const struct rte_flow_item_vlan *vlan_spec = item->spec;
735 const struct rte_flow_item_vlan *vlan_mask = item->mask;
736 struct ulp_rte_hdr_bitmap *hdr_bit;
738 uint16_t vlan_tag = 0, priority = 0;
739 uint16_t vlan_tag_mask = 0, priority_mask = 0;
740 uint32_t outer_vtag_num;
741 uint32_t inner_vtag_num;
742 uint16_t eth_type = 0;
743 uint32_t inner_flag = 0;
747 vlan_tag = ntohs(vlan_spec->tci);
748 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
749 vlan_tag &= ULP_VLAN_TAG_MASK;
750 vlan_tag = htons(vlan_tag);
751 eth_type = vlan_spec->inner_type;
755 vlan_tag_mask = ntohs(vlan_mask->tci);
756 priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
757 vlan_tag_mask &= 0xfff;
760 * the storage for priority and vlan tag is 2 bytes
761 * The mask of priority which is 3 bits if it is all 1's
762 * then make the rest bits 13 bits as 1's
763 * so that it is matched as exact match.
765 if (priority_mask == ULP_VLAN_PRIORITY_MASK)
766 priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
767 if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
768 vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
769 vlan_tag_mask = htons(vlan_tag_mask);
772 if (ulp_rte_prsr_fld_size_validate(params, &idx,
773 BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
774 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
775 return BNXT_TF_RC_ERROR;
779 * Copy the rte_flow_item for vlan into hdr_field using Vlan
782 size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
784 * The priority field is ignored since OVS is setting it as
785 * wild card match and it is not supported. This is a work
786 * around and shall be addressed in the future.
788 ulp_rte_prsr_fld_mask(params, &idx, size,
791 ULP_PRSR_ACT_MASK_IGNORE);
793 ulp_rte_prsr_fld_mask(params, &idx, size,
796 ULP_PRSR_ACT_DEFAULT);
798 size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
799 ulp_rte_prsr_fld_mask(params, &idx, size,
800 ulp_deference_struct(vlan_spec, inner_type),
801 ulp_deference_struct(vlan_mask, inner_type),
802 ULP_PRSR_ACT_MATCH_IGNORE);
804 /* Get the outer tag and inner tag counts */
805 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
806 BNXT_ULP_CF_IDX_O_VTAG_NUM);
807 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
808 BNXT_ULP_CF_IDX_I_VTAG_NUM);
810 /* Update the hdr_bitmap of the vlans */
/* fix: '&params' had been corrupted to a pilcrow character ("&para" HTML
 * entity mangling); restored.
 */
811 hdr_bit = &params->hdr_bitmap;
812 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
813 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
815 /* Update the vlan tag num */
817 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
819 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
820 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
821 ULP_BITMAP_SET(params->hdr_bitmap.bits,
822 BNXT_ULP_HDR_BIT_OO_VLAN);
823 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
824 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
825 outer_vtag_num == 1) {
826 /* update the vlan tag num */
828 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
830 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
831 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
832 ULP_BITMAP_SET(params->hdr_bitmap.bits,
833 BNXT_ULP_HDR_BIT_OI_VLAN);
834 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
835 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
837 /* update the vlan tag num */
839 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
841 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
842 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
843 ULP_BITMAP_SET(params->hdr_bitmap.bits,
844 BNXT_ULP_HDR_BIT_IO_VLAN);
846 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
847 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
848 inner_vtag_num == 1) {
849 /* update the vlan tag num */
851 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
853 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
854 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
855 ULP_BITMAP_SET(params->hdr_bitmap.bits,
856 BNXT_ULP_HDR_BIT_II_VLAN);
859 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
860 return BNXT_TF_RC_ERROR;
862 /* Update the field protocol hdr bitmap */
863 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
864 return BNXT_TF_RC_SUCCESS;
867 /* Function to handle the update of proto header based on field values */
/* Maps an IP protocol number onto UDP/TCP/GRE/ICMP header bits (inner vs
 * outer chosen by in_flag or the L3 tunnel computed flag) and records the
 * protocol id computed fields.
 */
869 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
870 uint8_t proto, uint32_t in_flag)
872 if (proto == IPPROTO_UDP) {
874 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
875 BNXT_ULP_HDR_BIT_I_UDP);
876 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
878 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
879 BNXT_ULP_HDR_BIT_O_UDP);
880 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
882 } else if (proto == IPPROTO_TCP) {
884 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
885 BNXT_ULP_HDR_BIT_I_TCP);
886 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
888 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
889 BNXT_ULP_HDR_BIT_O_TCP);
890 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
892 } else if (proto == IPPROTO_GRE) {
893 ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
894 } else if (proto == IPPROTO_ICMP) {
895 if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
896 ULP_BITMAP_SET(param->hdr_bitmap.bits,
897 BNXT_ULP_HDR_BIT_I_ICMP);
899 ULP_BITMAP_SET(param->hdr_bitmap.bits,
900 BNXT_ULP_HDR_BIT_O_ICMP);
904 ULP_COMP_FLD_IDX_WR(param,
905 BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
907 ULP_COMP_FLD_IDX_WR(param,
908 BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
911 ULP_COMP_FLD_IDX_WR(param,
912 BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
914 ULP_COMP_FLD_IDX_WR(param,
915 BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
921 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/* Copies each IPv4 header field into hdr_field, classifies the header as
 * inner/outer, and propagates the (mask-qualified) next protocol to the L3
 * proto-type update. Rejects a third L3 header.
 */
923 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
924 struct ulp_rte_parser_params *params)
926 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
927 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
/* fix: '&params' had been corrupted to a pilcrow character ("&para" HTML
 * entity mangling); restored.
 */
928 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
932 uint32_t inner_flag = 0;
935 /* validate there are no 3rd L3 header */
936 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
938 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
939 return BNXT_TF_RC_ERROR;
942 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
943 BNXT_ULP_HDR_BIT_O_ETH) &&
944 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
945 BNXT_ULP_HDR_BIT_I_ETH)) {
946 /* Since F2 flow does not include eth item, when parser detects
947 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
948 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
949 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
950 * This will allow the parser post processor to update the
951 * t_dmac in hdr_field[o_eth.dmac]
953 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
954 BNXT_ULP_PROTO_HDR_VLAN_NUM);
955 params->field_idx = idx;
958 if (ulp_rte_prsr_fld_size_validate(params, &idx,
959 BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
960 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
961 return BNXT_TF_RC_ERROR;
965 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
968 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
969 ulp_rte_prsr_fld_mask(params, &idx, size,
970 ulp_deference_struct(ipv4_spec, hdr.version_ihl),
971 ulp_deference_struct(ipv4_mask, hdr.version_ihl),
972 ULP_PRSR_ACT_DEFAULT);
975 * The tos field is ignored since OVS is setting it as wild card
976 * match and it is not supported. This is a work around and
977 * shall be addressed in the future.
979 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
980 ulp_rte_prsr_fld_mask(params, &idx, size,
981 ulp_deference_struct(ipv4_spec,
982 hdr.type_of_service),
983 ulp_deference_struct(ipv4_mask,
984 hdr.type_of_service),
985 ULP_PRSR_ACT_MASK_IGNORE);
987 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
988 ulp_rte_prsr_fld_mask(params, &idx, size,
989 ulp_deference_struct(ipv4_spec, hdr.total_length),
990 ulp_deference_struct(ipv4_mask, hdr.total_length),
991 ULP_PRSR_ACT_DEFAULT);
993 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
994 ulp_rte_prsr_fld_mask(params, &idx, size,
995 ulp_deference_struct(ipv4_spec, hdr.packet_id),
996 ulp_deference_struct(ipv4_mask, hdr.packet_id),
997 ULP_PRSR_ACT_DEFAULT);
999 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
1000 ulp_rte_prsr_fld_mask(params, &idx, size,
1001 ulp_deference_struct(ipv4_spec,
1002 hdr.fragment_offset),
1003 ulp_deference_struct(ipv4_mask,
1004 hdr.fragment_offset),
1005 ULP_PRSR_ACT_DEFAULT);
1007 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
1008 ulp_rte_prsr_fld_mask(params, &idx, size,
1009 ulp_deference_struct(ipv4_spec, hdr.time_to_live),
1010 ulp_deference_struct(ipv4_mask, hdr.time_to_live),
1011 ULP_PRSR_ACT_DEFAULT);
1013 /* Ignore proto for matching templates */
1014 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
1015 ulp_rte_prsr_fld_mask(params, &idx, size,
1016 ulp_deference_struct(ipv4_spec,
1018 ulp_deference_struct(ipv4_mask,
1020 ULP_PRSR_ACT_MATCH_IGNORE);
1022 proto = ipv4_spec->hdr.next_proto_id;
1024 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
1025 ulp_rte_prsr_fld_mask(params, &idx, size,
1026 ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
1027 ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
1028 ULP_PRSR_ACT_DEFAULT);
1030 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
1031 ulp_rte_prsr_fld_mask(params, &idx, size,
1032 ulp_deference_struct(ipv4_spec, hdr.src_addr),
1033 ulp_deference_struct(ipv4_mask, hdr.src_addr),
1034 ULP_PRSR_ACT_DEFAULT);
1036 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
1037 ulp_rte_prsr_fld_mask(params, &idx, size,
1038 ulp_deference_struct(ipv4_spec, hdr.dst_addr),
1039 ulp_deference_struct(ipv4_mask, hdr.dst_addr),
1040 ULP_PRSR_ACT_DEFAULT);
1042 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1043 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1044 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1045 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1046 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1049 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1050 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1053 /* Some of the PMD applications may set the protocol field
1054 * in the IPv4 spec but don't set the mask. So, consider
1055 * the mask in the proto value calculation.
1058 proto &= ipv4_mask->hdr.next_proto_id;
1060 /* Update the field protocol hdr bitmap */
1061 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1062 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1063 return BNXT_TF_RC_SUCCESS;
1066 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * Copies the IPv6 item spec/mask fields into params->hdr_field, updates the
 * outer/inner IPv6 header bitmaps and computed fields, and records the next
 * protocol for the follow-on L4 bitmap update.
 *
 * item   - rte_flow IPv6 item (spec/mask may be NULL)
 * params - parser state being accumulated for this flow
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 *
 * NOTE(review): several guard lines (e.g. the "cnt == 2" check, the
 * "if (ipv6_spec)"/"if (ipv6_mask)" guards and some closing braces) appear
 * truncated in this extract — confirm against the full source before editing.
 */
1068 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1069 struct ulp_rte_parser_params *params)
1071 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1072 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1073 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1076 uint32_t ver_spec = 0, ver_mask = 0;
1077 uint32_t tc_spec = 0, tc_mask = 0;
1078 uint32_t lab_spec = 0, lab_mask = 0;
1080 uint32_t inner_flag = 0;
1083 /* validate there are no 3rd L3 header */
1084 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1086 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1087 return BNXT_TF_RC_ERROR;
1090 if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1091 BNXT_ULP_HDR_BIT_O_ETH) &&
1092 !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1093 BNXT_ULP_HDR_BIT_I_ETH)) {
1094 /* Since F2 flow does not include eth item, when parser detects
1095 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1096 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
1097 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1098 * This will allow the parser post processor to update the
1099 * t_dmac in hdr_field[o_eth.dmac]
1101 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1102 BNXT_ULP_PROTO_HDR_VLAN_NUM);
1103 params->field_idx = idx;
/* Ensure there is room in hdr_field for all IPv6 sub-fields. */
1106 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1107 BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
1108 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1109 return BNXT_TF_RC_ERROR;
1113 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* Split vtc_flow into version / traffic class / flow label pieces. */
1117 ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1118 tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1119 lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1120 proto = ipv6_spec->hdr.proto;
1124 ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1125 tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1126 lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1128 /* Some of the PMD applications may set the protocol field
1129 * in the IPv6 spec but don't set the mask. So, consider
1130 * the mask in proto value calculation.
1132 proto &= ipv6_mask->hdr.proto;
1135 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
1136 ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
1137 ULP_PRSR_ACT_DEFAULT);
1139 * The TC and flow label field are ignored since OVS is setting
1140 * it for match and it is not supported.
1141 * This is a work around and
1142 * shall be addressed in the future.
1144 ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
1145 ULP_PRSR_ACT_MASK_IGNORE);
1146 ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
1147 ULP_PRSR_ACT_MASK_IGNORE);
1149 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
1150 ulp_rte_prsr_fld_mask(params, &idx, size,
1151 ulp_deference_struct(ipv6_spec, hdr.payload_len),
1152 ulp_deference_struct(ipv6_mask, hdr.payload_len),
1153 ULP_PRSR_ACT_DEFAULT);
1155 /* Ignore proto for template matching */
1156 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
1157 ulp_rte_prsr_fld_mask(params, &idx, size,
1158 ulp_deference_struct(ipv6_spec, hdr.proto),
1159 ulp_deference_struct(ipv6_mask, hdr.proto),
1160 ULP_PRSR_ACT_MATCH_IGNORE);
1162 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
1163 ulp_rte_prsr_fld_mask(params, &idx, size,
1164 ulp_deference_struct(ipv6_spec, hdr.hop_limits),
1165 ulp_deference_struct(ipv6_mask, hdr.hop_limits),
1166 ULP_PRSR_ACT_DEFAULT);
1168 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
1169 ulp_rte_prsr_fld_mask(params, &idx, size,
1170 ulp_deference_struct(ipv6_spec, hdr.src_addr),
1171 ulp_deference_struct(ipv6_mask, hdr.src_addr),
1172 ULP_PRSR_ACT_DEFAULT);
1174 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
1175 ulp_rte_prsr_fld_mask(params, &idx, size,
1176 ulp_deference_struct(ipv6_spec, hdr.dst_addr),
1177 ulp_deference_struct(ipv6_mask, hdr.dst_addr),
1178 ULP_PRSR_ACT_DEFAULT);
1180 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
/* An outer L3 already seen means this IPv6 is the inner header. */
1181 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1182 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1183 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1184 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1187 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1188 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1191 /* Update the field protocol hdr bitmap */
1192 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1193 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1195 return BNXT_TF_RC_SUCCESS;
1198 /* Function to handle the update of proto header based on field values */
/*
 * Sets the VXLAN fast-path tunnel bit when the L4 destination port equals the
 * well-known VXLAN port (4789), and marks the L3_TUN computed field when a
 * VXLAN or GRE tunnel bit is already present in the header bitmap.
 *
 * NOTE(review): the dst_port parameter line and braces appear truncated in
 * this extract — dst_port is presumably big-endian (compared against
 * tfp_cpu_to_be_16); confirm in the full source.
 */
1200 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1203 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1204 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1205 BNXT_ULP_HDR_BIT_T_VXLAN);
1207 if (ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1208 BNXT_ULP_HDR_BIT_T_VXLAN) ||
1209 ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1210 BNXT_ULP_HDR_BIT_T_GRE))
1211 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1214 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies UDP item spec/mask fields into hdr_field, updates outer/inner UDP
 * bitmaps and the L4 src/dst port computed fields, then triggers the
 * tunnel-port (VXLAN) detection via ulp_rte_l4_proto_type_update().
 *
 * NOTE(review): the "cnt == 2" guard and several brace/argument lines appear
 * truncated in this extract.
 */
1216 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1217 struct ulp_rte_parser_params *params)
1219 const struct rte_flow_item_udp *udp_spec = item->spec;
1220 const struct rte_flow_item_udp *udp_mask = item->mask;
1221 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1224 uint16_t dport = 0, sport = 0;
1227 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1229 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1230 return BNXT_TF_RC_ERROR;
/* Ports kept in network byte order here; converted on CF writes below. */
1234 sport = udp_spec->hdr.src_port;
1235 dport = udp_spec->hdr.dst_port;
1238 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1239 BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1240 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1241 return BNXT_TF_RC_ERROR;
1245 * Copy the rte_flow_item for udp into hdr_field using udp
1248 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1249 ulp_rte_prsr_fld_mask(params, &idx, size,
1250 ulp_deference_struct(udp_spec, hdr.src_port),
1251 ulp_deference_struct(udp_mask, hdr.src_port),
1252 ULP_PRSR_ACT_DEFAULT);
1254 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1255 ulp_rte_prsr_fld_mask(params, &idx, size,
1256 ulp_deference_struct(udp_spec, hdr.dst_port),
1257 ulp_deference_struct(udp_mask, hdr.dst_port),
1258 ULP_PRSR_ACT_DEFAULT);
1260 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1261 ulp_rte_prsr_fld_mask(params, &idx, size,
1262 ulp_deference_struct(udp_spec, hdr.dgram_len),
1263 ulp_deference_struct(udp_mask, hdr.dgram_len),
1264 ULP_PRSR_ACT_DEFAULT);
1266 size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1267 ulp_rte_prsr_fld_mask(params, &idx, size,
1268 ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1269 ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1270 ULP_PRSR_ACT_DEFAULT);
1272 /* Set the udp header bitmap and computed l4 header bitmaps */
/* An outer L4 already seen means this UDP is the inner header. */
1273 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1274 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1275 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1276 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1277 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1278 (uint32_t)rte_be_to_cpu_16(sport));
1279 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1280 (uint32_t)rte_be_to_cpu_16(dport));
1281 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1283 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1285 if (udp_mask && udp_mask->hdr.src_port)
1286 ULP_COMP_FLD_IDX_WR(params,
1287 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1289 if (udp_mask && udp_mask->hdr.dst_port)
1290 ULP_COMP_FLD_IDX_WR(params,
1291 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1294 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1295 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1296 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1297 (uint32_t)rte_be_to_cpu_16(sport));
1298 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1299 (uint32_t)rte_be_to_cpu_16(dport));
1300 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1302 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1304 if (udp_mask && udp_mask->hdr.src_port)
1305 ULP_COMP_FLD_IDX_WR(params,
1306 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1308 if (udp_mask && udp_mask->hdr.dst_port)
1309 ULP_COMP_FLD_IDX_WR(params,
1310 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1313 /* Update the field protocol hdr bitmap */
/* dport may identify a tunnel (e.g. VXLAN port 4789). */
1314 ulp_rte_l4_proto_type_update(params, dport);
1316 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1317 return BNXT_TF_RC_SUCCESS;
1320 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies TCP item spec/mask fields into hdr_field and updates outer/inner TCP
 * bitmaps plus the L4 src/dst port computed fields. Mirrors the UDP handler
 * except that no tunnel-port detection is done for TCP.
 *
 * NOTE(review): the "cnt == 2" guard and several brace/argument lines appear
 * truncated in this extract.
 */
1322 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1323 struct ulp_rte_parser_params *params)
1325 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1326 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1327 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1329 uint16_t dport = 0, sport = 0;
1333 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1335 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1336 return BNXT_TF_RC_ERROR;
/* Ports kept in network byte order; converted on CF writes below. */
1340 sport = tcp_spec->hdr.src_port;
1341 dport = tcp_spec->hdr.dst_port;
1344 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1345 BNXT_ULP_PROTO_HDR_TCP_NUM)) {
1346 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1347 return BNXT_TF_RC_ERROR;
1351 * Copy the rte_flow_item for tcp into hdr_field using tcp
1354 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1355 ulp_rte_prsr_fld_mask(params, &idx, size,
1356 ulp_deference_struct(tcp_spec, hdr.src_port),
1357 ulp_deference_struct(tcp_mask, hdr.src_port),
1358 ULP_PRSR_ACT_DEFAULT);
1360 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1361 ulp_rte_prsr_fld_mask(params, &idx, size,
1362 ulp_deference_struct(tcp_spec, hdr.dst_port),
1363 ulp_deference_struct(tcp_mask, hdr.dst_port),
1364 ULP_PRSR_ACT_DEFAULT);
1366 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1367 ulp_rte_prsr_fld_mask(params, &idx, size,
1368 ulp_deference_struct(tcp_spec, hdr.sent_seq),
1369 ulp_deference_struct(tcp_mask, hdr.sent_seq),
1370 ULP_PRSR_ACT_DEFAULT);
1372 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1373 ulp_rte_prsr_fld_mask(params, &idx, size,
1374 ulp_deference_struct(tcp_spec, hdr.recv_ack),
1375 ulp_deference_struct(tcp_mask, hdr.recv_ack),
1376 ULP_PRSR_ACT_DEFAULT);
1378 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1379 ulp_rte_prsr_fld_mask(params, &idx, size,
1380 ulp_deference_struct(tcp_spec, hdr.data_off),
1381 ulp_deference_struct(tcp_mask, hdr.data_off),
1382 ULP_PRSR_ACT_DEFAULT);
1384 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1385 ulp_rte_prsr_fld_mask(params, &idx, size,
1386 ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1387 ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1388 ULP_PRSR_ACT_DEFAULT);
1390 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1391 ulp_rte_prsr_fld_mask(params, &idx, size,
1392 ulp_deference_struct(tcp_spec, hdr.rx_win),
1393 ulp_deference_struct(tcp_mask, hdr.rx_win),
1394 ULP_PRSR_ACT_DEFAULT);
1396 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1397 ulp_rte_prsr_fld_mask(params, &idx, size,
1398 ulp_deference_struct(tcp_spec, hdr.cksum),
1399 ulp_deference_struct(tcp_mask, hdr.cksum),
1400 ULP_PRSR_ACT_DEFAULT);
1402 size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1403 ulp_rte_prsr_fld_mask(params, &idx, size,
1404 ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1405 ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1406 ULP_PRSR_ACT_DEFAULT);
1408 /* Set the tcp header bitmap and computed l4 header bitmaps */
/* An outer L4 already seen means this TCP is the inner header. */
1409 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1410 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1411 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1412 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1413 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1414 (uint32_t)rte_be_to_cpu_16(sport));
1415 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1416 (uint32_t)rte_be_to_cpu_16(dport));
1417 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1419 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1421 if (tcp_mask && tcp_mask->hdr.src_port)
1422 ULP_COMP_FLD_IDX_WR(params,
1423 BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1425 if (tcp_mask && tcp_mask->hdr.dst_port)
1426 ULP_COMP_FLD_IDX_WR(params,
1427 BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1430 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1431 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1432 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1433 (uint32_t)rte_be_to_cpu_16(sport));
1434 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1435 (uint32_t)rte_be_to_cpu_16(dport));
1436 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1438 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1440 if (tcp_mask && tcp_mask->hdr.src_port)
1441 ULP_COMP_FLD_IDX_WR(params,
1442 BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1444 if (tcp_mask && tcp_mask->hdr.dst_port)
1445 ULP_COMP_FLD_IDX_WR(params,
1446 BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1449 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1450 return BNXT_TF_RC_SUCCESS;
1453 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies VXLAN item fields (flags/rsvd0/vni/rsvd1) into hdr_field, sets the
 * T_VXLAN tunnel bit and refreshes the L3_TUN computed field via
 * ulp_rte_l4_proto_type_update(params, 0).
 */
1455 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1456 struct ulp_rte_parser_params *params)
1458 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1459 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1460 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1464 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1465 BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1466 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1467 return BNXT_TF_RC_ERROR;
1471 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1474 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1475 ulp_rte_prsr_fld_mask(params, &idx, size,
1476 ulp_deference_struct(vxlan_spec, flags),
1477 ulp_deference_struct(vxlan_mask, flags),
1478 ULP_PRSR_ACT_DEFAULT);
1480 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1481 ulp_rte_prsr_fld_mask(params, &idx, size,
1482 ulp_deference_struct(vxlan_spec, rsvd0),
1483 ulp_deference_struct(vxlan_mask, rsvd0),
1484 ULP_PRSR_ACT_DEFAULT);
1486 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1487 ulp_rte_prsr_fld_mask(params, &idx, size,
1488 ulp_deference_struct(vxlan_spec, vni),
1489 ulp_deference_struct(vxlan_mask, vni),
1490 ULP_PRSR_ACT_DEFAULT);
1492 size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1493 ulp_rte_prsr_fld_mask(params, &idx, size,
1494 ulp_deference_struct(vxlan_spec, rsvd1),
1495 ulp_deference_struct(vxlan_mask, rsvd1),
1496 ULP_PRSR_ACT_DEFAULT);
1498 /* Update the hdr_bitmap with vxlan */
/* dport 0: only propagate the already-set tunnel bits to L3_TUN. */
1499 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1500 ulp_rte_l4_proto_type_update(params, 0);
1501 return BNXT_TF_RC_SUCCESS;
1504 /* Function to handle the parsing of RTE Flow item GRE Header. */
/*
 * Copies GRE item fields (c_rsvd0_ver/protocol) into hdr_field, sets the
 * T_GRE tunnel bit and refreshes the L3_TUN computed field.
 */
1506 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1507 struct ulp_rte_parser_params *params)
1509 const struct rte_flow_item_gre *gre_spec = item->spec;
1510 const struct rte_flow_item_gre *gre_mask = item->mask;
1511 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1515 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1516 BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1517 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1518 return BNXT_TF_RC_ERROR;
1521 size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1522 ulp_rte_prsr_fld_mask(params, &idx, size,
1523 ulp_deference_struct(gre_spec, c_rsvd0_ver),
1524 ulp_deference_struct(gre_mask, c_rsvd0_ver),
1525 ULP_PRSR_ACT_DEFAULT);
1527 size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1528 ulp_rte_prsr_fld_mask(params, &idx, size,
1529 ulp_deference_struct(gre_spec, protocol),
1530 ulp_deference_struct(gre_mask, protocol),
1531 ULP_PRSR_ACT_DEFAULT);
1533 /* Update the hdr_bitmap with GRE */
/* dport 0: only propagate the already-set tunnel bits to L3_TUN. */
1534 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1535 ulp_rte_l4_proto_type_update(params, 0);
1536 return BNXT_TF_RC_SUCCESS;
1539 /* Function to handle the parsing of RTE Flow item ANY. */
/* No-op: the ANY item carries no match fields for this parser. */
1541 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1542 struct ulp_rte_parser_params *params __rte_unused)
1544 return BNXT_TF_RC_SUCCESS;
1547 /* Function to handle the parsing of RTE Flow item ICMP Header. */
/*
 * Copies ICMP item fields (type/code/cksum/ident/seq) into hdr_field and sets
 * the inner or outer ICMP header bit depending on whether the flow is inside
 * a tunnel (L3_TUN computed field).
 */
1549 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1550 struct ulp_rte_parser_params *params)
1552 const struct rte_flow_item_icmp *icmp_spec = item->spec;
1553 const struct rte_flow_item_icmp *icmp_mask = item->mask;
1554 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1558 if (ulp_rte_prsr_fld_size_validate(params, &idx,
1559 BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1560 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1561 return BNXT_TF_RC_ERROR;
1564 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1565 ulp_rte_prsr_fld_mask(params, &idx, size,
1566 ulp_deference_struct(icmp_spec, hdr.icmp_type),
1567 ulp_deference_struct(icmp_mask, hdr.icmp_type),
1568 ULP_PRSR_ACT_DEFAULT);
1570 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1571 ulp_rte_prsr_fld_mask(params, &idx, size,
1572 ulp_deference_struct(icmp_spec, hdr.icmp_code),
1573 ulp_deference_struct(icmp_mask, hdr.icmp_code),
1574 ULP_PRSR_ACT_DEFAULT);
1576 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1577 ulp_rte_prsr_fld_mask(params, &idx, size,
1578 ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1579 ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1580 ULP_PRSR_ACT_DEFAULT);
1582 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1583 ulp_rte_prsr_fld_mask(params, &idx, size,
1584 ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1585 ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1586 ULP_PRSR_ACT_DEFAULT);
1588 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1589 ulp_rte_prsr_fld_mask(params, &idx, size,
1590 ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1591 ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1592 ULP_PRSR_ACT_DEFAULT);
1594 /* Update the hdr_bitmap with ICMP */
1595 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1596 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1598 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1599 return BNXT_TF_RC_SUCCESS;
1602 /* Function to handle the parsing of RTE Flow item void Header */
/* No-op: VOID items are placeholders and contribute nothing to the match. */
1604 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1605 struct ulp_rte_parser_params *params __rte_unused)
1607 return BNXT_TF_RC_SUCCESS;
1610 /* Function to handle the parsing of RTE Flow action void Header. */
/* No-op: VOID actions are placeholders and contribute nothing. */
1612 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1613 struct ulp_rte_parser_params *params __rte_unused)
1615 return BNXT_TF_RC_SUCCESS;
1618 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the big-endian mark id in the MARK action property and sets the
 * MARK action bit. Errors out when the action conf is missing.
 * NOTE(review): the "if (mark)" guard line appears truncated in this extract.
 */
1620 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1621 struct ulp_rte_parser_params *param)
1623 const struct rte_flow_action_mark *mark;
1624 struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1627 mark = action_item->conf;
1629 mark_id = tfp_cpu_to_be_32(mark->id);
1630 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1631 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1633 /* Update the act_bitmap with the MARK action bit */
1634 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1635 return BNXT_TF_RC_SUCCESS;
1637 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1638 return BNXT_TF_RC_ERROR;
1641 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Sets the RSS action bit when the action conf is present; errors otherwise.
 * NOTE(review): the "if (rss)" guard line appears truncated in this extract.
 */
1643 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1644 struct ulp_rte_parser_params *param)
1646 const struct rte_flow_action_rss *rss = action_item->conf;
1649 /* Update the act_bitmap with the RSS action bit */
1650 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1651 return BNXT_TF_RC_SUCCESS;
1653 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1654 return BNXT_TF_RC_ERROR;
1657 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walks the vxlan_encap action's item template (ETH [VLAN [VLAN]] IPV4|IPV6
 * UDP VXLAN) and serializes each header into the encap action-property
 * buffers (L2 MACs, VLAN tags, IP header, UDP, VXLAN), then sets the
 * VXLAN_ENCAP action bit. Returns BNXT_TF_RC_ERROR on any malformed or
 * missing template item.
 *
 * NOTE(review): multiple guard lines, `uint8_t *buff;`-style declarations and
 * some argument lines appear truncated in this extract — verify against the
 * full source before any behavioral change.
 */
1659 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1660 struct ulp_rte_parser_params *params)
1662 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1663 const struct rte_flow_item *item;
1664 const struct rte_flow_item_eth *eth_spec;
1665 const struct rte_flow_item_ipv4 *ipv4_spec;
1666 const struct rte_flow_item_ipv6 *ipv6_spec;
1667 struct rte_flow_item_vxlan vxlan_spec;
1668 uint32_t vlan_num = 0, vlan_size = 0;
1669 uint32_t ip_size = 0, ip_type = 0;
1670 uint32_t vxlan_size = 0;
1672 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1673 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1675 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1676 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1678 struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1679 struct ulp_rte_act_prop *ap = &params->act_prop;
1680 const uint8_t *tmp_buff;
1682 vxlan_encap = action_item->conf;
1684 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1685 return BNXT_TF_RC_ERROR;
1688 item = vxlan_encap->definition;
1690 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1691 return BNXT_TF_RC_ERROR;
1694 if (!ulp_rte_item_skip_void(&item, 0))
1695 return BNXT_TF_RC_ERROR;
1697 /* must have ethernet header */
1698 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1699 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1700 return BNXT_TF_RC_ERROR;
1702 eth_spec = item->spec;
/* Serialize DMAC/SMAC into the encap L2 property buffers (8B aligned). */
1703 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1704 ulp_encap_buffer_copy(buff,
1705 eth_spec->dst.addr_bytes,
1706 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1707 ULP_BUFFER_ALIGN_8_BYTE);
1709 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1710 ulp_encap_buffer_copy(buff,
1711 eth_spec->src.addr_bytes,
1712 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1713 ULP_BUFFER_ALIGN_8_BYTE);
1715 /* Goto the next item */
1716 if (!ulp_rte_item_skip_void(&item, 1))
1717 return BNXT_TF_RC_ERROR;
1719 /* May have vlan header */
1720 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1722 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1723 ulp_encap_buffer_copy(buff,
1725 sizeof(struct rte_flow_item_vlan),
1726 ULP_BUFFER_ALIGN_8_BYTE);
1728 if (!ulp_rte_item_skip_void(&item, 1))
1729 return BNXT_TF_RC_ERROR;
1732 /* may have two vlan headers */
1733 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1735 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1736 sizeof(struct rte_flow_item_vlan)],
1738 sizeof(struct rte_flow_item_vlan));
1739 if (!ulp_rte_item_skip_void(&item, 1))
1740 return BNXT_TF_RC_ERROR;
1742 /* Update the vlan count and size of more than one */
1744 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1745 vlan_num = tfp_cpu_to_be_32(vlan_num);
1746 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1749 vlan_size = tfp_cpu_to_be_32(vlan_size);
1750 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1755 /* L3 must be IPv4, IPv6 */
1756 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1757 ipv4_spec = item->spec;
1758 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1760 /* copy the ipv4 details */
/* If ver/hlen/TOS are all zero in the spec, fall back to a default
 * IPv4 header template; otherwise reassemble from the spec with the
 * total-length field skipped.
 */
1761 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1762 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1763 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1764 ulp_encap_buffer_copy(buff,
1766 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1767 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1768 ULP_BUFFER_ALIGN_8_BYTE);
1770 /* Total length being ignored in the ip hdr. */
1771 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1772 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1773 ulp_encap_buffer_copy(buff,
1775 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1776 ULP_BUFFER_ALIGN_8_BYTE);
1777 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1778 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1779 ulp_encap_buffer_copy(buff,
1780 &ipv4_spec->hdr.version_ihl,
1781 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1782 ULP_BUFFER_ALIGN_8_BYTE);
1785 /* Update the dst ip address in ip encap buffer */
1786 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1787 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1788 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1789 ulp_encap_buffer_copy(buff,
1790 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1791 sizeof(ipv4_spec->hdr.dst_addr),
1792 ULP_BUFFER_ALIGN_8_BYTE);
1794 /* Update the src ip address */
1795 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1796 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1797 sizeof(ipv4_spec->hdr.src_addr)];
1798 ulp_encap_buffer_copy(buff,
1799 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1800 sizeof(ipv4_spec->hdr.src_addr),
1801 ULP_BUFFER_ALIGN_8_BYTE);
1803 /* Update the ip size details */
1804 ip_size = tfp_cpu_to_be_32(ip_size);
1805 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1806 &ip_size, sizeof(uint32_t));
1808 /* update the ip type */
1809 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1810 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1811 &ip_type, sizeof(uint32_t));
1813 /* update the computed field to notify it is ipv4 header */
1814 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1817 if (!ulp_rte_item_skip_void(&item, 1))
1818 return BNXT_TF_RC_ERROR;
1819 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1820 ipv6_spec = item->spec;
1821 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1823 /* copy the ipv6 details */
/* Same default-vs-spec split as IPv4, keyed off vtc_flow being zero. */
1824 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1825 if (ulp_buffer_is_empty(tmp_buff,
1826 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1827 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1828 ulp_encap_buffer_copy(buff,
1830 sizeof(def_ipv6_hdr),
1831 ULP_BUFFER_ALIGN_8_BYTE);
1833 /* The payload length being ignored in the ip hdr. */
1834 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1835 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1836 ulp_encap_buffer_copy(buff,
1838 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1839 ULP_BUFFER_ALIGN_8_BYTE);
1840 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1841 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1842 BNXT_ULP_ENCAP_IPV6_DO];
1843 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1844 ulp_encap_buffer_copy(buff,
1846 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1847 ULP_BUFFER_ALIGN_8_BYTE);
1849 /* Update the dst ip address in ip encap buffer */
1850 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1851 sizeof(def_ipv6_hdr)];
1852 ulp_encap_buffer_copy(buff,
1853 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1854 sizeof(ipv6_spec->hdr.dst_addr),
1855 ULP_BUFFER_ALIGN_8_BYTE);
1857 /* Update the src ip address */
1858 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1859 ulp_encap_buffer_copy(buff,
1860 (const uint8_t *)ipv6_spec->hdr.src_addr,
1861 sizeof(ipv6_spec->hdr.src_addr),
1862 ULP_BUFFER_ALIGN_16_BYTE);
1864 /* Update the ip size details */
1865 ip_size = tfp_cpu_to_be_32(ip_size);
1866 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1867 &ip_size, sizeof(uint32_t));
1869 /* update the ip type */
1870 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1871 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1872 &ip_type, sizeof(uint32_t));
1874 /* update the computed field to notify it is ipv6 header */
1875 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1878 if (!ulp_rte_item_skip_void(&item, 1))
1879 return BNXT_TF_RC_ERROR;
1881 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1882 return BNXT_TF_RC_ERROR;
1886 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1887 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1888 return BNXT_TF_RC_ERROR;
1890 /* copy the udp details */
1891 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1892 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1893 ULP_BUFFER_ALIGN_8_BYTE);
1895 if (!ulp_rte_item_skip_void(&item, 1))
1896 return BNXT_TF_RC_ERROR;
1899 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1900 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1901 return BNXT_TF_RC_ERROR;
1903 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1904 /* copy the vxlan details */
1905 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* 0x08 = "VNI present" flag per the VXLAN header layout (RFC 7348). */
1906 vxlan_spec.flags = 0x08;
1907 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
/* IPv4 encap copies the VXLAN header whole; the other path copies it in
 * two halves with separate alignment handling.
 */
1908 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1909 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1910 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1912 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1913 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1914 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1915 (const uint8_t *)&vxlan_spec.vni,
1916 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1918 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1919 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1920 &vxlan_size, sizeof(uint32_t));
1922 /* update the hdr_bitmap with vxlan */
1923 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
1924 return BNXT_TF_RC_SUCCESS;
1927 /* Function to handle the parsing of RTE Flow action vxlan_decap Header */
/* Sets the VXLAN_DECAP action bit and the tunnel/decap computed fields. */
1929 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1931 struct ulp_rte_parser_params *params)
1933 /* update the act_bitmap with vxlan decap */
1934 ULP_BITMAP_SET(params->act_bitmap.bits,
1935 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
1936 /* Update computational field with tunnel decap info */
1937 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1938 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1939 return BNXT_TF_RC_SUCCESS;
1942 /* Function to handle the parsing of RTE Flow action drop Header. */
/* Sets the DROP action bit; no action properties are needed. */
1944 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1945 struct ulp_rte_parser_params *params)
1947 /* Update the act_bitmap with drop */
1948 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
1949 return BNXT_TF_RC_SUCCESS;
1952 /* Function to handle the parsing of RTE Flow action count. */
/*
 * Copies the count action conf into the COUNT action property and sets the
 * COUNT action bit. Shared counters are rejected.
 * NOTE(review): the "if (act_count)" guard line appears truncated here.
 */
1954 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1955 struct ulp_rte_parser_params *params)
1957 const struct rte_flow_action_count *act_count;
1958 struct ulp_rte_act_prop *act_prop = &params->act_prop;
1960 act_count = action_item->conf;
1962 if (act_count->shared) {
1964 "Parse Error:Shared count not supported\n");
1965 return BNXT_TF_RC_PARSE_ERR;
1967 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1969 BNXT_ULP_ACT_PROP_SZ_COUNT);
1972 /* Update the act_bitmap with count */
1973 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
1974 return BNXT_TF_RC_SUCCESS;
1977 /* Function to handle the parsing of action ports. */
/*
 * Resolves the destination for a port action: for egress flows writes the
 * vport action property, for ingress flows picks the DRV/VF function vnic
 * (VF-rep ports use the VF vnic) and writes the vnic property. Marks
 * ACT_PORT_IS_SET on success.
 */
1979 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1982 enum bnxt_ulp_direction_type dir;
1985 struct ulp_rte_act_prop *act = &param->act_prop;
1986 enum bnxt_ulp_intf_type port_type;
1989 /* Get the direction */
1990 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1991 if (dir == BNXT_ULP_DIR_EGRESS) {
1992 /* For egress direction, fill vport */
1993 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1994 return BNXT_TF_RC_ERROR;
1997 pid = rte_cpu_to_be_32(pid);
1998 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1999 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2001 /* For ingress direction, fill vnic */
2002 port_type = ULP_COMP_FLD_IDX_RD(param,
2003 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2004 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2005 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2007 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2009 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2011 return BNXT_TF_RC_ERROR;
2014 pid = rte_cpu_to_be_32(pid);
2015 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2016 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2019 /* Update the action port set bit */
2020 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2021 return BNXT_TF_RC_SUCCESS;
2024 /* Function to handle the parsing of RTE Flow action PF. */
/*
 * Routes the flow to the PF of the incoming port: maps the incoming dpdk
 * port id to a port-db ifindex, verifies the interface is a PF and delegates
 * vport/vnic selection to ulp_rte_parser_act_port_set().
 */
2026 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2027 struct ulp_rte_parser_params *params)
2031 enum bnxt_ulp_intf_type intf_type;
2033 /* Get the port id of the current device */
2034 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2036 /* Get the port db ifindex */
2037 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2039 BNXT_TF_DBG(ERR, "Invalid port id\n");
2040 return BNXT_TF_RC_ERROR;
2043 /* Check the port is PF port */
2044 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2045 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2046 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2047 return BNXT_TF_RC_ERROR;
2049 /* Update the action properties */
2050 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2051 return ulp_rte_parser_act_port_set(params, ifindex);
2054 /* Function to handle the parsing of RTE Flow action VF. */
2056 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2057 struct ulp_rte_parser_params *params)
2059 const struct rte_flow_action_vf *vf_action;
2060 enum bnxt_ulp_intf_type intf_type;
2064 vf_action = action_item->conf;
2066 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2067 return BNXT_TF_RC_PARSE_ERR;
2070 if (vf_action->original) {
2071 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2072 return BNXT_TF_RC_PARSE_ERR;
2075 bp = bnxt_get_bp(params->port_id);
2077 BNXT_TF_DBG(ERR, "Invalid bp\n");
2078 return BNXT_TF_RC_ERROR;
2081 /* vf_action->id is a logical number which in this case is an
2082 * offset from the first VF. So, to get the absolute VF id, the
2083 * offset must be added to the absolute first vf id of that port.
2085 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2089 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2090 return BNXT_TF_RC_ERROR;
2092 /* Check the port is VF port */
2093 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2094 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2095 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2096 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2097 return BNXT_TF_RC_ERROR;
2100 /* Update the action properties */
2101 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2102 return ulp_rte_parser_act_port_set(params, ifindex);
2105 /* Function to handle the parsing of RTE Flow action port_id. */
2107 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
2108 struct ulp_rte_parser_params *param)
2110 const struct rte_flow_action_port_id *port_id = act_item->conf;
2112 enum bnxt_ulp_intf_type intf_type;
2116 "ParseErr: Invalid Argument\n");
2117 return BNXT_TF_RC_PARSE_ERR;
2119 if (port_id->original) {
2121 "ParseErr:Portid Original not supported\n");
2122 return BNXT_TF_RC_PARSE_ERR;
2125 /* Get the port db ifindex */
2126 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2128 BNXT_TF_DBG(ERR, "Invalid port id\n");
2129 return BNXT_TF_RC_ERROR;
2132 /* Get the intf type */
2133 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2135 BNXT_TF_DBG(ERR, "Invalid port type\n");
2136 return BNXT_TF_RC_ERROR;
2139 /* Set the action port */
2140 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2141 return ulp_rte_parser_act_port_set(param, ifindex);
2144 /* Function to handle the parsing of RTE Flow action phy_port. */
2146 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2147 struct ulp_rte_parser_params *prm)
2149 const struct rte_flow_action_phy_port *phy_port;
2153 enum bnxt_ulp_direction_type dir;
2155 phy_port = action_item->conf;
2158 "ParseErr: Invalid Argument\n");
2159 return BNXT_TF_RC_PARSE_ERR;
2162 if (phy_port->original) {
2164 "Parse Err:Port Original not supported\n");
2165 return BNXT_TF_RC_PARSE_ERR;
2167 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2168 if (dir != BNXT_ULP_DIR_EGRESS) {
2170 "Parse Err:Phy ports are valid only for egress\n");
2171 return BNXT_TF_RC_PARSE_ERR;
2173 /* Get the physical port details from port db */
2174 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2177 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2182 pid = rte_cpu_to_be_32(pid);
2183 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2184 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2186 /* Update the action port set bit */
2187 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2188 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2189 BNXT_ULP_INTF_TYPE_PHY_PORT);
2190 return BNXT_TF_RC_SUCCESS;
2193 /* Function to handle the parsing of RTE Flow action pop vlan. */
2195 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2196 struct ulp_rte_parser_params *params)
2198 /* Update the act_bitmap with pop */
2199 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2200 return BNXT_TF_RC_SUCCESS;
2203 /* Function to handle the parsing of RTE Flow action push vlan. */
2205 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2206 struct ulp_rte_parser_params *params)
2208 const struct rte_flow_action_of_push_vlan *push_vlan;
2210 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2212 push_vlan = action_item->conf;
2214 ethertype = push_vlan->ethertype;
2215 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2217 "Parse Err: Ethertype not supported\n");
2218 return BNXT_TF_RC_PARSE_ERR;
2220 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2221 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2222 /* Update the hdr_bitmap with push vlan */
2223 ULP_BITMAP_SET(params->act_bitmap.bits,
2224 BNXT_ULP_ACT_BIT_PUSH_VLAN);
2225 return BNXT_TF_RC_SUCCESS;
2227 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2228 return BNXT_TF_RC_ERROR;
2231 /* Function to handle the parsing of RTE Flow action set vlan id. */
2233 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2234 struct ulp_rte_parser_params *params)
2236 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2238 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2240 vlan_vid = action_item->conf;
2241 if (vlan_vid && vlan_vid->vlan_vid) {
2242 vid = vlan_vid->vlan_vid;
2243 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2244 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2245 /* Update the hdr_bitmap with vlan vid */
2246 ULP_BITMAP_SET(params->act_bitmap.bits,
2247 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2248 return BNXT_TF_RC_SUCCESS;
2250 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2251 return BNXT_TF_RC_ERROR;
2254 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2256 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2257 struct ulp_rte_parser_params *params)
2259 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2261 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2263 vlan_pcp = action_item->conf;
2265 pcp = vlan_pcp->vlan_pcp;
2266 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2267 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2268 /* Update the hdr_bitmap with vlan vid */
2269 ULP_BITMAP_SET(params->act_bitmap.bits,
2270 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2271 return BNXT_TF_RC_SUCCESS;
2273 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2274 return BNXT_TF_RC_ERROR;
2277 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2279 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2280 struct ulp_rte_parser_params *params)
2282 const struct rte_flow_action_set_ipv4 *set_ipv4;
2283 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2285 set_ipv4 = action_item->conf;
2287 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2288 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2289 /* Update the hdr_bitmap with set ipv4 src */
2290 ULP_BITMAP_SET(params->act_bitmap.bits,
2291 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2292 return BNXT_TF_RC_SUCCESS;
2294 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2295 return BNXT_TF_RC_ERROR;
2298 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2300 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2301 struct ulp_rte_parser_params *params)
2303 const struct rte_flow_action_set_ipv4 *set_ipv4;
2304 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2306 set_ipv4 = action_item->conf;
2308 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2309 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2310 /* Update the hdr_bitmap with set ipv4 dst */
2311 ULP_BITMAP_SET(params->act_bitmap.bits,
2312 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2313 return BNXT_TF_RC_SUCCESS;
2315 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2316 return BNXT_TF_RC_ERROR;
2319 /* Function to handle the parsing of RTE Flow action set tp src.*/
2321 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2322 struct ulp_rte_parser_params *params)
2324 const struct rte_flow_action_set_tp *set_tp;
2325 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2327 set_tp = action_item->conf;
2329 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2330 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2331 /* Update the hdr_bitmap with set tp src */
2332 ULP_BITMAP_SET(params->act_bitmap.bits,
2333 BNXT_ULP_ACT_BIT_SET_TP_SRC);
2334 return BNXT_TF_RC_SUCCESS;
2337 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2338 return BNXT_TF_RC_ERROR;
2341 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2343 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2344 struct ulp_rte_parser_params *params)
2346 const struct rte_flow_action_set_tp *set_tp;
2347 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2349 set_tp = action_item->conf;
2351 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2352 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2353 /* Update the hdr_bitmap with set tp dst */
2354 ULP_BITMAP_SET(params->act_bitmap.bits,
2355 BNXT_ULP_ACT_BIT_SET_TP_DST);
2356 return BNXT_TF_RC_SUCCESS;
2359 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2360 return BNXT_TF_RC_ERROR;
2363 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2365 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2366 struct ulp_rte_parser_params *params)
2368 /* Update the act_bitmap with dec ttl */
2369 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2370 return BNXT_TF_RC_SUCCESS;
2373 /* Function to handle the parsing of RTE Flow action JUMP */
2375 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2376 struct ulp_rte_parser_params *params)
2378 /* Update the act_bitmap with dec ttl */
2379 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2380 return BNXT_TF_RC_SUCCESS;
2384 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2385 struct ulp_rte_parser_params *params)
2387 const struct rte_flow_action_sample *sample;
2390 sample = action_item->conf;
2392 /* if SAMPLE bit is set it means this sample action is nested within the
2393 * actions of another sample action; this is not allowed
2395 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2396 BNXT_ULP_ACT_BIT_SAMPLE))
2397 return BNXT_TF_RC_ERROR;
2399 /* a sample action is only allowed as a shared action */
2400 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2401 BNXT_ULP_ACT_BIT_SHARED))
2402 return BNXT_TF_RC_ERROR;
2404 /* only a ratio of 1 i.e. 100% is supported */
2405 if (sample->ratio != 1)
2406 return BNXT_TF_RC_ERROR;
2408 if (!sample->actions)
2409 return BNXT_TF_RC_ERROR;
2411 /* parse the nested actions for a sample action */
2412 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2413 if (ret == BNXT_TF_RC_SUCCESS)
2414 /* Update the act_bitmap with sample */
2415 ULP_BITMAP_SET(params->act_bitmap.bits,
2416 BNXT_ULP_ACT_BIT_SAMPLE);