1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Local defines for the parsing functions */
16 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
17 #define ULP_VLAN_PRIORITY_MASK 0x700
18 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
19 #define ULP_UDP_PORT_VXLAN 4789
/*
 * ulp_rte_item_skip_void() - advance *item past consecutive
 * RTE_FLOW_ITEM_TYPE_VOID entries so callers always operate on the next
 * meaningful pattern item.
 * NOTE(review): listing is truncated here — the return type, braces and
 * the use of 'increment' are not visible; confirm against the full file.
 */
21 /* Utility function to skip the void items. */
23 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
29 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
/*
 * ulp_rte_parser_field_bitmap_update() - record whether hdr_field[idx]
 * carries a non-zero mask. A non-zero mask sets the field's bit in
 * fld_bitmap; a mask that is not all-ones additionally marks the flow as a
 * wildcard match (BNXT_ULP_MATCH_TYPE_BITMASK_WM). A zero mask resets the bit.
 * NOTE(review): '¶ms' below is mojibake for '&params' in this listing.
 */
36 /* Utility function to update the field_bitmap */
38 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
41 struct ulp_rte_hdr_field *field;
43 field = ¶ms->hdr_field[idx];
44 if (ulp_bitmap_notzero(field->mask, field->size)) {
45 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial mask => wildcard-match entry rather than exact match. */
47 if (!ulp_bitmap_is_ones(field->mask, field->size))
48 ULP_BITMAP_SET(params->fld_bitmap.bits,
49 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
51 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
/*
 * ulp_rte_parser_fld_copy() - copy 'field->size' bytes of spec data from
 * 'buffer' into field->spec.
 * NOTE(review): the size assignment and the 'return' of the next field
 * pointer are not visible in this truncated listing — confirm upstream.
 */
55 /* Utility function to copy field spec items */
56 static struct ulp_rte_hdr_field *
57 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
62 memcpy(field->spec, buffer, field->size);
/*
 * ulp_rte_prsr_mask_copy() - copy 'size' bytes of mask data from 'buffer'
 * into hdr_field[*idx].mask and update fld_bitmap accordingly.
 * NOTE(review): '¶ms' below is mojibake for '&params'; the increment of
 * *idx is not visible in this truncated listing.
 */
67 /* Utility function to copy field masks items */
69 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
74 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
76 memcpy(field->mask, buffer, size);
77 ulp_rte_parser_field_bitmap_update(params, *idx);
/*
 * bnxt_ulp_rte_parser_hdr_parse() - walk the rte_flow pattern array and
 * dispatch each supported item to its registered proto_hdr_func callback
 * from ulp_hdr_info[]. Returns BNXT_TF_RC_PARSE_ERR for unsupported item
 * types, BNXT_TF_RC_ERROR on callback failure, otherwise falls through to
 * the implicit SVIF match processing.
 */
82 * Function to handle the parsing of RTE Flows and placing
83 * the RTE flow items into the ulp structures.
86 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
87 struct ulp_rte_parser_params *params)
89 const struct rte_flow_item *item = pattern;
90 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Field slots 0..SVIF_NUM-1 are reserved for the implicit SVIF match. */
92 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
94 /* Set the computed flags for no vlan tags before parsing */
95 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
96 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
98 /* Parse all the items in the pattern */
99 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
100 /* get the header information from the flow_hdr_info table */
101 hdr_info = &ulp_hdr_info[item->type];
102 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
104 "Truflow parser does not support type %d\n",
106 return BNXT_TF_RC_PARSE_ERR;
107 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
108 /* call the registered callback handler */
109 if (hdr_info->proto_hdr_func) {
110 if (hdr_info->proto_hdr_func(item, params) !=
111 BNXT_TF_RC_SUCCESS) {
112 return BNXT_TF_RC_ERROR;
118 /* update the implied SVIF */
119 return ulp_rte_parser_implicit_match_port_process(params);
/*
 * bnxt_ulp_rte_parser_act_parse() - walk the rte_flow action array and
 * dispatch each supported action to its registered proto_act_func callback
 * from ulp_act_info[]. After all actions are parsed, derive the implicit
 * action port. Returns BNXT_TF_RC_ERROR on unsupported action or callback
 * failure, BNXT_TF_RC_SUCCESS otherwise.
 */
123 * Function to handle the parsing of RTE Flows and placing
124 * the RTE flow actions into the ulp structures.
127 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
128 struct ulp_rte_parser_params *params)
130 const struct rte_flow_action *action_item = actions;
131 struct bnxt_ulp_rte_act_info *hdr_info;
133 /* Parse all the items in the pattern */
134 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
135 /* get the header information from the flow_hdr_info table */
136 hdr_info = &ulp_act_info[action_item->type];
137 if (hdr_info->act_type ==
138 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
140 "Truflow parser does not support act %u\n",
142 return BNXT_TF_RC_ERROR;
143 } else if (hdr_info->act_type ==
144 BNXT_ULP_ACT_TYPE_SUPPORTED) {
145 /* call the registered callback handler */
146 if (hdr_info->proto_act_func) {
147 if (hdr_info->proto_act_func(action_item,
149 BNXT_TF_RC_SUCCESS) {
150 return BNXT_TF_RC_ERROR;
156 /* update the implied port details */
157 ulp_rte_parser_implicit_act_port_process(params);
158 return BNXT_TF_RC_SUCCESS;
/*
 * bnxt_ulp_comp_fld_intf_update() - post-parse update of the PARIF
 * computed fields for the flow's interface. Converts the incoming DPDK
 * port id to a ulp ifindex, then writes the PHY-port PARIF (plus VF-func
 * and loopback PARIFs for VF-rep matches) on ingress, or the DRV-func
 * PARIF otherwise.
 * NOTE(review): listing is truncated — error-return paths and some closing
 * braces are not visible here.
 */
162 * Function to handle the post processing of the computed
163 * fields for the interface.
166 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
169 uint16_t port_id, parif;
171 enum bnxt_ulp_direction_type dir;
173 /* get the direction details */
174 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
176 /* read the port id details */
177 port_id = ULP_COMP_FLD_IDX_RD(params,
178 BNXT_ULP_CF_IDX_INCOMING_IF);
179 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
182 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
186 if (dir == BNXT_ULP_DIR_INGRESS) {
/* Ingress flows match on the physical port's PARIF. */
188 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
189 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
190 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
193 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
196 /* Get the match port type */
197 mtype = ULP_COMP_FLD_IDX_RD(params,
198 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
199 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
200 ULP_COMP_FLD_IDX_WR(params,
201 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
203 /* Set VF func PARIF */
204 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
205 BNXT_ULP_VF_FUNC_PARIF,
208 "ParseErr:ifindex is not valid\n");
211 ULP_COMP_FLD_IDX_WR(params,
212 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
215 /* populate the loopback parif */
216 ULP_COMP_FLD_IDX_WR(params,
217 BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
218 BNXT_ULP_SYM_VF_FUNC_PARIF);
221 /* Set DRV func PARIF */
222 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
223 BNXT_ULP_DRV_FUNC_PARIF,
226 "ParseErr:ifindex is not valid\n");
229 ULP_COMP_FLD_IDX_WR(params,
230 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
/*
 * bnxt_ulp_rte_parser_post_process() - finalize computed fields after both
 * pattern and action parsing: stamp the flow direction into hdr/act
 * bitmaps, derive the VF-to-VF flag, split DEC_TTL into tunnel vs plain
 * TTL-decrement computed fields, merge hdr_fp_bit into hdr_bitmap and
 * refresh interface PARIF fields.
 */
237 * Function to handle the post processing of the parsing details
240 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
242 enum bnxt_ulp_direction_type dir;
243 enum bnxt_ulp_intf_type match_port_type, act_port_type;
244 uint32_t act_port_set;
246 /* Get the computed details */
247 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
248 match_port_type = ULP_COMP_FLD_IDX_RD(params,
249 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
250 act_port_type = ULP_COMP_FLD_IDX_RD(params,
251 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
252 act_port_set = ULP_COMP_FLD_IDX_RD(params,
253 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
255 /* set the flow direction in the proto and action header */
256 if (dir == BNXT_ULP_DIR_EGRESS) {
257 ULP_BITMAP_SET(params->hdr_bitmap.bits,
258 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
259 ULP_BITMAP_SET(params->act_bitmap.bits,
260 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
/* VF-to-VF: both the matched port and the action port are VF reps. */
263 /* calculate the VF to VF flag */
264 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
265 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
266 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
268 /* Update the decrement ttl computational fields */
269 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
270 BNXT_ULP_ACTION_BIT_DEC_TTL)) {
272 * Check that vxlan proto is included and vxlan decap
273 * action is not set then decrement tunnel ttl.
274 * Similarly add GRE and NVGRE in future.
276 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
277 BNXT_ULP_HDR_BIT_T_VXLAN) &&
278 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
279 BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
280 ULP_COMP_FLD_IDX_WR(params,
281 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
283 ULP_COMP_FLD_IDX_WR(params,
284 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
288 /* Merge the hdr_fp_bit into the proto header bit */
289 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
291 /* Update the computed interface parameters */
292 bnxt_ulp_comp_fld_intf_update(params);
294 /* TBD: Handle the flow rejection scenarios */
/*
 * bnxt_ulp_rte_parser_direction_compute() - derive the HW flow direction.
 * An ingress flow whose match port is a VF representor is treated as
 * egress (traffic originates from the VF); otherwise the direction simply
 * follows the flow's ingress/egress attribute.
 * NOTE(review): the early 'return' after the VF-rep case and the 'else'
 * branch structure are not fully visible in this truncated listing.
 */
299 * Function to compute the flow direction based on the match port details
302 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
304 enum bnxt_ulp_intf_type match_port_type;
306 /* Get the match port type */
307 match_port_type = ULP_COMP_FLD_IDX_RD(params,
308 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
310 /* If ingress flow and matchport is vf rep then dir is egress*/
311 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
312 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
313 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
314 BNXT_ULP_DIR_EGRESS);
316 /* Assign the input direction */
317 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
318 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
319 BNXT_ULP_DIR_INGRESS);
321 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
322 BNXT_ULP_DIR_EGRESS);
/*
 * ulp_rte_parser_svif_set() - resolve and store the source VIF (SVIF) for
 * the given ifindex. Rejects a second source match, records the match
 * port type, computes the flow direction, selects the SVIF type (phy port
 * on ingress; VF-func or drv-func otherwise), then writes the big-endian
 * SVIF spec/mask into the dedicated hdr_field slot and caches the
 * host-order SVIF in the SVIF_FLAG computed field.
 * NOTE(review): '¶ms' below is mojibake for '&params' in this listing.
 */
326 /* Function to handle the parsing of RTE Flow item PF Header. */
328 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
333 enum bnxt_ulp_direction_type dir;
334 struct ulp_rte_hdr_field *hdr_field;
335 enum bnxt_ulp_svif_type svif_type;
336 enum bnxt_ulp_intf_type port_type;
/* Only one source (SVIF) match is allowed per flow. */
338 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
339 BNXT_ULP_INVALID_SVIF_VAL) {
341 "SVIF already set,multiple source not support'd\n");
342 return BNXT_TF_RC_ERROR;
345 /* Get port type details */
346 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
347 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
348 BNXT_TF_DBG(ERR, "Invalid port type\n");
349 return BNXT_TF_RC_ERROR;
352 /* Update the match port type */
353 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
355 /* compute the direction */
356 bnxt_ulp_rte_parser_direction_compute(params);
358 /* Get the computed direction */
359 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
360 if (dir == BNXT_ULP_DIR_INGRESS) {
361 svif_type = BNXT_ULP_PHY_PORT_SVIF;
363 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
364 svif_type = BNXT_ULP_VF_FUNC_SVIF;
366 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
368 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* Hardware expects the SVIF in network byte order. */
370 svif = rte_cpu_to_be_16(svif);
371 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
372 memcpy(hdr_field->spec, &svif, sizeof(svif));
373 memcpy(hdr_field->mask, &mask, sizeof(mask));
374 hdr_field->size = sizeof(svif);
375 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
376 rte_be_to_cpu_16(svif));
377 return BNXT_TF_RC_SUCCESS;
/*
 * ulp_rte_parser_implicit_match_port_process() - if no explicit SVIF match
 * was found in the pattern, derive one from the incoming DPDK port id with
 * an all-ones (exact-match) mask.
 * NOTE(review): the 'return rc' and some braces are not visible in this
 * truncated listing.
 */
380 /* Function to handle the parsing of the RTE port id */
382 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
384 uint16_t port_id = 0;
385 uint16_t svif_mask = 0xFFFF;
387 int32_t rc = BNXT_TF_RC_ERROR;
/* Nothing to do if the pattern already supplied a source match. */
389 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
390 BNXT_ULP_INVALID_SVIF_VAL)
391 return BNXT_TF_RC_SUCCESS;
393 /* SVIF not set. So get the port id */
394 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
396 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
399 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
403 /* Update the SVIF details */
404 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
/*
 * ulp_rte_parser_implicit_act_port_process() - if no explicit port action
 * was given, synthesize a port_id action from the incoming interface and
 * run it through the normal port-id action handler, then clear the
 * ACT_PORT_IS_SET flag so the port is treated as implicit.
 */
408 /* Function to handle the implicit action port id */
410 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
412 struct rte_flow_action action_item = {0};
413 struct rte_flow_action_port_id port_id = {0};
415 /* Read the action port set bit */
416 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
417 /* Already set, so just exit */
418 return BNXT_TF_RC_SUCCESS;
420 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
421 action_item.conf = &port_id;
423 /* Update the action port based on incoming port */
424 ulp_rte_port_id_act_handler(&action_item, params);
426 /* Reset the action port set bit */
427 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
428 return BNXT_TF_RC_SUCCESS;
/*
 * ulp_rte_pf_hdr_handler() - handler for RTE_FLOW_ITEM_TYPE_PF. Uses the
 * implicit incoming port id (the PF itself), converts it to a ulp ifindex
 * and sets the SVIF with an exact-match mask. The item itself carries no
 * spec and is unused.
 */
431 /* Function to handle the parsing of RTE Flow item PF Header. */
433 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
434 struct ulp_rte_parser_params *params)
436 uint16_t port_id = 0;
437 uint16_t svif_mask = 0xFFFF;
440 /* Get the implicit port id */
441 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
443 /* perform the conversion from dpdk port to bnxt ifindex */
444 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
447 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
448 return BNXT_TF_RC_ERROR;
451 /* Update the SVIF details */
452 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
/*
 * ulp_rte_vf_hdr_handler() - handler for RTE_FLOW_ITEM_TYPE_VF. Validates
 * the VF spec/mask, converts the VF function id to a ulp ifindex and sets
 * the SVIF match from it.
 * NOTE(review): the spec/mask NULL checks and the assignment of 'mask'
 * are not visible in this truncated listing — confirm upstream.
 */
455 /* Function to handle the parsing of RTE Flow item VF Header. */
457 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
458 struct ulp_rte_parser_params *params)
460 const struct rte_flow_item_vf *vf_spec = item->spec;
461 const struct rte_flow_item_vf *vf_mask = item->mask;
464 int32_t rc = BNXT_TF_RC_PARSE_ERR;
466 /* Get VF rte_flow_item for Port details */
468 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
472 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
477 /* perform the conversion from VF Func id to bnxt ifindex */
478 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
481 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
484 /* Update the SVIF details */
485 return ulp_rte_parser_svif_set(params, ifindex, mask);
/*
 * ulp_rte_port_id_hdr_handler() - handler for RTE_FLOW_ITEM_TYPE_PORT_ID.
 * Validates the port-id spec/mask, converts the DPDK port id to a ulp
 * ifindex and sets the SVIF match with the caller-supplied mask.
 * NOTE(review): spec/mask NULL checks and error returns are not visible in
 * this truncated listing.
 */
488 /* Function to handle the parsing of RTE Flow item port id Header. */
490 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
491 struct ulp_rte_parser_params *params)
493 const struct rte_flow_item_port_id *port_spec = item->spec;
494 const struct rte_flow_item_port_id *port_mask = item->mask;
496 int32_t rc = BNXT_TF_RC_PARSE_ERR;
500 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
504 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
507 mask = port_mask->id;
509 /* perform the conversion from dpdk port to bnxt ifindex */
510 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
513 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
516 /* Update the SVIF details */
517 return ulp_rte_parser_svif_set(params, ifindex, mask);
/*
 * ulp_rte_phy_port_hdr_handler() - handler for
 * RTE_FLOW_ITEM_TYPE_PHY_PORT. Validates spec/mask, records the match
 * port type as PHY_PORT, computes the direction (phy-port matches are
 * only valid on ingress), looks up the SVIF for the physical port index
 * and writes the big-endian spec/mask into the SVIF hdr_field slot.
 * NOTE(review): '¶ms' below is mojibake for '&params'; some NULL checks
 * and braces are not visible in this truncated listing.
 */
520 /* Function to handle the parsing of RTE Flow item phy port Header. */
522 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
523 struct ulp_rte_parser_params *params)
525 const struct rte_flow_item_phy_port *port_spec = item->spec;
526 const struct rte_flow_item_phy_port *port_mask = item->mask;
528 int32_t rc = BNXT_TF_RC_ERROR;
530 enum bnxt_ulp_direction_type dir;
531 struct ulp_rte_hdr_field *hdr_field;
533 /* Copy the rte_flow_item for phy port into hdr_field */
535 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
539 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
542 mask = port_mask->index;
544 /* Update the match port type */
545 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
546 BNXT_ULP_INTF_TYPE_PHY_PORT);
548 /* Compute the Hw direction */
549 bnxt_ulp_rte_parser_direction_compute(params);
551 /* Direction validation */
552 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
553 if (dir == BNXT_ULP_DIR_EGRESS) {
555 "Parse Err:Phy ports are valid only for ingress\n");
556 return BNXT_TF_RC_PARSE_ERR;
559 /* Get the physical port details from port db */
560 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
563 BNXT_TF_DBG(ERR, "Failed to get port details\n");
564 return BNXT_TF_RC_PARSE_ERR;
567 /* Update the SVIF details */
568 svif = rte_cpu_to_be_16(svif);
569 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
570 memcpy(hdr_field->spec, &svif, sizeof(svif));
571 memcpy(hdr_field->mask, &mask, sizeof(mask));
572 hdr_field->size = sizeof(svif);
573 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
574 rte_be_to_cpu_16(svif));
575 return BNXT_TF_RC_SUCCESS;
/*
 * ulp_rte_l2_proto_type_update() - from the (big-endian) L2 ethertype,
 * set the inner/outer IPv4 or IPv6 fast-path header bit and the
 * corresponding I_L3/O_L3 computed field. 'in_flag' selects inner vs
 * outer (the branch on it is not fully visible in this truncated listing).
 */
578 /* Function to handle the update of proto header based on field values */
580 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
581 uint16_t type, uint32_t in_flag)
583 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
585 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
586 BNXT_ULP_HDR_BIT_I_IPV4);
587 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
589 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
590 BNXT_ULP_HDR_BIT_O_IPV4);
591 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
593 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
595 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
596 BNXT_ULP_HDR_BIT_I_IPV6);
597 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
599 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
600 BNXT_ULP_HDR_BIT_O_IPV6);
601 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
/*
 * ulp_rte_eth_hdr_handler() - handler for RTE_FLOW_ITEM_TYPE_ETH. Copies
 * dst MAC, src MAC and ethertype spec/mask into consecutive hdr_field
 * slots, reserves slots for up to BNXT_ULP_PROTO_HDR_VLAN_NUM VLAN tags,
 * marks the header as inner ETH if an outer ETH was already seen, and
 * propagates the ethertype to the L2 proto-type update.
 * NOTE(review): '¶ms'/'ð_mask' are mojibake for '&params'/'&eth_mask';
 * the spec/mask NULL guards are not visible in this truncated listing.
 */
606 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
608 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
609 struct ulp_rte_parser_params *params)
611 const struct rte_flow_item_eth *eth_spec = item->spec;
612 const struct rte_flow_item_eth *eth_mask = item->mask;
613 struct ulp_rte_hdr_field *field;
614 uint32_t idx = params->field_idx;
616 uint16_t eth_type = 0;
617 uint32_t inner_flag = 0;
620 * Copy the rte_flow_item for eth into hdr_field using ethernet
624 size = sizeof(eth_spec->dst.addr_bytes);
625 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
626 eth_spec->dst.addr_bytes,
628 size = sizeof(eth_spec->src.addr_bytes);
629 field = ulp_rte_parser_fld_copy(field,
630 eth_spec->src.addr_bytes,
632 field = ulp_rte_parser_fld_copy(field,
634 sizeof(eth_spec->type));
635 eth_type = eth_spec->type;
638 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
639 sizeof(eth_mask->dst.addr_bytes));
640 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
641 sizeof(eth_mask->src.addr_bytes));
642 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
643 sizeof(eth_mask->type));
645 /* Add number of vlan header elements */
646 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
647 params->vlan_idx = params->field_idx;
648 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
650 /* Update the protocol hdr bitmap */
651 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
652 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
655 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
657 /* Update the field protocol hdr bitmap */
658 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
660 return BNXT_TF_RC_SUCCESS;
/*
 * ulp_rte_vlan_hdr_handler() - handler for RTE_FLOW_ITEM_TYPE_VLAN.
 * Splits the TCI into priority (top 3 bits) and VLAN id (low 12 bits),
 * copies spec and mask into the reserved vlan hdr_field slots, then
 * classifies the tag as outer-outer / outer-inner / inner-outer /
 * inner-inner based on which ETH headers have been seen and how many tags
 * were already counted, updating the VTAG computed fields and hdr bitmap.
 * NOTE(review): '¶ms' is mojibake for '&params'; several guards, braces
 * and the priority spec/mask copies are not visible in this truncated
 * listing. The debug string typo "withtout" is in the original source.
 */
663 /* Function to handle the parsing of RTE Flow item Vlan Header. */
665 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
666 struct ulp_rte_parser_params *params)
668 const struct rte_flow_item_vlan *vlan_spec = item->spec;
669 const struct rte_flow_item_vlan *vlan_mask = item->mask;
670 struct ulp_rte_hdr_field *field;
671 struct ulp_rte_hdr_bitmap *hdr_bit;
672 uint32_t idx = params->vlan_idx;
673 uint16_t vlan_tag, priority;
674 uint32_t outer_vtag_num;
675 uint32_t inner_vtag_num;
676 uint16_t eth_type = 0;
677 uint32_t inner_flag = 0;
680 * Copy the rte_flow_item for vlan into hdr_field using Vlan
684 vlan_tag = ntohs(vlan_spec->tci);
685 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
686 vlan_tag &= ULP_VLAN_TAG_MASK;
687 vlan_tag = htons(vlan_tag);
689 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
692 field = ulp_rte_parser_fld_copy(field,
695 field = ulp_rte_parser_fld_copy(field,
696 &vlan_spec->inner_type,
697 sizeof(vlan_spec->inner_type));
698 eth_type = vlan_spec->inner_type;
702 vlan_tag = ntohs(vlan_mask->tci);
703 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
707 * the storage for priority and vlan tag is 2 bytes
708 * The mask of priority which is 3 bits if it is all 1's
709 * then make the rest bits 13 bits as 1's
710 * so that it is matched as exact match.
712 if (priority == ULP_VLAN_PRIORITY_MASK)
713 priority |= ~ULP_VLAN_PRIORITY_MASK;
714 if (vlan_tag == ULP_VLAN_TAG_MASK)
715 vlan_tag |= ~ULP_VLAN_TAG_MASK;
716 vlan_tag = htons(vlan_tag);
719 * The priority field is ignored since OVS is setting it as
720 * wild card match and it is not supported. This is a work
721 * around and shall be addressed in the future.
725 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
727 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
728 sizeof(vlan_mask->inner_type));
730 /* Set the vlan index to new incremented value */
731 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
733 /* Get the outer tag and inner tag counts */
734 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
735 BNXT_ULP_CF_IDX_O_VTAG_NUM);
736 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
737 BNXT_ULP_CF_IDX_I_VTAG_NUM);
739 /* Update the hdr_bitmap of the vlans */
740 hdr_bit = ¶ms->hdr_bitmap;
741 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
742 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
744 /* Update the vlan tag num */
746 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
748 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
749 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
750 ULP_BITMAP_SET(params->hdr_bitmap.bits,
751 BNXT_ULP_HDR_BIT_OO_VLAN);
752 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
753 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
754 outer_vtag_num == 1) {
755 /* update the vlan tag num */
757 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
759 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
760 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
761 ULP_BITMAP_SET(params->hdr_bitmap.bits,
762 BNXT_ULP_HDR_BIT_OI_VLAN);
763 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
764 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
766 /* update the vlan tag num */
768 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
770 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
771 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
772 ULP_BITMAP_SET(params->hdr_bitmap.bits,
773 BNXT_ULP_HDR_BIT_IO_VLAN);
775 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
776 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
777 inner_vtag_num == 1) {
778 /* update the vlan tag num */
780 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
782 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
783 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
784 ULP_BITMAP_SET(params->hdr_bitmap.bits,
785 BNXT_ULP_HDR_BIT_II_VLAN);
788 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
789 return BNXT_TF_RC_ERROR;
791 /* Update the field protocol hdr bitmap */
792 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
793 return BNXT_TF_RC_SUCCESS;
/*
 * ulp_rte_l3_proto_type_update() - from the IP next-protocol value, set
 * the inner/outer UDP or TCP fast-path header bit and the corresponding
 * I_L4/O_L4 computed field. 'in_flag' selects inner vs outer (the branch
 * on it is not fully visible in this truncated listing).
 */
796 /* Function to handle the update of proto header based on field values */
798 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
799 uint8_t proto, uint32_t in_flag)
801 if (proto == IPPROTO_UDP) {
803 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
804 BNXT_ULP_HDR_BIT_I_UDP);
805 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
807 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
808 BNXT_ULP_HDR_BIT_O_UDP);
809 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
811 } else if (proto == IPPROTO_TCP) {
813 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
814 BNXT_ULP_HDR_BIT_I_TCP);
815 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
817 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
818 BNXT_ULP_HDR_BIT_O_TCP);
819 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
/*
 * ulp_rte_ipv4_hdr_handler() - handler for RTE_FLOW_ITEM_TYPE_IPV4.
 * Rejects a third L3 header, copies every ipv4 header field's spec and
 * mask into consecutive hdr_field slots (tos mask deliberately skipped as
 * an OVS workaround), advances field_idx, classifies the header as inner
 * or outer IPv4, propagates next_proto_id to the L4 proto-type update and
 * increments the L3 header count.
 * NOTE(review): '¶ms' is mojibake for '&params'; spec/mask NULL guards
 * and some braces are not visible in this truncated listing.
 */
824 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
826 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
827 struct ulp_rte_parser_params *params)
829 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
830 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
831 struct ulp_rte_hdr_field *field;
832 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
833 uint32_t idx = params->field_idx;
836 uint32_t inner_flag = 0;
839 /* validate there are no 3rd L3 header */
840 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
842 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
843 return BNXT_TF_RC_ERROR;
847 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
851 size = sizeof(ipv4_spec->hdr.version_ihl);
852 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
853 &ipv4_spec->hdr.version_ihl,
855 size = sizeof(ipv4_spec->hdr.type_of_service);
856 field = ulp_rte_parser_fld_copy(field,
857 &ipv4_spec->hdr.type_of_service,
859 size = sizeof(ipv4_spec->hdr.total_length);
860 field = ulp_rte_parser_fld_copy(field,
861 &ipv4_spec->hdr.total_length,
863 size = sizeof(ipv4_spec->hdr.packet_id);
864 field = ulp_rte_parser_fld_copy(field,
865 &ipv4_spec->hdr.packet_id,
867 size = sizeof(ipv4_spec->hdr.fragment_offset);
868 field = ulp_rte_parser_fld_copy(field,
869 &ipv4_spec->hdr.fragment_offset,
871 size = sizeof(ipv4_spec->hdr.time_to_live);
872 field = ulp_rte_parser_fld_copy(field,
873 &ipv4_spec->hdr.time_to_live,
875 size = sizeof(ipv4_spec->hdr.next_proto_id);
876 field = ulp_rte_parser_fld_copy(field,
877 &ipv4_spec->hdr.next_proto_id,
879 proto = ipv4_spec->hdr.next_proto_id;
880 size = sizeof(ipv4_spec->hdr.hdr_checksum);
881 field = ulp_rte_parser_fld_copy(field,
882 &ipv4_spec->hdr.hdr_checksum,
884 size = sizeof(ipv4_spec->hdr.src_addr);
885 field = ulp_rte_parser_fld_copy(field,
886 &ipv4_spec->hdr.src_addr,
888 size = sizeof(ipv4_spec->hdr.dst_addr);
889 field = ulp_rte_parser_fld_copy(field,
890 &ipv4_spec->hdr.dst_addr,
894 ulp_rte_prsr_mask_copy(params, &idx,
895 &ipv4_mask->hdr.version_ihl,
896 sizeof(ipv4_mask->hdr.version_ihl));
898 * The tos field is ignored since OVS is setting it as wild card
899 * match and it is not supported. This is a work around and
900 * shall be addressed in the future.
904 ulp_rte_prsr_mask_copy(params, &idx,
905 &ipv4_mask->hdr.total_length,
906 sizeof(ipv4_mask->hdr.total_length));
907 ulp_rte_prsr_mask_copy(params, &idx,
908 &ipv4_mask->hdr.packet_id,
909 sizeof(ipv4_mask->hdr.packet_id));
910 ulp_rte_prsr_mask_copy(params, &idx,
911 &ipv4_mask->hdr.fragment_offset,
912 sizeof(ipv4_mask->hdr.fragment_offset));
913 ulp_rte_prsr_mask_copy(params, &idx,
914 &ipv4_mask->hdr.time_to_live,
915 sizeof(ipv4_mask->hdr.time_to_live));
916 ulp_rte_prsr_mask_copy(params, &idx,
917 &ipv4_mask->hdr.next_proto_id,
918 sizeof(ipv4_mask->hdr.next_proto_id));
919 ulp_rte_prsr_mask_copy(params, &idx,
920 &ipv4_mask->hdr.hdr_checksum,
921 sizeof(ipv4_mask->hdr.hdr_checksum));
922 ulp_rte_prsr_mask_copy(params, &idx,
923 &ipv4_mask->hdr.src_addr,
924 sizeof(ipv4_mask->hdr.src_addr));
925 ulp_rte_prsr_mask_copy(params, &idx,
926 &ipv4_mask->hdr.dst_addr,
927 sizeof(ipv4_mask->hdr.dst_addr));
929 /* Add the number of ipv4 header elements */
930 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
932 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
933 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
934 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
935 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
936 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
939 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
940 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
943 /* Update the field protocol hdr bitmap */
944 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
945 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
946 return BNXT_TF_RC_SUCCESS;
/*
 * ulp_rte_ipv6_hdr_handler() - handler for RTE_FLOW_ITEM_TYPE_IPV6.
 * Rejects a third L3 header, decomposes vtc_flow into version / traffic
 * class / flow label via the BNXT_ULP_GET_IPV6_* accessors, copies each
 * header field's spec and mask into consecutive hdr_field slots, advances
 * field_idx, classifies the header as inner or outer IPv6, propagates
 * 'proto' to the L4 proto-type update and increments the L3 header count.
 * NOTE(review): '¶ms' is mojibake for '&params'; spec/mask NULL guards
 * and several continuation lines are not visible in this truncated listing.
 */
949 /* Function to handle the parsing of RTE Flow item IPV6 Header */
951 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
952 struct ulp_rte_parser_params *params)
954 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
955 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
956 struct ulp_rte_hdr_field *field;
957 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
958 uint32_t idx = params->field_idx;
960 uint32_t vtcf, vtcf_mask;
962 uint32_t inner_flag = 0;
965 /* validate there are no 3rd L3 header */
966 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
968 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
969 return BNXT_TF_RC_ERROR;
973 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
977 size = sizeof(ipv6_spec->hdr.vtc_flow);
979 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
980 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
984 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
985 field = ulp_rte_parser_fld_copy(field,
989 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
990 field = ulp_rte_parser_fld_copy(field,
994 size = sizeof(ipv6_spec->hdr.payload_len);
995 field = ulp_rte_parser_fld_copy(field,
996 &ipv6_spec->hdr.payload_len,
998 size = sizeof(ipv6_spec->hdr.proto);
999 field = ulp_rte_parser_fld_copy(field,
1000 &ipv6_spec->hdr.proto,
1002 proto = ipv6_spec->hdr.proto;
1003 size = sizeof(ipv6_spec->hdr.hop_limits);
1004 field = ulp_rte_parser_fld_copy(field,
1005 &ipv6_spec->hdr.hop_limits,
1007 size = sizeof(ipv6_spec->hdr.src_addr);
1008 field = ulp_rte_parser_fld_copy(field,
1009 &ipv6_spec->hdr.src_addr,
1011 size = sizeof(ipv6_spec->hdr.dst_addr);
1012 field = ulp_rte_parser_fld_copy(field,
1013 &ipv6_spec->hdr.dst_addr,
1017 size = sizeof(ipv6_mask->hdr.vtc_flow);
1019 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1020 ulp_rte_prsr_mask_copy(params, &idx,
1024 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1025 ulp_rte_prsr_mask_copy(params, &idx,
1030 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1031 ulp_rte_prsr_mask_copy(params, &idx,
1035 ulp_rte_prsr_mask_copy(params, &idx,
1036 &ipv6_mask->hdr.payload_len,
1037 sizeof(ipv6_mask->hdr.payload_len));
1038 ulp_rte_prsr_mask_copy(params, &idx,
1039 &ipv6_mask->hdr.proto,
1040 sizeof(ipv6_mask->hdr.proto));
1041 ulp_rte_prsr_mask_copy(params, &idx,
1042 &ipv6_mask->hdr.hop_limits,
1043 sizeof(ipv6_mask->hdr.hop_limits));
1044 ulp_rte_prsr_mask_copy(params, &idx,
1045 &ipv6_mask->hdr.src_addr,
1046 sizeof(ipv6_mask->hdr.src_addr));
1047 ulp_rte_prsr_mask_copy(params, &idx,
1048 &ipv6_mask->hdr.dst_addr,
1049 sizeof(ipv6_mask->hdr.dst_addr));
1051 /* add number of ipv6 header elements */
1052 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1054 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1055 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1056 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1057 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1058 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1061 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1062 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1065 /* Update the field protocol hdr bitmap */
1066 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1067 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1069 return BNXT_TF_RC_SUCCESS;
/*
 * ulp_rte_l4_proto_type_update() - if the (big-endian) UDP destination
 * port is the IANA VXLAN port (4789, ULP_UDP_PORT_VXLAN), set the tunnel
 * VXLAN fast-path header bit.
 */
1072 /* Function to handle the update of proto header based on field values */
1074 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1077 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1078 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1079 BNXT_ULP_HDR_BIT_T_VXLAN);
/*
 * ulp_rte_udp_hdr_handler() - handler for RTE_FLOW_ITEM_TYPE_UDP.
 * Rejects a third L4 header, copies src/dst port, dgram_len and
 * dgram_cksum spec/mask into consecutive hdr_field slots, advances
 * field_idx, classifies the header as inner or outer UDP, and for the
 * outer case checks the dst port for VXLAN tunneling. Increments the L4
 * header count.
 * NOTE(review): '¶ms' is mojibake for '&params'; spec/mask NULL guards
 * and some braces are not visible in this truncated listing.
 */
1082 /* Function to handle the parsing of RTE Flow item UDP Header. */
1084 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1085 struct ulp_rte_parser_params *params)
1087 const struct rte_flow_item_udp *udp_spec = item->spec;
1088 const struct rte_flow_item_udp *udp_mask = item->mask;
1089 struct ulp_rte_hdr_field *field;
1090 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1091 uint32_t idx = params->field_idx;
1093 uint16_t dst_port = 0;
1096 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1098 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1099 return BNXT_TF_RC_ERROR;
1103 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1107 size = sizeof(udp_spec->hdr.src_port);
1108 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1109 &udp_spec->hdr.src_port,
1111 size = sizeof(udp_spec->hdr.dst_port);
1112 field = ulp_rte_parser_fld_copy(field,
1113 &udp_spec->hdr.dst_port,
1115 dst_port = udp_spec->hdr.dst_port;
1116 size = sizeof(udp_spec->hdr.dgram_len);
1117 field = ulp_rte_parser_fld_copy(field,
1118 &udp_spec->hdr.dgram_len,
1120 size = sizeof(udp_spec->hdr.dgram_cksum);
1121 field = ulp_rte_parser_fld_copy(field,
1122 &udp_spec->hdr.dgram_cksum,
1126 ulp_rte_prsr_mask_copy(params, &idx,
1127 &udp_mask->hdr.src_port,
1128 sizeof(udp_mask->hdr.src_port));
1129 ulp_rte_prsr_mask_copy(params, &idx,
1130 &udp_mask->hdr.dst_port,
1131 sizeof(udp_mask->hdr.dst_port));
1132 ulp_rte_prsr_mask_copy(params, &idx,
1133 &udp_mask->hdr.dgram_len,
1134 sizeof(udp_mask->hdr.dgram_len));
1135 ulp_rte_prsr_mask_copy(params, &idx,
1136 &udp_mask->hdr.dgram_cksum,
1137 sizeof(udp_mask->hdr.dgram_cksum));
1140 /* Add number of UDP header elements */
1141 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1143 /* Set the udp header bitmap and computed l4 header bitmaps */
1144 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1145 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1146 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1147 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1149 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1150 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1151 /* Update the field protocol hdr bitmap */
1152 ulp_rte_l4_proto_type_update(params, dst_port);
1154 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1155 return BNXT_TF_RC_SUCCESS;
1158 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies the TCP item spec/mask into params->hdr_field, advances field_idx,
 * and sets the inner/outer TCP header bits and computed L4 fields.
 * NOTE(review): numbering gaps show missing lines (signature return type,
 * braces, `size`/`cnt` declarations, spec/mask NULL guards) in this excerpt.
 */
1160 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1161 struct ulp_rte_parser_params *params)
1163 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1164 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1165 struct ulp_rte_hdr_field *field;
1166 struct ulp_rte_hdr_bitmap *hdr_bitmap = &#182;ms->hdr_bitmap;
1167 uint32_t idx = params->field_idx;
/* Only two L4 headers (outer + inner) are supported per flow. */
1171 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1173 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1174 return BNXT_TF_RC_ERROR;
1178 * Copy the rte_flow_item for tcp into hdr_field using tcp
1182 size = sizeof(tcp_spec->hdr.src_port);
1183 field = ulp_rte_parser_fld_copy(&#182;ms->hdr_field[idx],
1184 &tcp_spec->hdr.src_port,
1186 size = sizeof(tcp_spec->hdr.dst_port);
1187 field = ulp_rte_parser_fld_copy(field,
1188 &tcp_spec->hdr.dst_port,
1190 size = sizeof(tcp_spec->hdr.sent_seq);
1191 field = ulp_rte_parser_fld_copy(field,
1192 &tcp_spec->hdr.sent_seq,
1194 size = sizeof(tcp_spec->hdr.recv_ack);
1195 field = ulp_rte_parser_fld_copy(field,
1196 &tcp_spec->hdr.recv_ack,
1198 size = sizeof(tcp_spec->hdr.data_off);
1199 field = ulp_rte_parser_fld_copy(field,
1200 &tcp_spec->hdr.data_off,
1202 size = sizeof(tcp_spec->hdr.tcp_flags);
1203 field = ulp_rte_parser_fld_copy(field,
1204 &tcp_spec->hdr.tcp_flags,
1206 size = sizeof(tcp_spec->hdr.rx_win);
1207 field = ulp_rte_parser_fld_copy(field,
1208 &tcp_spec->hdr.rx_win,
1210 size = sizeof(tcp_spec->hdr.cksum);
1211 field = ulp_rte_parser_fld_copy(field,
1212 &tcp_spec->hdr.cksum,
1214 size = sizeof(tcp_spec->hdr.tcp_urp);
1215 field = ulp_rte_parser_fld_copy(field,
1216 &tcp_spec->hdr.tcp_urp,
/* Skip past the spec slots when no spec was provided (idx re-aligned). */
1219 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1223 ulp_rte_prsr_mask_copy(params, &idx,
1224 &tcp_mask->hdr.src_port,
1225 sizeof(tcp_mask->hdr.src_port));
1226 ulp_rte_prsr_mask_copy(params, &idx,
1227 &tcp_mask->hdr.dst_port,
1228 sizeof(tcp_mask->hdr.dst_port));
1229 ulp_rte_prsr_mask_copy(params, &idx,
1230 &tcp_mask->hdr.sent_seq,
1231 sizeof(tcp_mask->hdr.sent_seq));
1232 ulp_rte_prsr_mask_copy(params, &idx,
1233 &tcp_mask->hdr.recv_ack,
1234 sizeof(tcp_mask->hdr.recv_ack));
1235 ulp_rte_prsr_mask_copy(params, &idx,
1236 &tcp_mask->hdr.data_off,
1237 sizeof(tcp_mask->hdr.data_off));
1238 ulp_rte_prsr_mask_copy(params, &idx,
1239 &tcp_mask->hdr.tcp_flags,
1240 sizeof(tcp_mask->hdr.tcp_flags));
1241 ulp_rte_prsr_mask_copy(params, &idx,
1242 &tcp_mask->hdr.rx_win,
1243 sizeof(tcp_mask->hdr.rx_win));
1244 ulp_rte_prsr_mask_copy(params, &idx,
1245 &tcp_mask->hdr.cksum,
1246 sizeof(tcp_mask->hdr.cksum));
1247 ulp_rte_prsr_mask_copy(params, &idx,
1248 &tcp_mask->hdr.tcp_urp,
1249 sizeof(tcp_mask->hdr.tcp_urp));
1251 /* add number of TCP header elements */
1252 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1254 /* Set the tcp header bitmap and computed l4 header bitmaps */
/* An outer L4 already present => this TCP is the inner header. */
1255 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1256 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1257 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1258 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1260 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1261 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1263 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1264 return BNXT_TF_RC_SUCCESS;
1267 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies the VXLAN item spec/mask (flags, rsvd0, vni, rsvd1) into
 * params->hdr_field and sets the VXLAN tunnel bit in the header bitmap.
 * NOTE(review): the spec-pointer arguments of the fld_copy calls and the
 * size arguments are on lines missing from this excerpt (numbering gaps).
 */
1269 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1270 struct ulp_rte_parser_params *params)
1272 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1273 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1274 struct ulp_rte_hdr_field *field;
1275 struct ulp_rte_hdr_bitmap *hdr_bitmap = &#182;ms->hdr_bitmap;
1276 uint32_t idx = params->field_idx;
1280 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1284 size = sizeof(vxlan_spec->flags);
1285 field = ulp_rte_parser_fld_copy(&#182;ms->hdr_field[idx],
1288 size = sizeof(vxlan_spec->rsvd0);
1289 field = ulp_rte_parser_fld_copy(field,
1292 size = sizeof(vxlan_spec->vni);
1293 field = ulp_rte_parser_fld_copy(field,
1296 size = sizeof(vxlan_spec->rsvd1);
1297 field = ulp_rte_parser_fld_copy(field,
1302 ulp_rte_prsr_mask_copy(params, &idx,
1304 sizeof(vxlan_mask->flags));
1305 ulp_rte_prsr_mask_copy(params, &idx,
1307 sizeof(vxlan_mask->rsvd0));
1308 ulp_rte_prsr_mask_copy(params, &idx,
1310 sizeof(vxlan_mask->vni));
1311 ulp_rte_prsr_mask_copy(params, &idx,
1313 sizeof(vxlan_mask->rsvd1));
1315 /* Add number of vxlan header elements */
1316 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1318 /* Update the hdr_bitmap with vxlan */
1319 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1320 return BNXT_TF_RC_SUCCESS;
1323 /* Function to handle the parsing of RTE Flow item void Header */
/* VOID items carry no match data; accept and ignore them. */
1325 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1326 struct ulp_rte_parser_params *params __rte_unused)
1328 return BNXT_TF_RC_SUCCESS;
1331 /* Function to handle the parsing of RTE Flow action void Header. */
/* VOID actions are no-ops; accept and ignore them. */
1333 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1334 struct ulp_rte_parser_params *params __rte_unused)
1336 return BNXT_TF_RC_SUCCESS;
1339 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the big-endian mark id in the action properties and sets the MARK
 * action bit.  NOTE(review): the `if (mark)` NULL guard and the mark_id
 * declaration are on lines missing from this excerpt (numbering gaps).
 */
1341 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1342 struct ulp_rte_parser_params *param)
1344 const struct rte_flow_action_mark *mark;
1345 struct ulp_rte_act_bitmap *act = &#182;m->act_bitmap;
1348 mark = action_item->conf;
1350 mark_id = tfp_cpu_to_be_32(mark->id);
1351 memcpy(&#182;m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1352 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1354 /* Update the act_bitmap with mark */
1355 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1356 return BNXT_TF_RC_SUCCESS;
1358 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1359 return BNXT_TF_RC_ERROR;
1362 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Sets the RSS action bit when an RSS conf is present; the RSS parameters
 * themselves are not copied here.  NOTE(review): the `if (rss)` guard line
 * is missing from this excerpt (numbering gap).
 */
1364 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1365 struct ulp_rte_parser_params *param)
1367 const struct rte_flow_action_rss *rss = action_item->conf;
1370 /* Update the act_bitmap with rss */
1371 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1372 return BNXT_TF_RC_SUCCESS;
1374 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1375 return BNXT_TF_RC_ERROR;
1378 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walks the encap definition item list (ETH -> [VLAN [VLAN]] -> IPv4/IPv6 ->
 * UDP -> VXLAN), copying each header into the encap action properties, then
 * sets the VXLAN_ENCAP action bit.
 * NOTE(review): numbering gaps show missing lines throughout (signature
 * return type, braces, `buff` declaration, several NULL guards and else
 * branches) — confirm control flow against the full file.
 */
1380 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1381 struct ulp_rte_parser_params *params)
1383 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1384 const struct rte_flow_item *item;
1385 const struct rte_flow_item_eth *eth_spec;
1386 const struct rte_flow_item_ipv4 *ipv4_spec;
1387 const struct rte_flow_item_ipv6 *ipv6_spec;
1388 struct rte_flow_item_vxlan vxlan_spec;
1389 uint32_t vlan_num = 0, vlan_size = 0;
1390 uint32_t ip_size = 0, ip_type = 0;
1391 uint32_t vxlan_size = 0;
1393 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1394 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1396 struct ulp_rte_act_bitmap *act = &#182;ms->act_bitmap;
1397 struct ulp_rte_act_prop *ap = &#182;ms->act_prop;
1399 vxlan_encap = action_item->conf;
1401 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1402 return BNXT_TF_RC_ERROR;
1405 item = vxlan_encap->definition;
1407 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1408 return BNXT_TF_RC_ERROR;
1411 if (!ulp_rte_item_skip_void(&item, 0))
1412 return BNXT_TF_RC_ERROR;
1414 /* must have ethernet header */
1415 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1416 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1417 return BNXT_TF_RC_ERROR;
/* Copy outer L2 dst/src MAC into the encap properties. */
1419 eth_spec = item->spec;
1420 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1421 ulp_encap_buffer_copy(buff,
1422 eth_spec->dst.addr_bytes,
1423 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1425 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1426 ulp_encap_buffer_copy(buff,
1427 eth_spec->src.addr_bytes,
1428 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1430 /* Goto the next item */
1431 if (!ulp_rte_item_skip_void(&item, 1))
1432 return BNXT_TF_RC_ERROR;
1434 /* May have vlan header */
1435 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1437 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1438 ulp_encap_buffer_copy(buff,
1440 sizeof(struct rte_flow_item_vlan));
1442 if (!ulp_rte_item_skip_void(&item, 1))
1443 return BNXT_TF_RC_ERROR;
1446 /* may have two vlan headers */
1447 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
/* Second tag lands right after the first in the VTAG property. */
1449 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1450 sizeof(struct rte_flow_item_vlan)],
1452 sizeof(struct rte_flow_item_vlan));
1453 if (!ulp_rte_item_skip_void(&item, 1))
1454 return BNXT_TF_RC_ERROR;
1456 /* Update the vlan count and size of more than one */
1458 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1459 vlan_num = tfp_cpu_to_be_32(vlan_num);
1460 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1463 vlan_size = tfp_cpu_to_be_32(vlan_size);
1464 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1469 /* L3 must be IPv4, IPv6 */
1470 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1471 ipv4_spec = item->spec;
1472 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1474 /* copy the ipv4 details */
/* If ver/hlen/tos are all-zero, splice in defaults from def_ipv4_hdr;
 * otherwise copy the user-supplied id/proto then ver/hlen/tos fields.
 */
1475 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1476 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1477 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1478 ulp_encap_buffer_copy(buff,
1480 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1481 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1483 const uint8_t *tmp_buff;
1485 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1486 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1487 ulp_encap_buffer_copy(buff,
1489 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1490 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1491 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1492 ulp_encap_buffer_copy(buff,
1493 &ipv4_spec->hdr.version_ihl,
1494 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1496 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1497 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1498 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1499 ulp_encap_buffer_copy(buff,
1500 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1501 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1503 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1504 ulp_encap_buffer_copy(buff,
1505 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1506 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1508 /* Update the ip size details */
1509 ip_size = tfp_cpu_to_be_32(ip_size);
1510 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1511 &ip_size, sizeof(uint32_t));
1513 /* update the ip type */
1514 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1515 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1516 &ip_type, sizeof(uint32_t));
1518 /* update the computed field to notify it is ipv4 header */
1519 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1522 if (!ulp_rte_item_skip_void(&item, 1))
1523 return BNXT_TF_RC_ERROR;
1524 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1525 ipv6_spec = item->spec;
1526 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1528 /* copy the ipv6 details */
1529 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1530 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1532 /* Update the ip size details */
1533 ip_size = tfp_cpu_to_be_32(ip_size);
1534 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1535 &ip_size, sizeof(uint32_t));
1537 /* update the ip type */
1538 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1539 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1540 &ip_type, sizeof(uint32_t));
1542 /* update the computed field to notify it is ipv6 header */
1543 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1546 if (!ulp_rte_item_skip_void(&item, 1))
1547 return BNXT_TF_RC_ERROR;
1549 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1550 return BNXT_TF_RC_ERROR;
1554 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1555 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1556 return BNXT_TF_RC_ERROR;
1558 /* copy the udp details */
1559 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1560 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1562 if (!ulp_rte_item_skip_void(&item, 1))
1563 return BNXT_TF_RC_ERROR;
1566 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1567 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1568 return BNXT_TF_RC_ERROR;
1570 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1571 /* copy the vxlan details */
1572 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* Force the I flag (valid VNI) in the VXLAN header flags byte. */
1573 vxlan_spec.flags = 0x08;
1574 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1575 (const uint8_t *)&vxlan_spec,
1577 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1578 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1579 &vxlan_size, sizeof(uint32_t));
1581 /* update the hdr_bitmap with vxlan */
1582 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1583 return BNXT_TF_RC_SUCCESS;
1586 /* Function to handle the parsing of RTE Flow action vxlan_decap Header */
/* Decap carries no arguments; just set the VXLAN_DECAP action bit. */
1588 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1590 struct ulp_rte_parser_params *params)
1592 /* update the hdr_bitmap with vxlan */
1593 ULP_BITMAP_SET(params->act_bitmap.bits,
1594 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1595 return BNXT_TF_RC_SUCCESS;
1598 /* Function to handle the parsing of RTE Flow action drop Header. */
/* Drop carries no arguments; just set the DROP action bit. */
1600 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1601 struct ulp_rte_parser_params *params)
1603 /* Update the hdr_bitmap with drop */
1604 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1605 return BNXT_TF_RC_SUCCESS;
1608 /* Function to handle the parsing of RTE Flow action count. */
/*
 * Rejects shared counters, copies the count conf into the action properties
 * and sets the COUNT action bit.  NOTE(review): the `if (act_count)` guard
 * and the memcpy source argument are on lines missing from this excerpt.
 */
1610 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1611 struct ulp_rte_parser_params *params)
1614 const struct rte_flow_action_count *act_count;
1615 struct ulp_rte_act_prop *act_prop = &#182;ms->act_prop;
1617 act_count = action_item->conf;
/* Shared counters are not supported by this parser. */
1619 if (act_count->shared) {
1621 "Parse Error:Shared count not supported\n");
1622 return BNXT_TF_RC_PARSE_ERR;
1624 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1626 BNXT_ULP_ACT_PROP_SZ_COUNT);
1629 /* Update the hdr_bitmap with count */
1630 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1631 return BNXT_TF_RC_SUCCESS;
1634 /* Function to handle the parsing of action ports. */
/*
 * Resolves the destination for a port action: egress flows get the vport of
 * ifindex; ingress flows get the default vnic (VF vnic for VF-rep targets,
 * driver-function vnic otherwise).  Also sets ACT_PORT_IS_SET.
 * NOTE(review): `pid_s`, `pid`, `vnic_type` declarations and some assignment
 * lines are missing from this excerpt (numbering gaps).
 */
1636 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1639 enum bnxt_ulp_direction_type dir;
1642 struct ulp_rte_act_prop *act = &#182;m->act_prop;
1643 enum bnxt_ulp_intf_type port_type;
1646 /* Get the direction */
1647 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1648 if (dir == BNXT_ULP_DIR_EGRESS) {
1649 /* For egress direction, fill vport */
1650 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1651 return BNXT_TF_RC_ERROR;
1654 pid = rte_cpu_to_be_32(pid);
1655 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1656 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1658 /* For ingress direction, fill vnic */
1659 port_type = ULP_COMP_FLD_IDX_RD(param,
1660 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1661 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1662 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1664 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1666 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1668 return BNXT_TF_RC_ERROR;
1671 pid = rte_cpu_to_be_32(pid);
1672 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1673 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1676 /* Update the action port set bit */
1677 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1678 return BNXT_TF_RC_SUCCESS;
1681 /* Function to handle the parsing of RTE Flow action PF. */
/*
 * Maps the current device's port to a port-db ifindex, verifies it is a PF,
 * records the port type and delegates to ulp_rte_parser_act_port_set().
 * NOTE(review): `port_id`/`ifindex` declarations are on missing lines.
 */
1683 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1684 struct ulp_rte_parser_params *params)
1688 enum bnxt_ulp_intf_type intf_type;
1690 /* Get the port id of the current device */
1691 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1693 /* Get the port db ifindex */
1694 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1696 BNXT_TF_DBG(ERR, "Invalid port id\n");
1697 return BNXT_TF_RC_ERROR;
1700 /* Check the port is PF port */
1701 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1702 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1703 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1704 return BNXT_TF_RC_ERROR;
1706 /* Update the action properties */
1707 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1708 return ulp_rte_parser_act_port_set(params, ifindex);
1711 /* Function to handle the parsing of RTE Flow action VF. */
/*
 * Validates the VF action conf, maps the VF id to a port-db ifindex,
 * verifies the interface is a (trusted) VF, then delegates to
 * ulp_rte_parser_act_port_set().
 * NOTE(review): the `if (!vf_action)` guard and `ifindex` declaration are on
 * lines missing from this excerpt.
 */
1713 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1714 struct ulp_rte_parser_params *params)
1716 const struct rte_flow_action_vf *vf_action;
1718 enum bnxt_ulp_intf_type intf_type;
1720 vf_action = action_item->conf;
1722 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1723 return BNXT_TF_RC_PARSE_ERR;
/* `original` semantics are not supported for the VF action. */
1726 if (vf_action->original) {
1727 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1728 return BNXT_TF_RC_PARSE_ERR;
1731 /* Check the port is VF port */
1732 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1734 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1735 return BNXT_TF_RC_ERROR;
1737 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1738 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1739 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1740 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1741 return BNXT_TF_RC_ERROR;
1744 /* Update the action properties */
1745 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1746 return ulp_rte_parser_act_port_set(params, ifindex);
1749 /* Function to handle the parsing of RTE Flow action port_id. */
/*
 * Validates the port_id action conf, maps the DPDK port id to a port-db
 * ifindex, records the interface type and delegates to
 * ulp_rte_parser_act_port_set().
 * NOTE(review): the `if (!port_id)` guard, `ifindex` declaration and the
 * invalid-type comparison line are missing from this excerpt.
 */
1751 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1752 struct ulp_rte_parser_params *param)
1754 const struct rte_flow_action_port_id *port_id = act_item->conf;
1756 enum bnxt_ulp_intf_type intf_type;
1760 "ParseErr: Invalid Argument\n");
1761 return BNXT_TF_RC_PARSE_ERR;
/* `original` semantics are not supported for the port_id action. */
1763 if (port_id->original) {
1765 "ParseErr:Portid Original not supported\n");
1766 return BNXT_TF_RC_PARSE_ERR;
1769 /* Get the port db ifindex */
1770 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1772 BNXT_TF_DBG(ERR, "Invalid port id\n");
1773 return BNXT_TF_RC_ERROR;
1776 /* Get the intf type */
1777 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1779 BNXT_TF_DBG(ERR, "Invalid port type\n");
1780 return BNXT_TF_RC_ERROR;
1783 /* Set the action port */
1784 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1785 return ulp_rte_parser_act_port_set(param, ifindex);
1789 /* Function to handle the parsing of RTE Flow action phy_port. */
/*
 * Egress-only: resolves the physical port index to a vport via the port db,
 * stores it (big-endian) in the VPORT action property, and marks the action
 * port as set with type PHY_PORT.
 * NOTE(review): `rc`, `pid_s`, `pid` declarations, the `if (!phy_port)`
 * guard and the rc error-return line are missing from this excerpt.
 */
1790 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1791 struct ulp_rte_parser_params *prm)
1793 const struct rte_flow_action_phy_port *phy_port;
1797 enum bnxt_ulp_direction_type dir;
1799 phy_port = action_item->conf;
1802 "ParseErr: Invalid Argument\n");
1803 return BNXT_TF_RC_PARSE_ERR;
/* `original` semantics are not supported for the phy_port action. */
1806 if (phy_port->original) {
1808 "Parse Err:Port Original not supported\n");
1809 return BNXT_TF_RC_PARSE_ERR;
1811 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1812 if (dir != BNXT_ULP_DIR_EGRESS) {
1814 "Parse Err:Phy ports are valid only for egress\n");
1815 return BNXT_TF_RC_PARSE_ERR;
1817 /* Get the physical port details from port db */
1818 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1821 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1826 pid = rte_cpu_to_be_32(pid);
1827 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1828 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1830 /* Update the action port set bit */
1831 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1832 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1833 BNXT_ULP_INTF_TYPE_PHY_PORT);
1834 return BNXT_TF_RC_SUCCESS;
1837 /* Function to handle the parsing of RTE Flow action pop vlan. */
/* Pop-VLAN carries no arguments; just set the POP_VLAN action bit. */
1839 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1840 struct ulp_rte_parser_params *params)
1842 /* Update the act_bitmap with pop */
1843 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1844 return BNXT_TF_RC_SUCCESS;
1847 /* Function to handle the parsing of RTE Flow action push vlan. */
/*
 * Accepts only ethertype 0x8100 (802.1Q), stores it in the PUSH_VLAN action
 * property and sets the PUSH_VLAN action bit.
 * NOTE(review): the `if (push_vlan)` guard and `ethertype` declaration are
 * on lines missing from this excerpt.
 */
1849 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1850 struct ulp_rte_parser_params *params)
1852 const struct rte_flow_action_of_push_vlan *push_vlan;
1854 struct ulp_rte_act_prop *act = &#182;ms->act_prop;
1856 push_vlan = action_item->conf;
1858 ethertype = push_vlan->ethertype;
1859 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1861 "Parse Err: Ethertype not supported\n");
1862 return BNXT_TF_RC_PARSE_ERR;
1864 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1865 &#240;ertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1866 /* Update the hdr_bitmap with push vlan */
1867 ULP_BITMAP_SET(params->act_bitmap.bits,
1868 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1869 return BNXT_TF_RC_SUCCESS;
1871 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1872 return BNXT_TF_RC_ERROR;
1875 /* Function to handle the parsing of RTE Flow action set vlan id. */
/*
 * Copies the VLAN vid into the SET_VLAN_VID action property and sets the
 * corresponding action bit.  NOTE(review): the `vid` declaration is on a
 * line missing from this excerpt.
 */
1877 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1878 struct ulp_rte_parser_params *params)
1880 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1882 struct ulp_rte_act_prop *act = &#182;ms->act_prop;
1884 vlan_vid = action_item->conf;
/* Note: a zero vid is treated as invalid by this check. */
1885 if (vlan_vid && vlan_vid->vlan_vid) {
1886 vid = vlan_vid->vlan_vid;
1887 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1888 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1889 /* Update the hdr_bitmap with vlan vid */
1890 ULP_BITMAP_SET(params->act_bitmap.bits,
1891 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1892 return BNXT_TF_RC_SUCCESS;
1894 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1895 return BNXT_TF_RC_ERROR;
1898 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
/*
 * Copies the VLAN pcp into the SET_VLAN_PCP action property and sets the
 * corresponding action bit.  NOTE(review): the `if (vlan_pcp)` guard and
 * `pcp` declaration are on lines missing from this excerpt.
 */
1900 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1901 struct ulp_rte_parser_params *params)
1903 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1905 struct ulp_rte_act_prop *act = &#182;ms->act_prop;
1907 vlan_pcp = action_item->conf;
1909 pcp = vlan_pcp->vlan_pcp;
1910 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1911 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1912 /* Update the hdr_bitmap with vlan pcp */
1913 ULP_BITMAP_SET(params->act_bitmap.bits,
1914 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1915 return BNXT_TF_RC_SUCCESS;
1917 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1918 return BNXT_TF_RC_ERROR;
1921 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
/*
 * Copies the new IPv4 source address into the SET_IPV4_SRC action property
 * and sets the corresponding action bit.  NOTE(review): the `if (set_ipv4)`
 * guard is on a line missing from this excerpt.
 */
1923 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
1924 struct ulp_rte_parser_params *params)
1926 const struct rte_flow_action_set_ipv4 *set_ipv4;
1927 struct ulp_rte_act_prop *act = &#182;ms->act_prop;
1929 set_ipv4 = action_item->conf;
1931 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
1932 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
1933 /* Update the hdr_bitmap with set ipv4 src */
1934 ULP_BITMAP_SET(params->act_bitmap.bits,
1935 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
1936 return BNXT_TF_RC_SUCCESS;
1938 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
1939 return BNXT_TF_RC_ERROR;
1942 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
/*
 * Copies the new IPv4 destination address into the SET_IPV4_DST action
 * property and sets the corresponding action bit.  NOTE(review): the
 * `if (set_ipv4)` guard is on a line missing from this excerpt.
 */
1944 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
1945 struct ulp_rte_parser_params *params)
1947 const struct rte_flow_action_set_ipv4 *set_ipv4;
1948 struct ulp_rte_act_prop *act = &#182;ms->act_prop;
1950 set_ipv4 = action_item->conf;
1952 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
1953 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
1954 /* Update the hdr_bitmap with set ipv4 dst */
1955 ULP_BITMAP_SET(params->act_bitmap.bits,
1956 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
1957 return BNXT_TF_RC_SUCCESS;
1959 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
1960 return BNXT_TF_RC_ERROR;
1963 /* Function to handle the parsing of RTE Flow action set tp src.*/
/*
 * Copies the new transport source port into the SET_TP_SRC action property
 * and sets the corresponding action bit.  NOTE(review): the `if (set_tp)`
 * guard is on a line missing from this excerpt.
 */
1965 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
1966 struct ulp_rte_parser_params *params)
1968 const struct rte_flow_action_set_tp *set_tp;
1969 struct ulp_rte_act_prop *act = &#182;ms->act_prop;
1971 set_tp = action_item->conf;
1973 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
1974 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
1975 /* Update the hdr_bitmap with set tp src */
1976 ULP_BITMAP_SET(params->act_bitmap.bits,
1977 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
1978 return BNXT_TF_RC_SUCCESS;
1981 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1982 return BNXT_TF_RC_ERROR;
1985 /* Function to handle the parsing of RTE Flow action set tp dst.*/
/*
 * Copies the new transport destination port into the SET_TP_DST action
 * property and sets the corresponding action bit.  NOTE(review): the
 * `if (set_tp)` guard is on a line missing from this excerpt.
 */
1987 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
1988 struct ulp_rte_parser_params *params)
1990 const struct rte_flow_action_set_tp *set_tp;
1991 struct ulp_rte_act_prop *act = &#182;ms->act_prop;
1993 set_tp = action_item->conf;
1995 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
1996 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
1997 /* Update the hdr_bitmap with set tp dst */
1998 ULP_BITMAP_SET(params->act_bitmap.bits,
1999 BNXT_ULP_ACTION_BIT_SET_TP_DST);
2000 return BNXT_TF_RC_SUCCESS;
/* NOTE(review): message says "src" but this is the dst handler — the
 * runtime string should read "set tp dst"; left unchanged here since this
 * is a comment-only pass.
 */
2003 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2004 return BNXT_TF_RC_ERROR;
2007 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2009 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2010 struct ulp_rte_parser_params *params)
2012 /* Update the act_bitmap with dec ttl */
2013 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2014 return BNXT_TF_RC_SUCCESS;