1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Local defines for the parsing functions */
16 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
17 #define ULP_VLAN_PRIORITY_MASK 0x700
18 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits */
19 #define ULP_UDP_PORT_VXLAN 4789
21 /* Utility function to skip the void items. */
/*
 * Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries.
 * NOTE(review): loop body and return are not visible in this view;
 * presumably *item is stepped by 'increment' each iteration — confirm.
 */
23 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
29 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
36 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] as in-use in params->fld_bitmap when its mask is
 * non-zero; additionally flags a wildcard match (BITMASK_WM) when the mask
 * is not all-ones. A zero mask clears the index bit instead.
 */
38 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
41 struct ulp_rte_hdr_field *field;
43 field = &params->hdr_field[idx];
44 if (ulp_bitmap_notzero(field->mask, field->size)) {
45 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial (not all-ones) mask => wildcard-match entry */
47 if (!ulp_bitmap_is_ones(field->mask, field->size))
48 ULP_BITMAP_SET(params->fld_bitmap.bits,
49 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
/* else branch (mask all zero): drop the field from the bitmap */
51 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
55 /* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec (field->size bytes) and — per the
 * callers' usage — returns the next hdr_field so copies can be chained.
 * NOTE(review): size assignment/return lines are not visible here.
 */
56 static struct ulp_rte_hdr_field *
57 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
62 memcpy(field->spec, buffer, field->size);
67 /* Utility function to copy field masks items */
/*
 * Copies 'size' bytes of 'buffer' into hdr_field[*idx].mask and refreshes
 * the field bitmap for that index. NOTE(review): *idx is presumably
 * incremented afterwards (line not visible) — confirm against callers.
 */
69 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
74 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
76 memcpy(field->mask, buffer, size);
77 ulp_rte_parser_field_bitmap_update(params, *idx);
82 * Function to handle the parsing of RTE Flows and placing
83 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow item array until RTE_FLOW_ITEM_TYPE_END, dispatching
 * each supported item to its registered handler from ulp_hdr_info[].
 * Returns BNXT_TF_RC_PARSE_ERR for unsupported items, BNXT_TF_RC_ERROR on
 * handler failure, otherwise falls through to the implicit SVIF update.
 */
86 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
87 struct ulp_rte_parser_params *params)
89 const struct rte_flow_item *item = pattern;
90 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Field index starts after the reserved SVIF slots */
92 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
94 /* Set the computed flags for no vlan tags before parsing */
95 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
96 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
98 /* Parse all the items in the pattern */
99 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
100 /* get the header information from the flow_hdr_info table */
101 hdr_info = &ulp_hdr_info[item->type];
102 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
104 "Truflow parser does not support type %d\n",
106 return BNXT_TF_RC_PARSE_ERR;
107 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
108 /* call the registered callback handler */
109 if (hdr_info->proto_hdr_func) {
110 if (hdr_info->proto_hdr_func(item, params) !=
111 BNXT_TF_RC_SUCCESS) {
112 return BNXT_TF_RC_ERROR;
118 /* update the implied SVIF */
119 return ulp_rte_parser_implicit_match_port_process(params);
123 * Function to handle the parsing of RTE Flows and placing
124 * the RTE flow actions into the ulp structures.
/*
 * Mirrors bnxt_ulp_rte_parser_hdr_parse() for actions: walks the action
 * array until RTE_FLOW_ACTION_TYPE_END dispatching via ulp_act_info[],
 * then applies the implicit action-port fixup.
 */
127 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
128 struct ulp_rte_parser_params *params)
130 const struct rte_flow_action *action_item = actions;
131 struct bnxt_ulp_rte_act_info *hdr_info;
133 /* Parse all the items in the pattern */
134 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
135 /* get the header information from the flow_hdr_info table */
136 hdr_info = &ulp_act_info[action_item->type];
137 if (hdr_info->act_type ==
138 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
140 "Truflow parser does not support act %u\n",
142 return BNXT_TF_RC_ERROR;
143 } else if (hdr_info->act_type ==
144 BNXT_ULP_ACT_TYPE_SUPPORTED) {
145 /* call the registered callback handler */
146 if (hdr_info->proto_act_func) {
147 if (hdr_info->proto_act_func(action_item,
149 BNXT_TF_RC_SUCCESS) {
150 return BNXT_TF_RC_ERROR;
156 /* update the implied port details */
157 ulp_rte_parser_implicit_act_port_process(params);
158 return BNXT_TF_RC_SUCCESS;
162 * Function to handle the post processing of the computed
163 * fields for the interface.
/*
 * Resolves the dpdk port id recorded in INCOMING_IF to a ulp ifindex,
 * then populates the PARIF computed fields. Ingress flows get the
 * physical-port PARIF; VF-rep matches additionally record the VF-func
 * PARIF, and (per the visible tail) the driver-func PARIF is also set.
 * NOTE(review): several closing braces/returns are not visible here.
 */
166 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
169 uint16_t port_id, parif;
171 enum bnxt_ulp_direction_type dir;
173 /* get the direction details */
174 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
176 /* read the port id details */
177 port_id = ULP_COMP_FLD_IDX_RD(params,
178 BNXT_ULP_CF_IDX_INCOMING_IF);
179 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
182 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
186 if (dir == BNXT_ULP_DIR_INGRESS) {
/* Ingress: record the physical port PARIF */
188 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
189 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
190 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
193 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
196 /* Get the match port type */
197 mtype = ULP_COMP_FLD_IDX_RD(params,
198 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
199 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
200 ULP_COMP_FLD_IDX_WR(params,
201 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
203 /* Set VF func PARIF */
204 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
205 BNXT_ULP_VF_FUNC_PARIF,
208 "ParseErr:ifindex is not valid\n");
211 ULP_COMP_FLD_IDX_WR(params,
212 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
215 /* Set DRV func PARIF */
216 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
217 BNXT_ULP_DRV_FUNC_PARIF,
220 "ParseErr:ifindex is not valid\n");
223 ULP_COMP_FLD_IDX_WR(params,
224 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
231 * Function to handle the post processing of the parsing details
/*
 * After both item and action parsing: stamps the flow direction into the
 * header/action bitmaps, derives the VF-to-VF flag, converts the DEC_TTL
 * action into tunnel/non-tunnel computed fields, merges the fast-path
 * header bits, and refreshes the interface computed fields.
 */
234 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
236 enum bnxt_ulp_direction_type dir;
237 enum bnxt_ulp_intf_type match_port_type, act_port_type;
238 uint32_t act_port_set;
240 /* Get the computed details */
241 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
242 match_port_type = ULP_COMP_FLD_IDX_RD(params,
243 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
244 act_port_type = ULP_COMP_FLD_IDX_RD(params,
245 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
246 act_port_set = ULP_COMP_FLD_IDX_RD(params,
247 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
249 /* set the flow direction in the proto and action header */
250 if (dir == BNXT_ULP_DIR_EGRESS) {
251 ULP_BITMAP_SET(params->hdr_bitmap.bits,
252 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
253 ULP_BITMAP_SET(params->act_bitmap.bits,
254 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
257 /* calculate the VF to VF flag */
258 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
259 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
260 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
262 /* Update the decrement ttl computational fields */
263 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
264 BNXT_ULP_ACTION_BIT_DEC_TTL)) {
266 * Check that vxlan proto is included and vxlan decap
267 * action is not set then decrement tunnel ttl.
268 * Similarly add GRE and NVGRE in future.
270 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
271 BNXT_ULP_HDR_BIT_T_VXLAN) &&
272 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
273 BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
274 ULP_COMP_FLD_IDX_WR(params,
275 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
/* else branch: plain (non-tunnel) ttl decrement */
277 ULP_COMP_FLD_IDX_WR(params,
278 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
282 /* Merge the hdr_fp_bit into the proto header bit */
283 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
285 /* Update the computed interface parameters */
286 bnxt_ulp_comp_fld_intf_update(params);
288 /* TBD: Handle the flow rejection scenarios */
293 * Function to compute the flow direction based on the match port details
/*
 * Direction rule: an ingress-attribute flow matching a VF representor is
 * treated as egress (traffic originates from the VF); otherwise the flow
 * attribute direction is used verbatim.
 */
296 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
298 enum bnxt_ulp_intf_type match_port_type;
300 /* Get the match port type */
301 match_port_type = ULP_COMP_FLD_IDX_RD(params,
302 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
304 /* If ingress flow and matchport is vf rep then dir is egress*/
305 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
306 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
307 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
308 BNXT_ULP_DIR_EGRESS);
310 /* Assign the input direction */
311 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
312 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
313 BNXT_ULP_DIR_INGRESS);
/* else: default to egress direction */
315 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
316 BNXT_ULP_DIR_EGRESS);
320 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolves and stores the source virtual interface (SVIF) for the match
 * port identified by 'ifindex'. Rejects a second source item (SVIF may be
 * set only once per flow), chooses the SVIF type from the computed
 * direction and port type, then writes big-endian svif/mask into the
 * reserved SVIF hdr_field slot and records the SVIF_FLAG computed field.
 */
322 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
327 enum bnxt_ulp_direction_type dir;
328 struct ulp_rte_hdr_field *hdr_field;
329 enum bnxt_ulp_svif_type svif_type;
330 enum bnxt_ulp_intf_type port_type;
/* Only one source (PF/VF/port_id/phy_port) item allowed per flow */
332 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
333 BNXT_ULP_INVALID_SVIF_VAL) {
335 "SVIF already set,multiple source not support'd\n");
336 return BNXT_TF_RC_ERROR;
339 /* Get port type details */
340 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
341 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
342 BNXT_TF_DBG(ERR, "Invalid port type\n");
343 return BNXT_TF_RC_ERROR;
346 /* Update the match port type */
347 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
349 /* compute the direction */
350 bnxt_ulp_rte_parser_direction_compute(params);
352 /* Get the computed direction */
353 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
354 if (dir == BNXT_ULP_DIR_INGRESS) {
355 svif_type = BNXT_ULP_PHY_PORT_SVIF;
/* Egress: VF reps use the VF function SVIF, others the driver func */
357 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
358 svif_type = BNXT_ULP_VF_FUNC_SVIF;
360 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
362 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* Hardware expects the SVIF in network byte order */
364 svif = rte_cpu_to_be_16(svif);
365 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
366 memcpy(hdr_field->spec, &svif, sizeof(svif));
367 memcpy(hdr_field->mask, &mask, sizeof(mask));
368 hdr_field->size = sizeof(svif);
369 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
370 rte_be_to_cpu_16(svif));
371 return BNXT_TF_RC_SUCCESS;
374 /* Function to handle the parsing of the RTE port id */
/*
 * Fallback when no explicit source item set the SVIF: derives it from the
 * incoming dpdk port (INCOMING_IF) with a full 0xFFFF mask. No-op success
 * when the SVIF is already set.
 */
376 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
378 uint16_t port_id = 0;
379 uint16_t svif_mask = 0xFFFF;
381 int32_t rc = BNXT_TF_RC_ERROR;
383 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
384 BNXT_ULP_INVALID_SVIF_VAL)
385 return BNXT_TF_RC_SUCCESS;
387 /* SVIF not set. So get the port id */
388 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
390 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
393 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
397 /* Update the SVIF details */
398 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
402 /* Function to handle the implicit action port id */
/*
 * If no explicit port action was given, synthesizes a port_id action from
 * the incoming interface and runs it through the regular port-id action
 * handler, then clears ACT_PORT_IS_SET so the port is still treated as
 * implicit. Always returns BNXT_TF_RC_SUCCESS.
 */
404 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
406 struct rte_flow_action action_item = {0};
407 struct rte_flow_action_port_id port_id = {0};
409 /* Read the action port set bit */
410 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
411 /* Already set, so just exit */
412 return BNXT_TF_RC_SUCCESS;
414 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
415 action_item.conf = &port_id;
417 /* Update the action port based on incoming port */
418 ulp_rte_port_id_act_handler(&action_item, params);
420 /* Reset the action port set bit */
421 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
422 return BNXT_TF_RC_SUCCESS;
425 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item carries no spec: the match port is the implicit incoming dpdk
 * port, converted to a ulp ifindex and stored as SVIF with a full mask.
 */
427 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
428 struct ulp_rte_parser_params *params)
430 uint16_t port_id = 0;
431 uint16_t svif_mask = 0xFFFF;
434 /* Get the implicit port id */
435 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
437 /* perform the conversion from dpdk port to bnxt ifindex */
438 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
441 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
442 return BNXT_TF_RC_ERROR;
445 /* Update the SVIF details */
446 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
449 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * Validates the VF item's spec/mask (error paths visible below), converts
 * the VF function id to a ulp ifindex, and records the SVIF.
 * NOTE(review): the spec/mask validation conditions themselves are not
 * visible in this view — only their error messages.
 */
451 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
452 struct ulp_rte_parser_params *params)
454 const struct rte_flow_item_vf *vf_spec = item->spec;
455 const struct rte_flow_item_vf *vf_mask = item->mask;
458 int32_t rc = BNXT_TF_RC_PARSE_ERR;
460 /* Get VF rte_flow_item for Port details */
462 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
466 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
471 /* perform the conversion from VF Func id to bnxt ifindex */
472 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
475 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
478 /* Update the SVIF details */
479 return ulp_rte_parser_svif_set(params, ifindex, mask);
482 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * Matches on a dpdk port id: validates spec/mask, converts the port id to
 * a ulp ifindex, and stores the SVIF using the item's mask.
 */
484 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
485 struct ulp_rte_parser_params *params)
487 const struct rte_flow_item_port_id *port_spec = item->spec;
488 const struct rte_flow_item_port_id *port_mask = item->mask;
490 int32_t rc = BNXT_TF_RC_PARSE_ERR;
494 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
/* NOTE(review): message says "Phy Port" but this is the port_id item */
498 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
501 mask = port_mask->id;
503 /* perform the conversion from dpdk port to bnxt ifindex */
504 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
507 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
510 /* Update the SVIF details */
511 return ulp_rte_parser_svif_set(params, ifindex, mask);
514 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * Matches on a physical port index. Unlike the other source-item handlers
 * this one resolves the SVIF directly from the port db (phy ports are
 * ingress-only) and writes it into the SVIF hdr_field itself rather than
 * going through ulp_rte_parser_svif_set().
 */
516 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
517 struct ulp_rte_parser_params *params)
519 const struct rte_flow_item_phy_port *port_spec = item->spec;
520 const struct rte_flow_item_phy_port *port_mask = item->mask;
522 int32_t rc = BNXT_TF_RC_ERROR;
524 enum bnxt_ulp_direction_type dir;
525 struct ulp_rte_hdr_field *hdr_field;
527 /* Copy the rte_flow_item for phy port into hdr_field */
529 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
533 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
536 mask = port_mask->index;
538 /* Update the match port type */
539 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
540 BNXT_ULP_INTF_TYPE_PHY_PORT);
542 /* Compute the Hw direction */
543 bnxt_ulp_rte_parser_direction_compute(params);
545 /* Direction validation */
546 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
547 if (dir == BNXT_ULP_DIR_EGRESS) {
549 "Parse Err:Phy ports are valid only for ingress\n");
550 return BNXT_TF_RC_PARSE_ERR;
553 /* Get the physical port details from port db */
554 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
557 BNXT_TF_DBG(ERR, "Failed to get port details\n");
558 return BNXT_TF_RC_PARSE_ERR;
561 /* Update the SVIF details */
562 svif = rte_cpu_to_be_16(svif);
563 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
564 memcpy(hdr_field->spec, &svif, sizeof(svif));
565 memcpy(hdr_field->mask, &mask, sizeof(mask));
566 hdr_field->size = sizeof(svif);
567 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
568 rte_be_to_cpu_16(svif));
569 return BNXT_TF_RC_SUCCESS;
572 /* Function to handle the update of proto header based on field values */
/*
 * Maps an L2 ethertype (big-endian, as taken from the item spec) to the
 * inner/outer IPv4 or IPv6 fast-path header bit and sets the matching
 * I_L3/O_L3 computed field; 'in_flag' selects inner vs outer.
 */
574 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
575 uint16_t type, uint32_t in_flag)
577 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
579 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
580 BNXT_ULP_HDR_BIT_I_IPV4);
581 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
583 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
584 BNXT_ULP_HDR_BIT_O_IPV4);
585 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
587 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
589 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
590 BNXT_ULP_HDR_BIT_I_IPV6);
591 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
593 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
594 BNXT_ULP_HDR_BIT_O_IPV6);
595 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
600 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Copies dst MAC, src MAC and ethertype spec/mask into consecutive
 * hdr_field slots, reserves the vlan field slots that follow, and sets
 * the outer or inner ETH header bit (inner when outer is already set).
 * The ethertype then drives the L3 fast-path bit via
 * ulp_rte_l2_proto_type_update().
 */
602 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
603 struct ulp_rte_parser_params *params)
605 const struct rte_flow_item_eth *eth_spec = item->spec;
606 const struct rte_flow_item_eth *eth_mask = item->mask;
607 struct ulp_rte_hdr_field *field;
608 uint32_t idx = params->field_idx;
610 uint16_t eth_type = 0;
611 uint32_t inner_flag = 0;
614 * Copy the rte_flow_item for eth into hdr_field using ethernet
618 size = sizeof(eth_spec->dst.addr_bytes);
619 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
620 eth_spec->dst.addr_bytes,
622 size = sizeof(eth_spec->src.addr_bytes);
623 field = ulp_rte_parser_fld_copy(field,
624 eth_spec->src.addr_bytes,
626 field = ulp_rte_parser_fld_copy(field,
628 sizeof(eth_spec->type));
629 eth_type = eth_spec->type;
/* Masks follow the same dst/src/type order as the spec copies */
632 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
633 sizeof(eth_mask->dst.addr_bytes));
634 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
635 sizeof(eth_mask->src.addr_bytes));
636 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
637 sizeof(eth_mask->type));
639 /* Add number of vlan header elements */
640 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
641 params->vlan_idx = params->field_idx;
642 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
644 /* Update the protocol hdr bitmap */
645 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
646 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
/* else: first eth header seen => outer */
649 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
651 /* Update the field protocol hdr bitmap */
652 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
654 return BNXT_TF_RC_SUCCESS;
657 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Splits the vlan TCI into priority (top 3 bits) and tag (low 12 bits),
 * copies priority/tag/inner_type spec and mask into the vlan field slots
 * reserved by the eth handler, then classifies this tag as outer-outer,
 * outer-inner, inner-outer or inner-inner based on which eth headers have
 * been seen and the current vtag counts, updating the computed fields and
 * hdr_bitmap accordingly.
 */
659 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
660 struct ulp_rte_parser_params *params)
662 const struct rte_flow_item_vlan *vlan_spec = item->spec;
663 const struct rte_flow_item_vlan *vlan_mask = item->mask;
664 struct ulp_rte_hdr_field *field;
665 struct ulp_rte_hdr_bitmap *hdr_bit;
666 uint32_t idx = params->vlan_idx;
667 uint16_t vlan_tag, priority;
668 uint32_t outer_vtag_num;
669 uint32_t inner_vtag_num;
670 uint16_t eth_type = 0;
671 uint32_t inner_flag = 0;
674 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* Split TCI: priority = top 3 bits, tag = low 12 bits (stored BE) */
678 vlan_tag = ntohs(vlan_spec->tci);
679 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
680 vlan_tag &= ULP_VLAN_TAG_MASK;
681 vlan_tag = htons(vlan_tag);
683 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
686 field = ulp_rte_parser_fld_copy(field,
689 field = ulp_rte_parser_fld_copy(field,
690 &vlan_spec->inner_type,
691 sizeof(vlan_spec->inner_type));
692 eth_type = vlan_spec->inner_type;
/* Same TCI split for the mask */
696 vlan_tag = ntohs(vlan_mask->tci);
697 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
701 * the storage for priority and vlan tag is 2 bytes
702 * The mask of priority which is 3 bits if it is all 1's
703 * then make the rest bits 13 bits as 1's
704 * so that it is matched as exact match.
706 if (priority == ULP_VLAN_PRIORITY_MASK)
707 priority |= ~ULP_VLAN_PRIORITY_MASK;
708 if (vlan_tag == ULP_VLAN_TAG_MASK)
709 vlan_tag |= ~ULP_VLAN_TAG_MASK;
710 vlan_tag = htons(vlan_tag);
712 ulp_rte_prsr_mask_copy(params, &idx, &priority,
714 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
716 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
717 sizeof(vlan_mask->inner_type));
719 /* Set the vlan index to new incremented value */
720 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
722 /* Get the outer tag and inner tag counts */
723 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
724 BNXT_ULP_CF_IDX_O_VTAG_NUM);
725 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
726 BNXT_ULP_CF_IDX_I_VTAG_NUM);
728 /* Update the hdr_bitmap of the vlans */
729 hdr_bit = &params->hdr_bitmap;
/* Case 1: first tag on the outer eth header */
730 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
731 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
733 /* Update the vlan tag num */
735 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
737 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
738 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
739 ULP_BITMAP_SET(params->hdr_bitmap.bits,
740 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Case 2: second tag on the outer eth header */
741 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
742 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
743 outer_vtag_num == 1) {
744 /* update the vlan tag num */
746 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
748 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
749 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
750 ULP_BITMAP_SET(params->hdr_bitmap.bits,
751 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Case 3: first tag on the inner eth header */
752 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
753 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
755 /* update the vlan tag num */
757 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
759 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
760 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
761 ULP_BITMAP_SET(params->hdr_bitmap.bits,
762 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Case 4: second tag on the inner eth header */
764 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
765 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
766 inner_vtag_num == 1) {
767 /* update the vlan tag num */
769 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
771 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
772 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
773 ULP_BITMAP_SET(params->hdr_bitmap.bits,
774 BNXT_ULP_HDR_BIT_II_VLAN);
/* NOTE(review): "withtout" typo in the log message below */
777 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
778 return BNXT_TF_RC_ERROR;
780 /* Update the field protocol hdr bitmap */
781 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
782 return BNXT_TF_RC_SUCCESS;
785 /* Function to handle the update of proto header based on field values */
/*
 * Maps an L3 next-protocol value to the inner/outer UDP or TCP fast-path
 * header bit and sets the matching I_L4/O_L4 computed field; 'in_flag'
 * selects inner vs outer.
 */
787 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
788 uint8_t proto, uint32_t in_flag)
790 if (proto == IPPROTO_UDP) {
792 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
793 BNXT_ULP_HDR_BIT_I_UDP);
794 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
796 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
797 BNXT_ULP_HDR_BIT_O_UDP);
798 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
800 } else if (proto == IPPROTO_TCP) {
802 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
803 BNXT_ULP_HDR_BIT_I_TCP);
804 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
806 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
807 BNXT_ULP_HDR_BIT_O_TCP);
808 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
813 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * Copies every ipv4 header field spec (version_ihl .. dst_addr) and mask
 * into consecutive hdr_field slots, caps the total L3 headers at two,
 * sets the outer or inner IPV4 header bit, and lets next_proto_id drive
 * the L4 fast-path bit. The TOS mask copy is compiled out unless
 * ULP_DONT_IGNORE_TOS is defined (OVS wildcards TOS; see comment below).
 */
815 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
816 struct ulp_rte_parser_params *params)
818 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
819 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
820 struct ulp_rte_hdr_field *field;
821 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
822 uint32_t idx = params->field_idx;
825 uint32_t inner_flag = 0;
828 /* validate there are no 3rd L3 header */
829 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
831 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
832 return BNXT_TF_RC_ERROR;
836 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
840 size = sizeof(ipv4_spec->hdr.version_ihl);
841 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
842 &ipv4_spec->hdr.version_ihl,
844 size = sizeof(ipv4_spec->hdr.type_of_service);
845 field = ulp_rte_parser_fld_copy(field,
846 &ipv4_spec->hdr.type_of_service,
848 size = sizeof(ipv4_spec->hdr.total_length);
849 field = ulp_rte_parser_fld_copy(field,
850 &ipv4_spec->hdr.total_length,
852 size = sizeof(ipv4_spec->hdr.packet_id);
853 field = ulp_rte_parser_fld_copy(field,
854 &ipv4_spec->hdr.packet_id,
856 size = sizeof(ipv4_spec->hdr.fragment_offset);
857 field = ulp_rte_parser_fld_copy(field,
858 &ipv4_spec->hdr.fragment_offset,
860 size = sizeof(ipv4_spec->hdr.time_to_live);
861 field = ulp_rte_parser_fld_copy(field,
862 &ipv4_spec->hdr.time_to_live,
864 size = sizeof(ipv4_spec->hdr.next_proto_id);
865 field = ulp_rte_parser_fld_copy(field,
866 &ipv4_spec->hdr.next_proto_id,
/* Remember the L4 protocol to classify UDP/TCP later */
868 proto = ipv4_spec->hdr.next_proto_id;
869 size = sizeof(ipv4_spec->hdr.hdr_checksum);
870 field = ulp_rte_parser_fld_copy(field,
871 &ipv4_spec->hdr.hdr_checksum,
873 size = sizeof(ipv4_spec->hdr.src_addr);
874 field = ulp_rte_parser_fld_copy(field,
875 &ipv4_spec->hdr.src_addr,
877 size = sizeof(ipv4_spec->hdr.dst_addr);
878 field = ulp_rte_parser_fld_copy(field,
879 &ipv4_spec->hdr.dst_addr,
/* Mask copies mirror the spec order above */
883 ulp_rte_prsr_mask_copy(params, &idx,
884 &ipv4_mask->hdr.version_ihl,
885 sizeof(ipv4_mask->hdr.version_ihl));
886 #ifdef ULP_DONT_IGNORE_TOS
887 ulp_rte_prsr_mask_copy(params, &idx,
888 &ipv4_mask->hdr.type_of_service,
889 sizeof(ipv4_mask->hdr.type_of_service));
892 * The tos field is ignored since OVS is setting it as wild card
893 * match and it is not supported. This is a work around and
894 * shall be addressed in the future.
899 ulp_rte_prsr_mask_copy(params, &idx,
900 &ipv4_mask->hdr.total_length,
901 sizeof(ipv4_mask->hdr.total_length));
902 ulp_rte_prsr_mask_copy(params, &idx,
903 &ipv4_mask->hdr.packet_id,
904 sizeof(ipv4_mask->hdr.packet_id));
905 ulp_rte_prsr_mask_copy(params, &idx,
906 &ipv4_mask->hdr.fragment_offset,
907 sizeof(ipv4_mask->hdr.fragment_offset));
908 ulp_rte_prsr_mask_copy(params, &idx,
909 &ipv4_mask->hdr.time_to_live,
910 sizeof(ipv4_mask->hdr.time_to_live));
911 ulp_rte_prsr_mask_copy(params, &idx,
912 &ipv4_mask->hdr.next_proto_id,
913 sizeof(ipv4_mask->hdr.next_proto_id));
914 ulp_rte_prsr_mask_copy(params, &idx,
915 &ipv4_mask->hdr.hdr_checksum,
916 sizeof(ipv4_mask->hdr.hdr_checksum));
917 ulp_rte_prsr_mask_copy(params, &idx,
918 &ipv4_mask->hdr.src_addr,
919 sizeof(ipv4_mask->hdr.src_addr));
920 ulp_rte_prsr_mask_copy(params, &idx,
921 &ipv4_mask->hdr.dst_addr,
922 sizeof(ipv4_mask->hdr.dst_addr));
924 /* Add the number of ipv4 header elements */
925 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
927 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
928 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
929 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
930 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
931 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
/* else: first L3 header seen => outer */
934 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
935 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
938 /* Update the field protocol hdr bitmap */
939 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
940 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
941 return BNXT_TF_RC_SUCCESS;
944 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPv6 analogue of the ipv4 handler: vtc_flow is decomposed into
 * version, traffic class and flow label via the BNXT_ULP_GET_IPV6_*
 * macros, each copied (spec then mask) into its own hdr_field slot,
 * followed by payload_len, proto, hop_limits, src/dst addresses. Caps L3
 * headers at two and classifies inner/outer as in the ipv4 path.
 */
946 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
947 struct ulp_rte_parser_params *params)
949 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
950 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
951 struct ulp_rte_hdr_field *field;
952 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
953 uint32_t idx = params->field_idx;
955 uint32_t vtcf, vtcf_mask;
957 uint32_t inner_flag = 0;
960 /* validate there are no 3rd L3 header */
961 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
963 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
964 return BNXT_TF_RC_ERROR;
968 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
972 size = sizeof(ipv6_spec->hdr.vtc_flow);
/* vtc_flow split: version, then traffic class, then flow label */
974 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
975 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
979 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
980 field = ulp_rte_parser_fld_copy(field,
984 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
985 field = ulp_rte_parser_fld_copy(field,
989 size = sizeof(ipv6_spec->hdr.payload_len);
990 field = ulp_rte_parser_fld_copy(field,
991 &ipv6_spec->hdr.payload_len,
993 size = sizeof(ipv6_spec->hdr.proto);
994 field = ulp_rte_parser_fld_copy(field,
995 &ipv6_spec->hdr.proto,
/* Remember the L4 protocol to classify UDP/TCP later */
997 proto = ipv6_spec->hdr.proto;
998 size = sizeof(ipv6_spec->hdr.hop_limits);
999 field = ulp_rte_parser_fld_copy(field,
1000 &ipv6_spec->hdr.hop_limits,
1002 size = sizeof(ipv6_spec->hdr.src_addr);
1003 field = ulp_rte_parser_fld_copy(field,
1004 &ipv6_spec->hdr.src_addr,
1006 size = sizeof(ipv6_spec->hdr.dst_addr);
1007 field = ulp_rte_parser_fld_copy(field,
1008 &ipv6_spec->hdr.dst_addr,
/* Mask copies mirror the spec order above */
1012 size = sizeof(ipv6_mask->hdr.vtc_flow);
1014 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1015 ulp_rte_prsr_mask_copy(params, &idx,
1019 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1020 ulp_rte_prsr_mask_copy(params, &idx,
1025 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1026 ulp_rte_prsr_mask_copy(params, &idx,
1030 ulp_rte_prsr_mask_copy(params, &idx,
1031 &ipv6_mask->hdr.payload_len,
1032 sizeof(ipv6_mask->hdr.payload_len));
1033 ulp_rte_prsr_mask_copy(params, &idx,
1034 &ipv6_mask->hdr.proto,
1035 sizeof(ipv6_mask->hdr.proto));
1036 ulp_rte_prsr_mask_copy(params, &idx,
1037 &ipv6_mask->hdr.hop_limits,
1038 sizeof(ipv6_mask->hdr.hop_limits));
1039 ulp_rte_prsr_mask_copy(params, &idx,
1040 &ipv6_mask->hdr.src_addr,
1041 sizeof(ipv6_mask->hdr.src_addr));
1042 ulp_rte_prsr_mask_copy(params, &idx,
1043 &ipv6_mask->hdr.dst_addr,
1044 sizeof(ipv6_mask->hdr.dst_addr));
1046 /* add number of ipv6 header elements */
1047 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1049 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1050 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1051 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1052 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1053 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
/* else: first L3 header seen => outer */
1056 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1057 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1060 /* Update the field protocol hdr bitmap */
1061 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1062 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1064 return BNXT_TF_RC_SUCCESS;
1067 /* Function to handle the update of proto header based on field values */
/*
 * Sets the VXLAN tunnel fast-path bit when the (big-endian) UDP
 * destination port is the IANA VXLAN port (4789).
 */
1069 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1072 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1073 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1074 BNXT_ULP_HDR_BIT_T_VXLAN);
1077 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies src/dst port, datagram length and checksum spec/mask into
 * consecutive hdr_field slots, caps L4 headers at two, and sets the
 * outer or inner UDP bit. Only an outer UDP header can mark the flow as
 * VXLAN-tunneled (via dst port 4789).
 * NOTE(review): comment at original line 1098 says "ipv4" — copy/paste
 * from the ipv4 handler; this copies the UDP header.
 */
1079 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1080 struct ulp_rte_parser_params *params)
1082 const struct rte_flow_item_udp *udp_spec = item->spec;
1083 const struct rte_flow_item_udp *udp_mask = item->mask;
1084 struct ulp_rte_hdr_field *field;
1085 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1086 uint32_t idx = params->field_idx;
1088 uint16_t dst_port = 0;
1091 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1093 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1094 return BNXT_TF_RC_ERROR;
1098 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1102 size = sizeof(udp_spec->hdr.src_port);
1103 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1104 &udp_spec->hdr.src_port,
1106 size = sizeof(udp_spec->hdr.dst_port);
1107 field = ulp_rte_parser_fld_copy(field,
1108 &udp_spec->hdr.dst_port,
/* Remember the dst port for VXLAN tunnel detection */
1110 dst_port = udp_spec->hdr.dst_port;
1111 size = sizeof(udp_spec->hdr.dgram_len);
1112 field = ulp_rte_parser_fld_copy(field,
1113 &udp_spec->hdr.dgram_len,
1115 size = sizeof(udp_spec->hdr.dgram_cksum);
1116 field = ulp_rte_parser_fld_copy(field,
1117 &udp_spec->hdr.dgram_cksum,
/* Mask copies mirror the spec order above */
1121 ulp_rte_prsr_mask_copy(params, &idx,
1122 &udp_mask->hdr.src_port,
1123 sizeof(udp_mask->hdr.src_port));
1124 ulp_rte_prsr_mask_copy(params, &idx,
1125 &udp_mask->hdr.dst_port,
1126 sizeof(udp_mask->hdr.dst_port));
1127 ulp_rte_prsr_mask_copy(params, &idx,
1128 &udp_mask->hdr.dgram_len,
1129 sizeof(udp_mask->hdr.dgram_len));
1130 ulp_rte_prsr_mask_copy(params, &idx,
1131 &udp_mask->hdr.dgram_cksum,
1132 sizeof(udp_mask->hdr.dgram_cksum));
1135 /* Add number of UDP header elements */
1136 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1138 /* Set the udp header bitmap and computed l4 header bitmaps */
1139 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1140 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1141 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1142 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
/* else: outer UDP — the only case eligible for VXLAN detection */
1144 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1145 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1146 /* Update the field protocol hdr bitmap */
1147 ulp_rte_l4_proto_type_update(params, dst_port);
1149 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1150 return BNXT_TF_RC_SUCCESS;
1153 /* Function to handle the parsing of RTE Flow item TCP Header. */
1155 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1156 struct ulp_rte_parser_params *params)
1158 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1159 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1160 struct ulp_rte_hdr_field *field;
1161 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1162 uint32_t idx = params->field_idx;
1166 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1168 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1169 return BNXT_TF_RC_ERROR;
1173 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1177 size = sizeof(tcp_spec->hdr.src_port);
1178 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1179 &tcp_spec->hdr.src_port,
1181 size = sizeof(tcp_spec->hdr.dst_port);
1182 field = ulp_rte_parser_fld_copy(field,
1183 &tcp_spec->hdr.dst_port,
1185 size = sizeof(tcp_spec->hdr.sent_seq);
1186 field = ulp_rte_parser_fld_copy(field,
1187 &tcp_spec->hdr.sent_seq,
1189 size = sizeof(tcp_spec->hdr.recv_ack);
1190 field = ulp_rte_parser_fld_copy(field,
1191 &tcp_spec->hdr.recv_ack,
1193 size = sizeof(tcp_spec->hdr.data_off);
1194 field = ulp_rte_parser_fld_copy(field,
1195 &tcp_spec->hdr.data_off,
1197 size = sizeof(tcp_spec->hdr.tcp_flags);
1198 field = ulp_rte_parser_fld_copy(field,
1199 &tcp_spec->hdr.tcp_flags,
1201 size = sizeof(tcp_spec->hdr.rx_win);
1202 field = ulp_rte_parser_fld_copy(field,
1203 &tcp_spec->hdr.rx_win,
1205 size = sizeof(tcp_spec->hdr.cksum);
1206 field = ulp_rte_parser_fld_copy(field,
1207 &tcp_spec->hdr.cksum,
1209 size = sizeof(tcp_spec->hdr.tcp_urp);
1210 field = ulp_rte_parser_fld_copy(field,
1211 &tcp_spec->hdr.tcp_urp,
1214 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1218 ulp_rte_prsr_mask_copy(params, &idx,
1219 &tcp_mask->hdr.src_port,
1220 sizeof(tcp_mask->hdr.src_port));
1221 ulp_rte_prsr_mask_copy(params, &idx,
1222 &tcp_mask->hdr.dst_port,
1223 sizeof(tcp_mask->hdr.dst_port));
1224 ulp_rte_prsr_mask_copy(params, &idx,
1225 &tcp_mask->hdr.sent_seq,
1226 sizeof(tcp_mask->hdr.sent_seq));
1227 ulp_rte_prsr_mask_copy(params, &idx,
1228 &tcp_mask->hdr.recv_ack,
1229 sizeof(tcp_mask->hdr.recv_ack));
1230 ulp_rte_prsr_mask_copy(params, &idx,
1231 &tcp_mask->hdr.data_off,
1232 sizeof(tcp_mask->hdr.data_off));
1233 ulp_rte_prsr_mask_copy(params, &idx,
1234 &tcp_mask->hdr.tcp_flags,
1235 sizeof(tcp_mask->hdr.tcp_flags));
1236 ulp_rte_prsr_mask_copy(params, &idx,
1237 &tcp_mask->hdr.rx_win,
1238 sizeof(tcp_mask->hdr.rx_win));
1239 ulp_rte_prsr_mask_copy(params, &idx,
1240 &tcp_mask->hdr.cksum,
1241 sizeof(tcp_mask->hdr.cksum));
1242 ulp_rte_prsr_mask_copy(params, &idx,
1243 &tcp_mask->hdr.tcp_urp,
1244 sizeof(tcp_mask->hdr.tcp_urp));
1246 /* add number of TCP header elements */
1247 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1249 /* Set the udp header bitmap and computed l4 header bitmaps */
1250 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1251 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1252 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1253 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1255 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1256 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1258 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1259 return BNXT_TF_RC_SUCCESS;
1262 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1264 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1265 struct ulp_rte_parser_params *params)
1267 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1268 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1269 struct ulp_rte_hdr_field *field;
1270 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1271 uint32_t idx = params->field_idx;
1275 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1279 size = sizeof(vxlan_spec->flags);
1280 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1283 size = sizeof(vxlan_spec->rsvd0);
1284 field = ulp_rte_parser_fld_copy(field,
1287 size = sizeof(vxlan_spec->vni);
1288 field = ulp_rte_parser_fld_copy(field,
1291 size = sizeof(vxlan_spec->rsvd1);
1292 field = ulp_rte_parser_fld_copy(field,
1297 ulp_rte_prsr_mask_copy(params, &idx,
1299 sizeof(vxlan_mask->flags));
1300 ulp_rte_prsr_mask_copy(params, &idx,
1302 sizeof(vxlan_mask->rsvd0));
1303 ulp_rte_prsr_mask_copy(params, &idx,
1305 sizeof(vxlan_mask->vni));
1306 ulp_rte_prsr_mask_copy(params, &idx,
1308 sizeof(vxlan_mask->rsvd1));
1310 /* Add number of vxlan header elements */
1311 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1313 /* Update the hdr_bitmap with vxlan */
1314 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1315 return BNXT_TF_RC_SUCCESS;
1318 /* Function to handle the parsing of RTE Flow item void Header */
1320 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1321 struct ulp_rte_parser_params *params __rte_unused)
1323 return BNXT_TF_RC_SUCCESS;
1326 /* Function to handle the parsing of RTE Flow action void Header. */
1328 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1329 struct ulp_rte_parser_params *params __rte_unused)
1331 return BNXT_TF_RC_SUCCESS;
1334 /* Function to handle the parsing of RTE Flow action Mark Header. */
1336 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1337 struct ulp_rte_parser_params *param)
1339 const struct rte_flow_action_mark *mark;
1340 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1343 mark = action_item->conf;
1345 mark_id = tfp_cpu_to_be_32(mark->id);
1346 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1347 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1349 /* Update the hdr_bitmap with vxlan */
1350 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1351 return BNXT_TF_RC_SUCCESS;
1353 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1354 return BNXT_TF_RC_ERROR;
1357 /* Function to handle the parsing of RTE Flow action RSS Header. */
1359 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1360 struct ulp_rte_parser_params *param)
1362 const struct rte_flow_action_rss *rss = action_item->conf;
1365 /* Update the hdr_bitmap with vxlan */
1366 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1367 return BNXT_TF_RC_SUCCESS;
1369 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1370 return BNXT_TF_RC_ERROR;
1373 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1375 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1376 struct ulp_rte_parser_params *params)
1378 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1379 const struct rte_flow_item *item;
1380 const struct rte_flow_item_eth *eth_spec;
1381 const struct rte_flow_item_ipv4 *ipv4_spec;
1382 const struct rte_flow_item_ipv6 *ipv6_spec;
1383 struct rte_flow_item_vxlan vxlan_spec;
1384 uint32_t vlan_num = 0, vlan_size = 0;
1385 uint32_t ip_size = 0, ip_type = 0;
1386 uint32_t vxlan_size = 0;
1388 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1389 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1391 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1392 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1394 vxlan_encap = action_item->conf;
1396 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1397 return BNXT_TF_RC_ERROR;
1400 item = vxlan_encap->definition;
1402 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1403 return BNXT_TF_RC_ERROR;
1406 if (!ulp_rte_item_skip_void(&item, 0))
1407 return BNXT_TF_RC_ERROR;
1409 /* must have ethernet header */
1410 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1411 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1412 return BNXT_TF_RC_ERROR;
1414 eth_spec = item->spec;
1415 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1416 ulp_encap_buffer_copy(buff,
1417 eth_spec->dst.addr_bytes,
1418 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1420 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1421 ulp_encap_buffer_copy(buff,
1422 eth_spec->src.addr_bytes,
1423 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1425 /* Goto the next item */
1426 if (!ulp_rte_item_skip_void(&item, 1))
1427 return BNXT_TF_RC_ERROR;
1429 /* May have vlan header */
1430 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1432 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1433 ulp_encap_buffer_copy(buff,
1435 sizeof(struct rte_flow_item_vlan));
1437 if (!ulp_rte_item_skip_void(&item, 1))
1438 return BNXT_TF_RC_ERROR;
1441 /* may have two vlan headers */
1442 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1444 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1445 sizeof(struct rte_flow_item_vlan)],
1447 sizeof(struct rte_flow_item_vlan));
1448 if (!ulp_rte_item_skip_void(&item, 1))
1449 return BNXT_TF_RC_ERROR;
1451 /* Update the vlan count and size of more than one */
1453 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1454 vlan_num = tfp_cpu_to_be_32(vlan_num);
1455 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1458 vlan_size = tfp_cpu_to_be_32(vlan_size);
1459 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1464 /* L3 must be IPv4, IPv6 */
1465 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1466 ipv4_spec = item->spec;
1467 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1469 /* copy the ipv4 details */
1470 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1471 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1472 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1473 ulp_encap_buffer_copy(buff,
1475 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1476 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1478 const uint8_t *tmp_buff;
1480 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1481 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1482 ulp_encap_buffer_copy(buff,
1484 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1485 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1486 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1487 ulp_encap_buffer_copy(buff,
1488 &ipv4_spec->hdr.version_ihl,
1489 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1491 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1492 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1493 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1494 ulp_encap_buffer_copy(buff,
1495 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1496 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1498 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1499 ulp_encap_buffer_copy(buff,
1500 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1501 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1503 /* Update the ip size details */
1504 ip_size = tfp_cpu_to_be_32(ip_size);
1505 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1506 &ip_size, sizeof(uint32_t));
1508 /* update the ip type */
1509 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1510 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1511 &ip_type, sizeof(uint32_t));
1513 /* update the computed field to notify it is ipv4 header */
1514 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1517 if (!ulp_rte_item_skip_void(&item, 1))
1518 return BNXT_TF_RC_ERROR;
1519 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1520 ipv6_spec = item->spec;
1521 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1523 /* copy the ipv4 details */
1524 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1525 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1527 /* Update the ip size details */
1528 ip_size = tfp_cpu_to_be_32(ip_size);
1529 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1530 &ip_size, sizeof(uint32_t));
1532 /* update the ip type */
1533 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1534 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1535 &ip_type, sizeof(uint32_t));
1537 /* update the computed field to notify it is ipv6 header */
1538 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1541 if (!ulp_rte_item_skip_void(&item, 1))
1542 return BNXT_TF_RC_ERROR;
1544 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1545 return BNXT_TF_RC_ERROR;
1549 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1550 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1551 return BNXT_TF_RC_ERROR;
1553 /* copy the udp details */
1554 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1555 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1557 if (!ulp_rte_item_skip_void(&item, 1))
1558 return BNXT_TF_RC_ERROR;
1561 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1562 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1563 return BNXT_TF_RC_ERROR;
1565 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1566 /* copy the vxlan details */
1567 memcpy(&vxlan_spec, item->spec, vxlan_size);
1568 vxlan_spec.flags = 0x08;
1569 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1570 (const uint8_t *)&vxlan_spec,
1572 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1573 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1574 &vxlan_size, sizeof(uint32_t));
1576 /* update the hdr_bitmap with vxlan */
1577 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1578 return BNXT_TF_RC_SUCCESS;
1581 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1583 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1585 struct ulp_rte_parser_params *params)
1587 /* update the hdr_bitmap with vxlan */
1588 ULP_BITMAP_SET(params->act_bitmap.bits,
1589 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1590 return BNXT_TF_RC_SUCCESS;
1593 /* Function to handle the parsing of RTE Flow action drop Header. */
1595 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1596 struct ulp_rte_parser_params *params)
1598 /* Update the hdr_bitmap with drop */
1599 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1600 return BNXT_TF_RC_SUCCESS;
1603 /* Function to handle the parsing of RTE Flow action count. */
1605 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1606 struct ulp_rte_parser_params *params)
1609 const struct rte_flow_action_count *act_count;
1610 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1612 act_count = action_item->conf;
1614 if (act_count->shared) {
1616 "Parse Error:Shared count not supported\n");
1617 return BNXT_TF_RC_PARSE_ERR;
1619 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1621 BNXT_ULP_ACT_PROP_SZ_COUNT);
1624 /* Update the hdr_bitmap with count */
1625 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1626 return BNXT_TF_RC_SUCCESS;
1629 /* Function to handle the parsing of action ports. */
1631 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1634 enum bnxt_ulp_direction_type dir;
1637 struct ulp_rte_act_prop *act = ¶m->act_prop;
1638 enum bnxt_ulp_intf_type port_type;
1641 /* Get the direction */
1642 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1643 if (dir == BNXT_ULP_DIR_EGRESS) {
1644 /* For egress direction, fill vport */
1645 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1646 return BNXT_TF_RC_ERROR;
1649 pid = rte_cpu_to_be_32(pid);
1650 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1651 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1653 /* For ingress direction, fill vnic */
1654 port_type = ULP_COMP_FLD_IDX_RD(param,
1655 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1656 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1657 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1659 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1661 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1663 return BNXT_TF_RC_ERROR;
1666 pid = rte_cpu_to_be_32(pid);
1667 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1668 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1671 /* Update the action port set bit */
1672 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1673 return BNXT_TF_RC_SUCCESS;
1676 /* Function to handle the parsing of RTE Flow action PF. */
1678 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1679 struct ulp_rte_parser_params *params)
1683 enum bnxt_ulp_intf_type intf_type;
1685 /* Get the port id of the current device */
1686 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1688 /* Get the port db ifindex */
1689 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1691 BNXT_TF_DBG(ERR, "Invalid port id\n");
1692 return BNXT_TF_RC_ERROR;
1695 /* Check the port is PF port */
1696 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1697 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1698 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1699 return BNXT_TF_RC_ERROR;
1701 /* Update the action properties */
1702 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1703 return ulp_rte_parser_act_port_set(params, ifindex);
1706 /* Function to handle the parsing of RTE Flow action VF. */
1708 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1709 struct ulp_rte_parser_params *params)
1711 const struct rte_flow_action_vf *vf_action;
1713 enum bnxt_ulp_intf_type intf_type;
1715 vf_action = action_item->conf;
1717 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1718 return BNXT_TF_RC_PARSE_ERR;
1721 if (vf_action->original) {
1722 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1723 return BNXT_TF_RC_PARSE_ERR;
1726 /* Check the port is VF port */
1727 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1729 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1730 return BNXT_TF_RC_ERROR;
1732 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1733 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1734 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1735 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1736 return BNXT_TF_RC_ERROR;
1739 /* Update the action properties */
1740 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1741 return ulp_rte_parser_act_port_set(params, ifindex);
1744 /* Function to handle the parsing of RTE Flow action port_id. */
1746 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1747 struct ulp_rte_parser_params *param)
1749 const struct rte_flow_action_port_id *port_id = act_item->conf;
1751 enum bnxt_ulp_intf_type intf_type;
1755 "ParseErr: Invalid Argument\n");
1756 return BNXT_TF_RC_PARSE_ERR;
1758 if (port_id->original) {
1760 "ParseErr:Portid Original not supported\n");
1761 return BNXT_TF_RC_PARSE_ERR;
1764 /* Get the port db ifindex */
1765 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1767 BNXT_TF_DBG(ERR, "Invalid port id\n");
1768 return BNXT_TF_RC_ERROR;
1771 /* Get the intf type */
1772 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1774 BNXT_TF_DBG(ERR, "Invalid port type\n");
1775 return BNXT_TF_RC_ERROR;
1778 /* Set the action port */
1779 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1780 return ulp_rte_parser_act_port_set(param, ifindex);
1783 /* Function to handle the parsing of RTE Flow action phy_port. */
1785 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1786 struct ulp_rte_parser_params *prm)
1788 const struct rte_flow_action_phy_port *phy_port;
1792 enum bnxt_ulp_direction_type dir;
1794 phy_port = action_item->conf;
1797 "ParseErr: Invalid Argument\n");
1798 return BNXT_TF_RC_PARSE_ERR;
1801 if (phy_port->original) {
1803 "Parse Err:Port Original not supported\n");
1804 return BNXT_TF_RC_PARSE_ERR;
1806 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1807 if (dir != BNXT_ULP_DIR_EGRESS) {
1809 "Parse Err:Phy ports are valid only for egress\n");
1810 return BNXT_TF_RC_PARSE_ERR;
1812 /* Get the physical port details from port db */
1813 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1816 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1821 pid = rte_cpu_to_be_32(pid);
1822 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1823 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1825 /* Update the action port set bit */
1826 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1827 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1828 BNXT_ULP_INTF_TYPE_PHY_PORT);
1829 return BNXT_TF_RC_SUCCESS;
1832 /* Function to handle the parsing of RTE Flow action pop vlan. */
1834 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1835 struct ulp_rte_parser_params *params)
1837 /* Update the act_bitmap with pop */
1838 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1839 return BNXT_TF_RC_SUCCESS;
1842 /* Function to handle the parsing of RTE Flow action push vlan. */
1844 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1845 struct ulp_rte_parser_params *params)
1847 const struct rte_flow_action_of_push_vlan *push_vlan;
1849 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1851 push_vlan = action_item->conf;
1853 ethertype = push_vlan->ethertype;
1854 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1856 "Parse Err: Ethertype not supported\n");
1857 return BNXT_TF_RC_PARSE_ERR;
1859 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1860 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1861 /* Update the hdr_bitmap with push vlan */
1862 ULP_BITMAP_SET(params->act_bitmap.bits,
1863 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1864 return BNXT_TF_RC_SUCCESS;
1866 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1867 return BNXT_TF_RC_ERROR;
1870 /* Function to handle the parsing of RTE Flow action set vlan id. */
1872 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1873 struct ulp_rte_parser_params *params)
1875 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1877 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1879 vlan_vid = action_item->conf;
1880 if (vlan_vid && vlan_vid->vlan_vid) {
1881 vid = vlan_vid->vlan_vid;
1882 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1883 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1884 /* Update the hdr_bitmap with vlan vid */
1885 ULP_BITMAP_SET(params->act_bitmap.bits,
1886 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1887 return BNXT_TF_RC_SUCCESS;
1889 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1890 return BNXT_TF_RC_ERROR;
1893 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1895 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1896 struct ulp_rte_parser_params *params)
1898 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1900 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1902 vlan_pcp = action_item->conf;
1904 pcp = vlan_pcp->vlan_pcp;
1905 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1906 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1907 /* Update the hdr_bitmap with vlan vid */
1908 ULP_BITMAP_SET(params->act_bitmap.bits,
1909 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1910 return BNXT_TF_RC_SUCCESS;
1912 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1913 return BNXT_TF_RC_ERROR;
1916 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
1918 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
1919 struct ulp_rte_parser_params *params)
1921 const struct rte_flow_action_set_ipv4 *set_ipv4;
1922 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1924 set_ipv4 = action_item->conf;
1926 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
1927 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
1928 /* Update the hdr_bitmap with set ipv4 src */
1929 ULP_BITMAP_SET(params->act_bitmap.bits,
1930 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
1931 return BNXT_TF_RC_SUCCESS;
1933 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
1934 return BNXT_TF_RC_ERROR;
1937 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
1939 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
1940 struct ulp_rte_parser_params *params)
1942 const struct rte_flow_action_set_ipv4 *set_ipv4;
1943 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1945 set_ipv4 = action_item->conf;
1947 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
1948 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
1949 /* Update the hdr_bitmap with set ipv4 dst */
1950 ULP_BITMAP_SET(params->act_bitmap.bits,
1951 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
1952 return BNXT_TF_RC_SUCCESS;
1954 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
1955 return BNXT_TF_RC_ERROR;
1958 /* Function to handle the parsing of RTE Flow action set tp src.*/
1960 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
1961 struct ulp_rte_parser_params *params)
1963 const struct rte_flow_action_set_tp *set_tp;
1964 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1966 set_tp = action_item->conf;
1968 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
1969 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
1970 /* Update the hdr_bitmap with set tp src */
1971 ULP_BITMAP_SET(params->act_bitmap.bits,
1972 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
1973 return BNXT_TF_RC_SUCCESS;
1976 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1977 return BNXT_TF_RC_ERROR;
1980 /* Function to handle the parsing of RTE Flow action set tp dst.*/
1982 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
1983 struct ulp_rte_parser_params *params)
1985 const struct rte_flow_action_set_tp *set_tp;
1986 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1988 set_tp = action_item->conf;
1990 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
1991 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
1992 /* Update the hdr_bitmap with set tp dst */
1993 ULP_BITMAP_SET(params->act_bitmap.bits,
1994 BNXT_ULP_ACTION_BIT_SET_TP_DST);
1995 return BNXT_TF_RC_SUCCESS;
1998 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1999 return BNXT_TF_RC_ERROR;
2002 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2004 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2005 struct ulp_rte_parser_params *params)
2007 /* Update the act_bitmap with dec ttl */
2008 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2009 return BNXT_TF_RC_SUCCESS;