1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Local defines for the parsing functions */
/*
 * VLAN TCI layout (16 bits): PCP(3) | DEI(1) | VID(12).
 * The parser splits the TCI into a 3-bit priority and 12-bit tag below.
 */
16 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
/* NOTE(review): mask is compared against the value AFTER the >>13 shift in
 * the vlan handler; 0x700 vs a 3-bit result looks inconsistent — confirm. */
17 #define ULP_VLAN_PRIORITY_MASK 0x700
18 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
/* IANA-assigned UDP destination port for VXLAN (RFC 7348). */
19 #define ULP_UDP_PORT_VXLAN 4789
21 /* Utility function to skip the void items. */
/*
 * Advance *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries so the
 * caller lands on the next meaningful pattern item.
 * NOTE(review): the loop body and the use of 'increment' are elided in
 * this view — presumably *item is advanced by 'increment' each pass;
 * confirm against the full source.
 */
23 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
29 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
36 /* Utility function to update the field_bitmap */
/*
 * Mark field 'idx' as participating in the match when its mask is
 * non-zero; additionally flag a wildcard match when the mask is not
 * all-ones. A zero mask clears the field's bit instead.
 */
38 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
41 struct ulp_rte_hdr_field *field;
/* NOTE(review): '¶ms' below is mojibake for '&params' — fix encoding. */
43 field = ¶ms->hdr_field[idx];
44 if (ulp_bitmap_notzero(field->mask, field->size)) {
45 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial (not all-ones) mask => wildcard-match template required. */
47 if (!ulp_bitmap_is_ones(field->mask, field->size))
48 ULP_BITMAP_SET(params->fld_bitmap.bits,
49 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
/* Zero mask: field does not take part in the match. */
51 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
55 /* Utility function to copy field spec items */
/*
 * Copy 'size' bytes of spec data into the given hdr_field and return a
 * pointer for chaining to the next field.
 * NOTE(review): the lines assigning field->size and returning the next
 * field pointer are elided in this view — confirm against full source.
 */
56 static struct ulp_rte_hdr_field *
57 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
62 memcpy(field->spec, buffer, field->size);
67 /* Utility function to copy field masks items */
/*
 * Copy 'size' bytes of mask data into hdr_field[*idx] and refresh the
 * field bitmap so the match/wildcard flags reflect the new mask.
 */
69 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
/* NOTE(review): '¶ms' below is mojibake for '&params' — fix encoding. */
74 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
76 memcpy(field->mask, buffer, size);
77 ulp_rte_parser_field_bitmap_update(params, *idx);
82 * Function to handle the parsing of RTE Flows and placing
83 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow pattern, dispatching each supported item to its
 * registered handler from ulp_hdr_info[], and finishes by applying the
 * implicit SVIF match for the incoming port.
 * Returns BNXT_TF_RC_SUCCESS, or a parse/error code on failure.
 */
86 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
87 struct ulp_rte_parser_params *params)
89 const struct rte_flow_item *item = pattern;
90 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots for the SVIF entries. */
92 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
94 /* Set the computed flags for no vlan tags before parsing */
95 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
96 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
98 /* Parse all the items in the pattern */
99 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
100 /* get the header information from the flow_hdr_info table */
101 hdr_info = &ulp_hdr_info[item->type];
102 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
104 "Truflow parser does not support type %d\n",
106 return BNXT_TF_RC_PARSE_ERR;
107 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
108 /* call the registered callback handler */
109 if (hdr_info->proto_hdr_func) {
110 if (hdr_info->proto_hdr_func(item, params) !=
111 BNXT_TF_RC_SUCCESS) {
112 return BNXT_TF_RC_ERROR;
118 /* update the implied SVIF */
119 return ulp_rte_parser_implicit_match_port_process(params);
123 * Function to handle the parsing of RTE Flows and placing
124 * the RTE flow actions into the ulp structures.
/*
 * Walks the rte_flow action list, dispatching each supported action to
 * its registered handler from ulp_act_info[], then applies the implicit
 * action-port (forward back to the incoming port) if no port action was
 * given. Mirrors bnxt_ulp_rte_parser_hdr_parse() for actions.
 */
127 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
128 struct ulp_rte_parser_params *params)
130 const struct rte_flow_action *action_item = actions;
/* NOTE(review): variable is named hdr_info but holds act_info entries. */
131 struct bnxt_ulp_rte_act_info *hdr_info;
133 /* Parse all the items in the pattern */
134 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
135 /* get the header information from the flow_hdr_info table */
136 hdr_info = &ulp_act_info[action_item->type];
137 if (hdr_info->act_type ==
138 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
140 "Truflow parser does not support act %u\n",
142 return BNXT_TF_RC_ERROR;
143 } else if (hdr_info->act_type ==
144 BNXT_ULP_ACT_TYPE_SUPPORTED) {
145 /* call the registered callback handler */
146 if (hdr_info->proto_act_func) {
147 if (hdr_info->proto_act_func(action_item,
149 BNXT_TF_RC_SUCCESS) {
150 return BNXT_TF_RC_ERROR;
156 /* update the implied port details */
157 ulp_rte_parser_implicit_act_port_process(params);
158 return BNXT_TF_RC_SUCCESS;
162 * Function to handle the post processing of the computed
163 * fields for the interface.
/*
 * Resolve the incoming dpdk port to a port-db ifindex, then populate the
 * PARIF computed fields for ingress flows: the physical-port PARIF
 * always, plus VF-func and DRV-func PARIFs when the match port is a VF
 * representor.
 */
166 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
169 uint16_t port_id, parif;
171 enum bnxt_ulp_direction_type dir;
173 /* get the direction details */
174 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
176 /* read the port id details */
177 port_id = ULP_COMP_FLD_IDX_RD(params,
178 BNXT_ULP_CF_IDX_INCOMING_IF);
/* Translate dpdk port id to the driver's port-db index. */
179 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
182 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
/* PARIF fields are only meaningful for ingress flows. */
186 if (dir == BNXT_ULP_DIR_INGRESS) {
188 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
189 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
190 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
193 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
196 /* Get the match port type */
197 mtype = ULP_COMP_FLD_IDX_RD(params,
198 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
199 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
200 ULP_COMP_FLD_IDX_WR(params,
201 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
203 /* Set VF func PARIF */
204 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
205 BNXT_ULP_VF_FUNC_PARIF,
208 "ParseErr:ifindex is not valid\n");
211 ULP_COMP_FLD_IDX_WR(params,
212 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
215 /* Set DRV func PARIF */
216 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
217 BNXT_ULP_DRV_FUNC_PARIF,
220 "ParseErr:ifindex is not valid\n");
223 ULP_COMP_FLD_IDX_WR(params,
224 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
231 * Function to handle the post processing of the parsing details
/*
 * After both pattern and action parsing: stamp the flow direction into
 * the header/action bitmaps, derive the VF-to-VF flag, translate the
 * DEC_TTL action into tunnel vs plain TTL-decrement computed fields,
 * fold the fast-path header bits into the main header bitmap, and update
 * the interface PARIF fields.
 */
234 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
236 enum bnxt_ulp_direction_type dir;
237 enum bnxt_ulp_intf_type match_port_type, act_port_type;
238 uint32_t act_port_set;
240 /* Get the computed details */
241 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
242 match_port_type = ULP_COMP_FLD_IDX_RD(params,
243 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
244 act_port_type = ULP_COMP_FLD_IDX_RD(params,
245 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
246 act_port_set = ULP_COMP_FLD_IDX_RD(params,
247 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
249 /* set the flow direction in the proto and action header */
250 if (dir == BNXT_ULP_DIR_EGRESS) {
251 ULP_BITMAP_SET(params->hdr_bitmap.bits,
252 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
253 ULP_BITMAP_SET(params->act_bitmap.bits,
254 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
257 /* calculate the VF to VF flag */
258 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
259 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
260 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
262 /* Update the decrement ttl computational fields */
263 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
264 BNXT_ULP_ACTION_BIT_DEC_TTL)) {
266 * Check that vxlan proto is included and vxlan decap
267 * action is not set then decrement tunnel ttl.
268 * Similarly add GRE and NVGRE in future.
270 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
271 BNXT_ULP_HDR_BIT_T_VXLAN) &&
272 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
273 BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
274 ULP_COMP_FLD_IDX_WR(params,
275 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
277 ULP_COMP_FLD_IDX_WR(params,
278 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
282 /* Merge the hdr_fp_bit into the proto header bit */
283 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
285 /* Update the computed interface parameters */
286 bnxt_ulp_comp_fld_intf_update(params);
288 /* TBD: Handle the flow rejection scenarios */
293 * Function to compute the flow direction based on the match port details
/*
 * Derive the hardware flow direction: an "ingress" attribute flow whose
 * match port is a VF representor is actually egress from the hardware's
 * point of view; otherwise the rte_flow attribute direction is used.
 */
296 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
298 enum bnxt_ulp_intf_type match_port_type;
300 /* Get the match port type */
301 match_port_type = ULP_COMP_FLD_IDX_RD(params,
302 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
304 /* If ingress flow and matchport is vf rep then dir is egress*/
305 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
306 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
307 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
308 BNXT_ULP_DIR_EGRESS);
310 /* Assign the input direction */
311 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
312 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
313 BNXT_ULP_DIR_INGRESS);
315 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
316 BNXT_ULP_DIR_EGRESS);
320 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolve and record the source-VIF match for the flow: pick the SVIF
 * type from the computed direction and port type (phy port for ingress;
 * VF-func for VF reps, else drv-func), fetch it from the port db, and
 * write spec/mask into the dedicated SVIF hdr_field slot. Rejects a
 * second source item if SVIF was already set.
 */
322 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
327 enum bnxt_ulp_direction_type dir;
328 struct ulp_rte_hdr_field *hdr_field;
329 enum bnxt_ulp_svif_type svif_type;
330 enum bnxt_ulp_intf_type port_type;
/* Only one source (PF/VF/port_id/phy_port) item is allowed per flow. */
332 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
333 BNXT_ULP_INVALID_SVIF_VAL) {
335 "SVIF already set,multiple source not support'd\n");
336 return BNXT_TF_RC_ERROR;
339 /* Get port type details */
340 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
341 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
342 BNXT_TF_DBG(ERR, "Invalid port type\n");
343 return BNXT_TF_RC_ERROR;
346 /* Update the match port type */
347 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
349 /* compute the direction */
350 bnxt_ulp_rte_parser_direction_compute(params);
352 /* Get the computed direction */
353 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
354 if (dir == BNXT_ULP_DIR_INGRESS) {
355 svif_type = BNXT_ULP_PHY_PORT_SVIF;
357 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
358 svif_type = BNXT_ULP_VF_FUNC_SVIF;
360 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
362 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* SVIF is matched big-endian in hardware templates. */
364 svif = rte_cpu_to_be_16(svif);
/* NOTE(review): '¶ms' below is mojibake for '&params' — fix encoding. */
365 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
366 memcpy(hdr_field->spec, &svif, sizeof(svif));
367 memcpy(hdr_field->mask, &mask, sizeof(mask));
368 hdr_field->size = sizeof(svif);
/* Record host-order SVIF as the "already set" sentinel value. */
369 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
370 rte_be_to_cpu_16(svif));
371 return BNXT_TF_RC_SUCCESS;
374 /* Function to handle the parsing of the RTE port id */
/*
 * If no explicit source item set the SVIF during pattern parsing, derive
 * it implicitly from the incoming interface with an exact (0xFFFF) mask.
 */
376 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
378 uint16_t port_id = 0;
379 uint16_t svif_mask = 0xFFFF;
381 int32_t rc = BNXT_TF_RC_ERROR;
/* An explicit source item already set the SVIF — nothing to do. */
383 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
384 BNXT_ULP_INVALID_SVIF_VAL)
385 return BNXT_TF_RC_SUCCESS;
387 /* SVIF not set. So get the port id */
388 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
390 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
393 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
397 /* Update the SVIF details */
398 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
402 /* Function to handle the implicit action port id */
/*
 * If no port action was specified, synthesize a port_id action that
 * forwards back to the incoming interface, then clear ACT_PORT_IS_SET so
 * the implicit choice is not mistaken for an explicit one.
 */
404 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
406 struct rte_flow_action action_item = {0};
407 struct rte_flow_action_port_id port_id = {0};
409 /* Read the action port set bit */
410 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
411 /* Already set, so just exit */
412 return BNXT_TF_RC_SUCCESS;
/* Build a synthetic port_id action targeting the incoming interface. */
414 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
415 action_item.conf = &port_id;
417 /* Update the action port based on incoming port */
418 ulp_rte_port_id_act_handler(&action_item, params);
420 /* Reset the action port set bit */
421 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
422 return BNXT_TF_RC_SUCCESS;
425 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * RTE_FLOW_ITEM_TYPE_PF: match traffic from the PF. The item carries no
 * spec, so the SVIF comes from the incoming interface with an exact
 * (0xFFFF) mask.
 */
427 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
428 struct ulp_rte_parser_params *params)
430 uint16_t port_id = 0;
431 uint16_t svif_mask = 0xFFFF;
434 /* Get the implicit port id */
435 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
437 /* perform the conversion from dpdk port to bnxt ifindex */
438 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
441 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
442 return BNXT_TF_RC_ERROR;
445 /* Update the SVIF details */
446 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
449 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * RTE_FLOW_ITEM_TYPE_VF: match traffic from a given VF. Validates the
 * item's spec/mask, converts the VF function id to a port-db ifindex,
 * and records the corresponding SVIF match.
 * NOTE(review): the spec/mask validation conditions are elided in this
 * view — confirm against full source.
 */
451 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
452 struct ulp_rte_parser_params *params)
454 const struct rte_flow_item_vf *vf_spec = item->spec;
455 const struct rte_flow_item_vf *vf_mask = item->mask;
458 int32_t rc = BNXT_TF_RC_PARSE_ERR;
460 /* Get VF rte_flow_item for Port details */
462 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
466 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
471 /* perform the conversion from VF Func id to bnxt ifindex */
472 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
475 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
478 /* Update the SVIF details */
479 return ulp_rte_parser_svif_set(params, ifindex, mask);
482 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * RTE_FLOW_ITEM_TYPE_PORT_ID: match traffic from a specific dpdk port.
 * Validates spec/mask, maps the dpdk port id to a port-db ifindex, and
 * records the SVIF match using the item's mask.
 * NOTE(review): the spec/mask validation conditions are elided in this
 * view — confirm against full source.
 */
484 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
485 struct ulp_rte_parser_params *params)
487 const struct rte_flow_item_port_id *port_spec = item->spec;
488 const struct rte_flow_item_port_id *port_mask = item->mask;
490 int32_t rc = BNXT_TF_RC_PARSE_ERR;
494 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
498 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
501 mask = port_mask->id;
503 /* perform the conversion from dpdk port to bnxt ifindex */
504 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
507 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
510 /* Update the SVIF details */
511 return ulp_rte_parser_svif_set(params, ifindex, mask);
514 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * RTE_FLOW_ITEM_TYPE_PHY_PORT: match traffic from a physical port.
 * Unlike the PF/VF/port_id handlers this bypasses ulp_rte_parser_svif_set
 * and fetches the physical-port SVIF directly, because the phy port is
 * identified by index rather than by a dpdk port id. Only valid for
 * ingress flows.
 */
516 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
517 struct ulp_rte_parser_params *params)
519 const struct rte_flow_item_phy_port *port_spec = item->spec;
520 const struct rte_flow_item_phy_port *port_mask = item->mask;
522 int32_t rc = BNXT_TF_RC_ERROR;
524 enum bnxt_ulp_direction_type dir;
525 struct ulp_rte_hdr_field *hdr_field;
527 /* Copy the rte_flow_item for phy port into hdr_field */
529 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
533 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
536 mask = port_mask->index;
538 /* Update the match port type */
539 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
540 BNXT_ULP_INTF_TYPE_PHY_PORT);
542 /* Compute the Hw direction */
543 bnxt_ulp_rte_parser_direction_compute(params);
545 /* Direction validation */
546 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
547 if (dir == BNXT_ULP_DIR_EGRESS) {
549 "Parse Err:Phy ports are valid only for ingress\n");
550 return BNXT_TF_RC_PARSE_ERR;
553 /* Get the physical port details from port db */
554 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
557 BNXT_TF_DBG(ERR, "Failed to get port details\n");
558 return BNXT_TF_RC_PARSE_ERR;
561 /* Update the SVIF details */
/* SVIF is matched big-endian in hardware templates. */
562 svif = rte_cpu_to_be_16(svif);
/* NOTE(review): '¶ms' below is mojibake for '&params' — fix encoding. */
563 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
564 memcpy(hdr_field->spec, &svif, sizeof(svif));
565 memcpy(hdr_field->mask, &mask, sizeof(mask));
566 hdr_field->size = sizeof(svif);
567 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
568 rte_be_to_cpu_16(svif));
569 return BNXT_TF_RC_SUCCESS;
572 /* Function to handle the update of proto header based on field values */
/*
 * From a big-endian L2 ethertype, set the inner/outer IPv4 or IPv6
 * fast-path header bit and the matching I_L3/O_L3 computed flag.
 * 'in_flag' selects the inner (non-zero) vs outer (zero) variant.
 */
574 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
575 uint16_t type, uint32_t in_flag)
577 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
579 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
580 BNXT_ULP_HDR_BIT_I_IPV4);
581 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
583 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
584 BNXT_ULP_HDR_BIT_O_IPV4);
585 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
587 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
589 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
590 BNXT_ULP_HDR_BIT_I_IPV6);
591 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
593 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
594 BNXT_ULP_HDR_BIT_O_IPV6);
595 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
600 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Copy the eth item's dst MAC, src MAC and ethertype spec/mask into
 * consecutive hdr_field slots, reserve the VLAN field slots that may
 * follow, and set the inner or outer ETH header bit (inner when an outer
 * eth header was already seen). The ethertype then drives the L3
 * fast-path bits via ulp_rte_l2_proto_type_update().
 */
602 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
603 struct ulp_rte_parser_params *params)
605 const struct rte_flow_item_eth *eth_spec = item->spec;
606 const struct rte_flow_item_eth *eth_mask = item->mask;
607 struct ulp_rte_hdr_field *field;
608 uint32_t idx = params->field_idx;
610 uint16_t eth_type = 0;
611 uint32_t inner_flag = 0;
614 * Copy the rte_flow_item for eth into hdr_field using ethernet
618 size = sizeof(eth_spec->dst.addr_bytes);
/* NOTE(review): '¶ms' below is mojibake for '&params' — fix encoding. */
619 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
620 eth_spec->dst.addr_bytes,
622 size = sizeof(eth_spec->src.addr_bytes);
623 field = ulp_rte_parser_fld_copy(field,
624 eth_spec->src.addr_bytes,
626 field = ulp_rte_parser_fld_copy(field,
628 sizeof(eth_spec->type));
/* Remember the (big-endian) ethertype for the L3 bitmap update below. */
629 eth_type = eth_spec->type;
632 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
633 sizeof(eth_mask->dst.addr_bytes));
634 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
635 sizeof(eth_mask->src.addr_bytes));
/* NOTE(review): 'ð_mask' below is mojibake for '&eth_mask' — fix. */
636 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
637 sizeof(eth_mask->type));
639 /* Add number of vlan header elements */
640 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
/* Pre-reserve hdr_field slots for up to the max VLAN tags. */
641 params->vlan_idx = params->field_idx;
642 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
644 /* Update the protocol hdr bitmap */
645 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
646 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
649 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
651 /* Update the field protocol hdr bitmap */
652 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
654 return BNXT_TF_RC_SUCCESS;
657 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Split the VLAN TCI into priority (top 3 bits) and tag (low 12 bits),
 * copy spec/mask into the pre-reserved vlan hdr_field slots, then update
 * the OO/OI/IO/II vlan header bits and the per-level vtag counters based
 * on how many tags have been seen at the current eth level. The
 * inner_type ethertype feeds the L3 fast-path bits.
 */
659 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
660 struct ulp_rte_parser_params *params)
662 const struct rte_flow_item_vlan *vlan_spec = item->spec;
663 const struct rte_flow_item_vlan *vlan_mask = item->mask;
664 struct ulp_rte_hdr_field *field;
665 struct ulp_rte_hdr_bitmap *hdr_bit;
666 uint32_t idx = params->vlan_idx;
667 uint16_t vlan_tag, priority;
668 uint32_t outer_vtag_num;
669 uint32_t inner_vtag_num;
670 uint16_t eth_type = 0;
671 uint32_t inner_flag = 0;
674 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* Host-order split of the TCI; both halves stored back in net order. */
678 vlan_tag = ntohs(vlan_spec->tci);
679 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
680 vlan_tag &= ULP_VLAN_TAG_MASK;
681 vlan_tag = htons(vlan_tag);
/* NOTE(review): '¶ms' below is mojibake for '&params' — fix encoding. */
683 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
686 field = ulp_rte_parser_fld_copy(field,
689 field = ulp_rte_parser_fld_copy(field,
690 &vlan_spec->inner_type,
691 sizeof(vlan_spec->inner_type));
692 eth_type = vlan_spec->inner_type;
696 vlan_tag = ntohs(vlan_mask->tci);
697 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
701 * the storage for priority and vlan tag is 2 bytes
702 * The mask of priority which is 3 bits if it is all 1's
703 * then make the rest bits 13 bits as 1's
704 * so that it is matched as exact match.
/* NOTE(review): priority here already went through htons(); comparing it
 * against ULP_VLAN_PRIORITY_MASK (0x700) depends on byte order — verify. */
706 if (priority == ULP_VLAN_PRIORITY_MASK)
707 priority |= ~ULP_VLAN_PRIORITY_MASK;
708 if (vlan_tag == ULP_VLAN_TAG_MASK)
709 vlan_tag |= ~ULP_VLAN_TAG_MASK;
710 vlan_tag = htons(vlan_tag);
713 * The priority field is ignored since OVS is setting it as
714 * wild card match and it is not supported. This is a work
715 * around and shall be addressed in the future.
719 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
721 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
722 sizeof(vlan_mask->inner_type));
724 /* Set the vlan index to new incremented value */
725 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
727 /* Get the outer tag and inner tag counts */
728 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
729 BNXT_ULP_CF_IDX_O_VTAG_NUM);
730 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
731 BNXT_ULP_CF_IDX_I_VTAG_NUM);
733 /* Update the hdr_bitmap of the vlans */
/* NOTE(review): '¶ms' below is mojibake for '&params' — fix encoding. */
734 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first vlan tag after the outer eth header (OO vlan). */
735 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
736 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
738 /* Update the vlan tag num */
740 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
742 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
743 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
744 ULP_BITMAP_SET(params->hdr_bitmap.bits,
745 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Case 2: second vlan tag on the outer eth header (OI vlan). */
746 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
747 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
748 outer_vtag_num == 1) {
749 /* update the vlan tag num */
751 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
753 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
754 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
755 ULP_BITMAP_SET(params->hdr_bitmap.bits,
756 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Case 3: first vlan tag after the inner eth header (IO vlan). */
757 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
758 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
760 /* update the vlan tag num */
762 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
764 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
765 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
766 ULP_BITMAP_SET(params->hdr_bitmap.bits,
767 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Case 4: second vlan tag on the inner eth header (II vlan). */
769 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
770 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
771 inner_vtag_num == 1) {
772 /* update the vlan tag num */
774 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
776 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
777 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
778 ULP_BITMAP_SET(params->hdr_bitmap.bits,
779 BNXT_ULP_HDR_BIT_II_VLAN);
/* NOTE(review): "withtout" typo lives in a runtime log string — fixing
 * it is a behavior change, so it is only flagged here. */
782 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
783 return BNXT_TF_RC_ERROR;
785 /* Update the field protocol hdr bitmap */
786 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
787 return BNXT_TF_RC_SUCCESS;
790 /* Function to handle the update of proto header based on field values */
/*
 * From an IP next-protocol value, set the inner/outer UDP or TCP
 * fast-path header bit and the matching I_L4/O_L4 computed flag.
 * 'in_flag' selects the inner (non-zero) vs outer (zero) variant.
 */
792 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
793 uint8_t proto, uint32_t in_flag)
795 if (proto == IPPROTO_UDP) {
797 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
798 BNXT_ULP_HDR_BIT_I_UDP);
799 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
801 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
802 BNXT_ULP_HDR_BIT_O_UDP);
803 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
805 } else if (proto == IPPROTO_TCP) {
807 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
808 BNXT_ULP_HDR_BIT_I_TCP);
809 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
811 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
812 BNXT_ULP_HDR_BIT_O_TCP);
813 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
818 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
820 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
821 struct ulp_rte_parser_params *params)
823 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
824 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
825 struct ulp_rte_hdr_field *field;
826 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
827 uint32_t idx = params->field_idx;
830 uint32_t inner_flag = 0;
833 /* validate there are no 3rd L3 header */
834 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
836 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
837 return BNXT_TF_RC_ERROR;
841 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
845 size = sizeof(ipv4_spec->hdr.version_ihl);
846 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
847 &ipv4_spec->hdr.version_ihl,
849 size = sizeof(ipv4_spec->hdr.type_of_service);
850 field = ulp_rte_parser_fld_copy(field,
851 &ipv4_spec->hdr.type_of_service,
853 size = sizeof(ipv4_spec->hdr.total_length);
854 field = ulp_rte_parser_fld_copy(field,
855 &ipv4_spec->hdr.total_length,
857 size = sizeof(ipv4_spec->hdr.packet_id);
858 field = ulp_rte_parser_fld_copy(field,
859 &ipv4_spec->hdr.packet_id,
861 size = sizeof(ipv4_spec->hdr.fragment_offset);
862 field = ulp_rte_parser_fld_copy(field,
863 &ipv4_spec->hdr.fragment_offset,
865 size = sizeof(ipv4_spec->hdr.time_to_live);
866 field = ulp_rte_parser_fld_copy(field,
867 &ipv4_spec->hdr.time_to_live,
869 size = sizeof(ipv4_spec->hdr.next_proto_id);
870 field = ulp_rte_parser_fld_copy(field,
871 &ipv4_spec->hdr.next_proto_id,
873 proto = ipv4_spec->hdr.next_proto_id;
874 size = sizeof(ipv4_spec->hdr.hdr_checksum);
875 field = ulp_rte_parser_fld_copy(field,
876 &ipv4_spec->hdr.hdr_checksum,
878 size = sizeof(ipv4_spec->hdr.src_addr);
879 field = ulp_rte_parser_fld_copy(field,
880 &ipv4_spec->hdr.src_addr,
882 size = sizeof(ipv4_spec->hdr.dst_addr);
883 field = ulp_rte_parser_fld_copy(field,
884 &ipv4_spec->hdr.dst_addr,
888 ulp_rte_prsr_mask_copy(params, &idx,
889 &ipv4_mask->hdr.version_ihl,
890 sizeof(ipv4_mask->hdr.version_ihl));
892 * The tos field is ignored since OVS is setting it as wild card
893 * match and it is not supported. This is a work around and
894 * shall be addressed in the future.
898 ulp_rte_prsr_mask_copy(params, &idx,
899 &ipv4_mask->hdr.total_length,
900 sizeof(ipv4_mask->hdr.total_length));
901 ulp_rte_prsr_mask_copy(params, &idx,
902 &ipv4_mask->hdr.packet_id,
903 sizeof(ipv4_mask->hdr.packet_id));
904 ulp_rte_prsr_mask_copy(params, &idx,
905 &ipv4_mask->hdr.fragment_offset,
906 sizeof(ipv4_mask->hdr.fragment_offset));
907 ulp_rte_prsr_mask_copy(params, &idx,
908 &ipv4_mask->hdr.time_to_live,
909 sizeof(ipv4_mask->hdr.time_to_live));
910 ulp_rte_prsr_mask_copy(params, &idx,
911 &ipv4_mask->hdr.next_proto_id,
912 sizeof(ipv4_mask->hdr.next_proto_id));
913 ulp_rte_prsr_mask_copy(params, &idx,
914 &ipv4_mask->hdr.hdr_checksum,
915 sizeof(ipv4_mask->hdr.hdr_checksum));
916 ulp_rte_prsr_mask_copy(params, &idx,
917 &ipv4_mask->hdr.src_addr,
918 sizeof(ipv4_mask->hdr.src_addr));
919 ulp_rte_prsr_mask_copy(params, &idx,
920 &ipv4_mask->hdr.dst_addr,
921 sizeof(ipv4_mask->hdr.dst_addr));
923 /* Add the number of ipv4 header elements */
924 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
926 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
927 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
928 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
929 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
930 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
933 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
934 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
937 /* Update the field protocol hdr bitmap */
938 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
939 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
940 return BNXT_TF_RC_SUCCESS;
943 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * RTE_FLOW_ITEM_TYPE_IPV6 handler: decompose vtc_flow into version, tc
 * and flow-label sub-fields, copy all header spec/mask values into
 * consecutive hdr_field slots, set the inner or outer IPV6 header bit,
 * and bump the L3 header count (max two L3 headers). The proto field
 * drives the L4 fast-path bits.
 */
945 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
946 struct ulp_rte_parser_params *params)
948 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
949 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
950 struct ulp_rte_hdr_field *field;
/* NOTE(review): '¶ms' below is mojibake for '&params' — fix encoding. */
951 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
952 uint32_t idx = params->field_idx;
954 uint32_t vtcf, vtcf_mask;
956 uint32_t inner_flag = 0;
959 /* validate there are no 3rd L3 header */
960 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
962 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
963 return BNXT_TF_RC_ERROR;
967 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* vtc_flow is split into three template fields: ver, tc, flow label. */
971 size = sizeof(ipv6_spec->hdr.vtc_flow);
973 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
974 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
978 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
979 field = ulp_rte_parser_fld_copy(field,
983 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
984 field = ulp_rte_parser_fld_copy(field,
988 size = sizeof(ipv6_spec->hdr.payload_len);
989 field = ulp_rte_parser_fld_copy(field,
990 &ipv6_spec->hdr.payload_len,
992 size = sizeof(ipv6_spec->hdr.proto);
993 field = ulp_rte_parser_fld_copy(field,
994 &ipv6_spec->hdr.proto,
/* Remember the L4 protocol for the fast-path bitmap update below. */
996 proto = ipv6_spec->hdr.proto;
997 size = sizeof(ipv6_spec->hdr.hop_limits);
998 field = ulp_rte_parser_fld_copy(field,
999 &ipv6_spec->hdr.hop_limits,
1001 size = sizeof(ipv6_spec->hdr.src_addr);
1002 field = ulp_rte_parser_fld_copy(field,
1003 &ipv6_spec->hdr.src_addr,
1005 size = sizeof(ipv6_spec->hdr.dst_addr);
1006 field = ulp_rte_parser_fld_copy(field,
1007 &ipv6_spec->hdr.dst_addr,
1011 size = sizeof(ipv6_mask->hdr.vtc_flow);
1013 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1014 ulp_rte_prsr_mask_copy(params, &idx,
1018 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1019 ulp_rte_prsr_mask_copy(params, &idx,
1024 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1025 ulp_rte_prsr_mask_copy(params, &idx,
1029 ulp_rte_prsr_mask_copy(params, &idx,
1030 &ipv6_mask->hdr.payload_len,
1031 sizeof(ipv6_mask->hdr.payload_len));
1032 ulp_rte_prsr_mask_copy(params, &idx,
1033 &ipv6_mask->hdr.proto,
1034 sizeof(ipv6_mask->hdr.proto));
1035 ulp_rte_prsr_mask_copy(params, &idx,
1036 &ipv6_mask->hdr.hop_limits,
1037 sizeof(ipv6_mask->hdr.hop_limits));
1038 ulp_rte_prsr_mask_copy(params, &idx,
1039 &ipv6_mask->hdr.src_addr,
1040 sizeof(ipv6_mask->hdr.src_addr));
1041 ulp_rte_prsr_mask_copy(params, &idx,
1042 &ipv6_mask->hdr.dst_addr,
1043 sizeof(ipv6_mask->hdr.dst_addr));
1045 /* add number of ipv6 header elements */
1046 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1048 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1049 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1050 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1051 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1052 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1055 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1056 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1059 /* Update the field protocol hdr bitmap */
1060 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1061 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1063 return BNXT_TF_RC_SUCCESS;
1066 /* Function to handle the update of proto header based on field values */
/*
 * If the (big-endian) UDP destination port is the VXLAN port (4789),
 * mark the tunnel VXLAN fast-path header bit.
 */
1068 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1071 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1072 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1073 BNXT_ULP_HDR_BIT_T_VXLAN);
1076 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * RTE_FLOW_ITEM_TYPE_UDP handler: copy src/dst port, length and checksum
 * spec/mask into consecutive hdr_field slots, set the inner or outer UDP
 * header bit (inner when an outer L4 header already exists), and bump
 * the L4 header count (max two). For the outer header, the dst port is
 * checked for VXLAN tunnel detection.
 */
1078 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1079 struct ulp_rte_parser_params *params)
1081 const struct rte_flow_item_udp *udp_spec = item->spec;
1082 const struct rte_flow_item_udp *udp_mask = item->mask;
1083 struct ulp_rte_hdr_field *field;
/* NOTE(review): '¶ms' below is mojibake for '&params' — fix encoding. */
1084 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1085 uint32_t idx = params->field_idx;
1087 uint16_t dst_port = 0;
1090 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1092 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1093 return BNXT_TF_RC_ERROR;
1097 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1101 size = sizeof(udp_spec->hdr.src_port);
1102 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1103 &udp_spec->hdr.src_port,
1105 size = sizeof(udp_spec->hdr.dst_port);
1106 field = ulp_rte_parser_fld_copy(field,
1107 &udp_spec->hdr.dst_port,
/* Remember the (big-endian) dst port for VXLAN detection below. */
1109 dst_port = udp_spec->hdr.dst_port;
1110 size = sizeof(udp_spec->hdr.dgram_len);
1111 field = ulp_rte_parser_fld_copy(field,
1112 &udp_spec->hdr.dgram_len,
1114 size = sizeof(udp_spec->hdr.dgram_cksum);
1115 field = ulp_rte_parser_fld_copy(field,
1116 &udp_spec->hdr.dgram_cksum,
1120 ulp_rte_prsr_mask_copy(params, &idx,
1121 &udp_mask->hdr.src_port,
1122 sizeof(udp_mask->hdr.src_port));
1123 ulp_rte_prsr_mask_copy(params, &idx,
1124 &udp_mask->hdr.dst_port,
1125 sizeof(udp_mask->hdr.dst_port));
1126 ulp_rte_prsr_mask_copy(params, &idx,
1127 &udp_mask->hdr.dgram_len,
1128 sizeof(udp_mask->hdr.dgram_len));
1129 ulp_rte_prsr_mask_copy(params, &idx,
1130 &udp_mask->hdr.dgram_cksum,
1131 sizeof(udp_mask->hdr.dgram_cksum));
1134 /* Add number of UDP header elements */
1135 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1137 /* Set the udp header bitmap and computed l4 header bitmaps */
1138 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1139 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1140 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1141 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1143 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1144 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1145 /* Update the field protocol hdr bitmap */
1146 ulp_rte_l4_proto_type_update(params, dst_port);
1148 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1149 return BNXT_TF_RC_SUCCESS;
1152 /* Function to handle the parsing of RTE Flow item TCP Header. */
1154 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1155 struct ulp_rte_parser_params *params)
1157 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1158 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1159 struct ulp_rte_hdr_field *field;
1160 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1161 uint32_t idx = params->field_idx;
1165 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1167 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1168 return BNXT_TF_RC_ERROR;
1172 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1176 size = sizeof(tcp_spec->hdr.src_port);
1177 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1178 &tcp_spec->hdr.src_port,
1180 size = sizeof(tcp_spec->hdr.dst_port);
1181 field = ulp_rte_parser_fld_copy(field,
1182 &tcp_spec->hdr.dst_port,
1184 size = sizeof(tcp_spec->hdr.sent_seq);
1185 field = ulp_rte_parser_fld_copy(field,
1186 &tcp_spec->hdr.sent_seq,
1188 size = sizeof(tcp_spec->hdr.recv_ack);
1189 field = ulp_rte_parser_fld_copy(field,
1190 &tcp_spec->hdr.recv_ack,
1192 size = sizeof(tcp_spec->hdr.data_off);
1193 field = ulp_rte_parser_fld_copy(field,
1194 &tcp_spec->hdr.data_off,
1196 size = sizeof(tcp_spec->hdr.tcp_flags);
1197 field = ulp_rte_parser_fld_copy(field,
1198 &tcp_spec->hdr.tcp_flags,
1200 size = sizeof(tcp_spec->hdr.rx_win);
1201 field = ulp_rte_parser_fld_copy(field,
1202 &tcp_spec->hdr.rx_win,
1204 size = sizeof(tcp_spec->hdr.cksum);
1205 field = ulp_rte_parser_fld_copy(field,
1206 &tcp_spec->hdr.cksum,
1208 size = sizeof(tcp_spec->hdr.tcp_urp);
1209 field = ulp_rte_parser_fld_copy(field,
1210 &tcp_spec->hdr.tcp_urp,
1213 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1217 ulp_rte_prsr_mask_copy(params, &idx,
1218 &tcp_mask->hdr.src_port,
1219 sizeof(tcp_mask->hdr.src_port));
1220 ulp_rte_prsr_mask_copy(params, &idx,
1221 &tcp_mask->hdr.dst_port,
1222 sizeof(tcp_mask->hdr.dst_port));
1223 ulp_rte_prsr_mask_copy(params, &idx,
1224 &tcp_mask->hdr.sent_seq,
1225 sizeof(tcp_mask->hdr.sent_seq));
1226 ulp_rte_prsr_mask_copy(params, &idx,
1227 &tcp_mask->hdr.recv_ack,
1228 sizeof(tcp_mask->hdr.recv_ack));
1229 ulp_rte_prsr_mask_copy(params, &idx,
1230 &tcp_mask->hdr.data_off,
1231 sizeof(tcp_mask->hdr.data_off));
1232 ulp_rte_prsr_mask_copy(params, &idx,
1233 &tcp_mask->hdr.tcp_flags,
1234 sizeof(tcp_mask->hdr.tcp_flags));
1235 ulp_rte_prsr_mask_copy(params, &idx,
1236 &tcp_mask->hdr.rx_win,
1237 sizeof(tcp_mask->hdr.rx_win));
1238 ulp_rte_prsr_mask_copy(params, &idx,
1239 &tcp_mask->hdr.cksum,
1240 sizeof(tcp_mask->hdr.cksum));
1241 ulp_rte_prsr_mask_copy(params, &idx,
1242 &tcp_mask->hdr.tcp_urp,
1243 sizeof(tcp_mask->hdr.tcp_urp));
1245 /* add number of TCP header elements */
1246 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1248 /* Set the udp header bitmap and computed l4 header bitmaps */
1249 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1250 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1251 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1252 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1254 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1255 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1257 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1258 return BNXT_TF_RC_SUCCESS;
1261 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1263 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1264 struct ulp_rte_parser_params *params)
1266 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1267 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1268 struct ulp_rte_hdr_field *field;
1269 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1270 uint32_t idx = params->field_idx;
1274 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1278 size = sizeof(vxlan_spec->flags);
1279 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1282 size = sizeof(vxlan_spec->rsvd0);
1283 field = ulp_rte_parser_fld_copy(field,
1286 size = sizeof(vxlan_spec->vni);
1287 field = ulp_rte_parser_fld_copy(field,
1290 size = sizeof(vxlan_spec->rsvd1);
1291 field = ulp_rte_parser_fld_copy(field,
1296 ulp_rte_prsr_mask_copy(params, &idx,
1298 sizeof(vxlan_mask->flags));
1299 ulp_rte_prsr_mask_copy(params, &idx,
1301 sizeof(vxlan_mask->rsvd0));
1302 ulp_rte_prsr_mask_copy(params, &idx,
1304 sizeof(vxlan_mask->vni));
1305 ulp_rte_prsr_mask_copy(params, &idx,
1307 sizeof(vxlan_mask->rsvd1));
1309 /* Add number of vxlan header elements */
1310 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1312 /* Update the hdr_bitmap with vxlan */
1313 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1314 return BNXT_TF_RC_SUCCESS;
1317 /* Function to handle the parsing of RTE Flow item void Header */
1319 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1320 struct ulp_rte_parser_params *params __rte_unused)
1322 return BNXT_TF_RC_SUCCESS;
1325 /* Function to handle the parsing of RTE Flow action void Header. */
1327 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1328 struct ulp_rte_parser_params *params __rte_unused)
1330 return BNXT_TF_RC_SUCCESS;
1333 /* Function to handle the parsing of RTE Flow action Mark Header. */
1335 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1336 struct ulp_rte_parser_params *param)
1338 const struct rte_flow_action_mark *mark;
1339 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1342 mark = action_item->conf;
1344 mark_id = tfp_cpu_to_be_32(mark->id);
1345 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1346 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1348 /* Update the hdr_bitmap with vxlan */
1349 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1350 return BNXT_TF_RC_SUCCESS;
1352 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1353 return BNXT_TF_RC_ERROR;
1356 /* Function to handle the parsing of RTE Flow action RSS Header. */
1358 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1359 struct ulp_rte_parser_params *param)
1361 const struct rte_flow_action_rss *rss = action_item->conf;
1364 /* Update the hdr_bitmap with vxlan */
1365 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1366 return BNXT_TF_RC_SUCCESS;
1368 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1369 return BNXT_TF_RC_ERROR;
1372 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1374 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1375 struct ulp_rte_parser_params *params)
1377 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1378 const struct rte_flow_item *item;
1379 const struct rte_flow_item_eth *eth_spec;
1380 const struct rte_flow_item_ipv4 *ipv4_spec;
1381 const struct rte_flow_item_ipv6 *ipv6_spec;
1382 struct rte_flow_item_vxlan vxlan_spec;
1383 uint32_t vlan_num = 0, vlan_size = 0;
1384 uint32_t ip_size = 0, ip_type = 0;
1385 uint32_t vxlan_size = 0;
1387 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1388 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1390 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1391 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1393 vxlan_encap = action_item->conf;
1395 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1396 return BNXT_TF_RC_ERROR;
1399 item = vxlan_encap->definition;
1401 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1402 return BNXT_TF_RC_ERROR;
1405 if (!ulp_rte_item_skip_void(&item, 0))
1406 return BNXT_TF_RC_ERROR;
1408 /* must have ethernet header */
1409 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1410 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1411 return BNXT_TF_RC_ERROR;
1413 eth_spec = item->spec;
1414 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1415 ulp_encap_buffer_copy(buff,
1416 eth_spec->dst.addr_bytes,
1417 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1419 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1420 ulp_encap_buffer_copy(buff,
1421 eth_spec->src.addr_bytes,
1422 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1424 /* Goto the next item */
1425 if (!ulp_rte_item_skip_void(&item, 1))
1426 return BNXT_TF_RC_ERROR;
1428 /* May have vlan header */
1429 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1431 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1432 ulp_encap_buffer_copy(buff,
1434 sizeof(struct rte_flow_item_vlan));
1436 if (!ulp_rte_item_skip_void(&item, 1))
1437 return BNXT_TF_RC_ERROR;
1440 /* may have two vlan headers */
1441 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1443 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1444 sizeof(struct rte_flow_item_vlan)],
1446 sizeof(struct rte_flow_item_vlan));
1447 if (!ulp_rte_item_skip_void(&item, 1))
1448 return BNXT_TF_RC_ERROR;
1450 /* Update the vlan count and size of more than one */
1452 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1453 vlan_num = tfp_cpu_to_be_32(vlan_num);
1454 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1457 vlan_size = tfp_cpu_to_be_32(vlan_size);
1458 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1463 /* L3 must be IPv4, IPv6 */
1464 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1465 ipv4_spec = item->spec;
1466 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1468 /* copy the ipv4 details */
1469 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1470 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1471 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1472 ulp_encap_buffer_copy(buff,
1474 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1475 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1477 const uint8_t *tmp_buff;
1479 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1480 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1481 ulp_encap_buffer_copy(buff,
1483 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1484 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1485 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1486 ulp_encap_buffer_copy(buff,
1487 &ipv4_spec->hdr.version_ihl,
1488 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1490 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1491 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1492 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1493 ulp_encap_buffer_copy(buff,
1494 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1495 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1497 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1498 ulp_encap_buffer_copy(buff,
1499 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1500 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1502 /* Update the ip size details */
1503 ip_size = tfp_cpu_to_be_32(ip_size);
1504 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1505 &ip_size, sizeof(uint32_t));
1507 /* update the ip type */
1508 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1509 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1510 &ip_type, sizeof(uint32_t));
1512 /* update the computed field to notify it is ipv4 header */
1513 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1516 if (!ulp_rte_item_skip_void(&item, 1))
1517 return BNXT_TF_RC_ERROR;
1518 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1519 ipv6_spec = item->spec;
1520 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1522 /* copy the ipv4 details */
1523 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1524 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1526 /* Update the ip size details */
1527 ip_size = tfp_cpu_to_be_32(ip_size);
1528 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1529 &ip_size, sizeof(uint32_t));
1531 /* update the ip type */
1532 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1533 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1534 &ip_type, sizeof(uint32_t));
1536 /* update the computed field to notify it is ipv6 header */
1537 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1540 if (!ulp_rte_item_skip_void(&item, 1))
1541 return BNXT_TF_RC_ERROR;
1543 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1544 return BNXT_TF_RC_ERROR;
1548 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1549 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1550 return BNXT_TF_RC_ERROR;
1552 /* copy the udp details */
1553 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1554 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1556 if (!ulp_rte_item_skip_void(&item, 1))
1557 return BNXT_TF_RC_ERROR;
1560 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1561 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1562 return BNXT_TF_RC_ERROR;
1564 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1565 /* copy the vxlan details */
1566 memcpy(&vxlan_spec, item->spec, vxlan_size);
1567 vxlan_spec.flags = 0x08;
1568 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1569 (const uint8_t *)&vxlan_spec,
1571 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1572 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1573 &vxlan_size, sizeof(uint32_t));
1575 /* update the hdr_bitmap with vxlan */
1576 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1577 return BNXT_TF_RC_SUCCESS;
1580 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1582 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1584 struct ulp_rte_parser_params *params)
1586 /* update the hdr_bitmap with vxlan */
1587 ULP_BITMAP_SET(params->act_bitmap.bits,
1588 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1589 return BNXT_TF_RC_SUCCESS;
1592 /* Function to handle the parsing of RTE Flow action drop Header. */
1594 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1595 struct ulp_rte_parser_params *params)
1597 /* Update the hdr_bitmap with drop */
1598 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1599 return BNXT_TF_RC_SUCCESS;
1602 /* Function to handle the parsing of RTE Flow action count. */
1604 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1605 struct ulp_rte_parser_params *params)
1608 const struct rte_flow_action_count *act_count;
1609 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1611 act_count = action_item->conf;
1613 if (act_count->shared) {
1615 "Parse Error:Shared count not supported\n");
1616 return BNXT_TF_RC_PARSE_ERR;
1618 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1620 BNXT_ULP_ACT_PROP_SZ_COUNT);
1623 /* Update the hdr_bitmap with count */
1624 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1625 return BNXT_TF_RC_SUCCESS;
1628 /* Function to handle the parsing of action ports. */
1630 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1633 enum bnxt_ulp_direction_type dir;
1636 struct ulp_rte_act_prop *act = ¶m->act_prop;
1637 enum bnxt_ulp_intf_type port_type;
1640 /* Get the direction */
1641 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1642 if (dir == BNXT_ULP_DIR_EGRESS) {
1643 /* For egress direction, fill vport */
1644 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1645 return BNXT_TF_RC_ERROR;
1648 pid = rte_cpu_to_be_32(pid);
1649 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1650 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1652 /* For ingress direction, fill vnic */
1653 port_type = ULP_COMP_FLD_IDX_RD(param,
1654 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1655 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1656 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1658 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1660 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1662 return BNXT_TF_RC_ERROR;
1665 pid = rte_cpu_to_be_32(pid);
1666 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1667 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1670 /* Update the action port set bit */
1671 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1672 return BNXT_TF_RC_SUCCESS;
1675 /* Function to handle the parsing of RTE Flow action PF. */
1677 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1678 struct ulp_rte_parser_params *params)
1682 enum bnxt_ulp_intf_type intf_type;
1684 /* Get the port id of the current device */
1685 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1687 /* Get the port db ifindex */
1688 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1690 BNXT_TF_DBG(ERR, "Invalid port id\n");
1691 return BNXT_TF_RC_ERROR;
1694 /* Check the port is PF port */
1695 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1696 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1697 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1698 return BNXT_TF_RC_ERROR;
1700 /* Update the action properties */
1701 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1702 return ulp_rte_parser_act_port_set(params, ifindex);
1705 /* Function to handle the parsing of RTE Flow action VF. */
1707 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1708 struct ulp_rte_parser_params *params)
1710 const struct rte_flow_action_vf *vf_action;
1712 enum bnxt_ulp_intf_type intf_type;
1714 vf_action = action_item->conf;
1716 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1717 return BNXT_TF_RC_PARSE_ERR;
1720 if (vf_action->original) {
1721 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1722 return BNXT_TF_RC_PARSE_ERR;
1725 /* Check the port is VF port */
1726 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1728 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1729 return BNXT_TF_RC_ERROR;
1731 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1732 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1733 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1734 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1735 return BNXT_TF_RC_ERROR;
1738 /* Update the action properties */
1739 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1740 return ulp_rte_parser_act_port_set(params, ifindex);
1743 /* Function to handle the parsing of RTE Flow action port_id. */
1745 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1746 struct ulp_rte_parser_params *param)
1748 const struct rte_flow_action_port_id *port_id = act_item->conf;
1750 enum bnxt_ulp_intf_type intf_type;
1754 "ParseErr: Invalid Argument\n");
1755 return BNXT_TF_RC_PARSE_ERR;
1757 if (port_id->original) {
1759 "ParseErr:Portid Original not supported\n");
1760 return BNXT_TF_RC_PARSE_ERR;
1763 /* Get the port db ifindex */
1764 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1766 BNXT_TF_DBG(ERR, "Invalid port id\n");
1767 return BNXT_TF_RC_ERROR;
1770 /* Get the intf type */
1771 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1773 BNXT_TF_DBG(ERR, "Invalid port type\n");
1774 return BNXT_TF_RC_ERROR;
1777 /* Set the action port */
1778 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1779 return ulp_rte_parser_act_port_set(param, ifindex);
1782 /* Function to handle the parsing of RTE Flow action phy_port. */
1784 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1785 struct ulp_rte_parser_params *prm)
1787 const struct rte_flow_action_phy_port *phy_port;
1791 enum bnxt_ulp_direction_type dir;
1793 phy_port = action_item->conf;
1796 "ParseErr: Invalid Argument\n");
1797 return BNXT_TF_RC_PARSE_ERR;
1800 if (phy_port->original) {
1802 "Parse Err:Port Original not supported\n");
1803 return BNXT_TF_RC_PARSE_ERR;
1805 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1806 if (dir != BNXT_ULP_DIR_EGRESS) {
1808 "Parse Err:Phy ports are valid only for egress\n");
1809 return BNXT_TF_RC_PARSE_ERR;
1811 /* Get the physical port details from port db */
1812 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1815 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1820 pid = rte_cpu_to_be_32(pid);
1821 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1822 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1824 /* Update the action port set bit */
1825 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1826 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1827 BNXT_ULP_INTF_TYPE_PHY_PORT);
1828 return BNXT_TF_RC_SUCCESS;
1831 /* Function to handle the parsing of RTE Flow action pop vlan. */
1833 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1834 struct ulp_rte_parser_params *params)
1836 /* Update the act_bitmap with pop */
1837 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1838 return BNXT_TF_RC_SUCCESS;
1841 /* Function to handle the parsing of RTE Flow action push vlan. */
1843 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1844 struct ulp_rte_parser_params *params)
1846 const struct rte_flow_action_of_push_vlan *push_vlan;
1848 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1850 push_vlan = action_item->conf;
1852 ethertype = push_vlan->ethertype;
1853 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1855 "Parse Err: Ethertype not supported\n");
1856 return BNXT_TF_RC_PARSE_ERR;
1858 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1859 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1860 /* Update the hdr_bitmap with push vlan */
1861 ULP_BITMAP_SET(params->act_bitmap.bits,
1862 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1863 return BNXT_TF_RC_SUCCESS;
1865 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1866 return BNXT_TF_RC_ERROR;
1869 /* Function to handle the parsing of RTE Flow action set vlan id. */
1871 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1872 struct ulp_rte_parser_params *params)
1874 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1876 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1878 vlan_vid = action_item->conf;
1879 if (vlan_vid && vlan_vid->vlan_vid) {
1880 vid = vlan_vid->vlan_vid;
1881 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1882 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1883 /* Update the hdr_bitmap with vlan vid */
1884 ULP_BITMAP_SET(params->act_bitmap.bits,
1885 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1886 return BNXT_TF_RC_SUCCESS;
1888 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1889 return BNXT_TF_RC_ERROR;
1892 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1894 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1895 struct ulp_rte_parser_params *params)
1897 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1899 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1901 vlan_pcp = action_item->conf;
1903 pcp = vlan_pcp->vlan_pcp;
1904 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1905 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1906 /* Update the hdr_bitmap with vlan vid */
1907 ULP_BITMAP_SET(params->act_bitmap.bits,
1908 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1909 return BNXT_TF_RC_SUCCESS;
1911 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1912 return BNXT_TF_RC_ERROR;
1915 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
1917 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
1918 struct ulp_rte_parser_params *params)
1920 const struct rte_flow_action_set_ipv4 *set_ipv4;
1921 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1923 set_ipv4 = action_item->conf;
1925 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
1926 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
1927 /* Update the hdr_bitmap with set ipv4 src */
1928 ULP_BITMAP_SET(params->act_bitmap.bits,
1929 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
1930 return BNXT_TF_RC_SUCCESS;
1932 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
1933 return BNXT_TF_RC_ERROR;
1936 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
1938 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
1939 struct ulp_rte_parser_params *params)
1941 const struct rte_flow_action_set_ipv4 *set_ipv4;
1942 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1944 set_ipv4 = action_item->conf;
1946 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
1947 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
1948 /* Update the hdr_bitmap with set ipv4 dst */
1949 ULP_BITMAP_SET(params->act_bitmap.bits,
1950 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
1951 return BNXT_TF_RC_SUCCESS;
1953 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
1954 return BNXT_TF_RC_ERROR;
1957 /* Function to handle the parsing of RTE Flow action set tp src.*/
1959 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
1960 struct ulp_rte_parser_params *params)
1962 const struct rte_flow_action_set_tp *set_tp;
1963 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1965 set_tp = action_item->conf;
1967 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
1968 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
1969 /* Update the hdr_bitmap with set tp src */
1970 ULP_BITMAP_SET(params->act_bitmap.bits,
1971 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
1972 return BNXT_TF_RC_SUCCESS;
1975 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1976 return BNXT_TF_RC_ERROR;
1979 /* Function to handle the parsing of RTE Flow action set tp dst.*/
1981 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
1982 struct ulp_rte_parser_params *params)
1984 const struct rte_flow_action_set_tp *set_tp;
1985 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1987 set_tp = action_item->conf;
1989 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
1990 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
1991 /* Update the hdr_bitmap with set tp dst */
1992 ULP_BITMAP_SET(params->act_bitmap.bits,
1993 BNXT_ULP_ACTION_BIT_SET_TP_DST);
1994 return BNXT_TF_RC_SUCCESS;
1997 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1998 return BNXT_TF_RC_ERROR;
2001 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2003 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2004 struct ulp_rte_parser_params *params)
2006 /* Update the act_bitmap with dec ttl */
2007 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2008 return BNXT_TF_RC_SUCCESS;