1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Local defines for the parsing functions */
16 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
17 #define ULP_VLAN_PRIORITY_MASK 0x700
18 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
19 #define ULP_UDP_PORT_VXLAN 4789
21 /* Utility function to skip the void items. */
/*
 * NOTE(review): this listing has gaps (the embedded source line numbers jump),
 * so the return type, braces and loop body are not visible here.
 * Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries; 'increment'
 * is presumably the per-step pointer advance — confirm against upstream.
 */
23 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
29 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
36 /* Utility function to update the field_bitmap */
/*
 * Sets the per-field index bit when the field's mask is non-zero; if the mask
 * is not all-ones it also flags a wildcard match (BITMASK_WM). Otherwise the
 * index bit is cleared.
 * FIX(review): restored '&params' — the listing had the mojibake '¶ms'
 * ('&para;' entity corruption). No other code bytes changed.
 * NOTE(review): listing has gaps; braces/else lines are missing from view.
 */
38 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
41 struct ulp_rte_hdr_field *field;
43 field = &params->hdr_field[idx];
44 if (ulp_bitmap_notzero(field->mask, field->size)) {
45 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
47 if (!ulp_bitmap_is_ones(field->mask, field->size))
48 ULP_BITMAP_SET(params->fld_bitmap.bits,
49 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
51 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
55 /* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec (field->size bytes) and, per the visible
 * return type, returns a hdr_field pointer — presumably the next field in the
 * array so callers can chain copies. NOTE(review): listing has gaps; the
 * parameter list tail and return statement are not visible here.
 */
56 static struct ulp_rte_hdr_field *
57 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
62 memcpy(field->spec, buffer, field->size);
67 /* Utility function to copy field masks items */
/*
 * Copies 'buffer' into the mask of hdr_field[*idx] and refreshes the field
 * bitmap for that index via ulp_rte_parser_field_bitmap_update().
 * FIX(review): restored '&params' — the listing had the mojibake '¶ms'
 * ('&para;' entity corruption). No other code bytes changed.
 * NOTE(review): listing has gaps; parameter tail and braces are not visible.
 */
69 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
74 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
76 memcpy(field->mask, buffer, size);
77 ulp_rte_parser_field_bitmap_update(params, *idx);
82 * Function to handle the parsing of RTE Flows and placing
83 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow pattern, dispatching each item to the registered
 * per-protocol handler from ulp_hdr_info[]; unsupported items abort with
 * BNXT_TF_RC_PARSE_ERR. On completion the implicit SVIF match is applied.
 * NOTE(review): listing has gaps; braces, item advance and the debug-macro
 * open lines are missing from view.
 */
86 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
87 struct ulp_rte_parser_params *params)
89 const struct rte_flow_item *item = pattern;
90 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Field 0..SVIF_NUM-1 are reserved for the SVIF match; parsing starts after */
92 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
94 /* Set the computed flags for no vlan tags before parsing */
95 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
96 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
98 /* Parse all the items in the pattern */
99 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
100 /* get the header information from the flow_hdr_info table */
101 hdr_info = &ulp_hdr_info[item->type];
102 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
104 "Truflow parser does not support type %d\n",
106 return BNXT_TF_RC_PARSE_ERR;
107 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
108 /* call the registered callback handler */
109 if (hdr_info->proto_hdr_func) {
110 if (hdr_info->proto_hdr_func(item, params) !=
111 BNXT_TF_RC_SUCCESS) {
112 return BNXT_TF_RC_ERROR;
118 /* update the implied SVIF */
119 return ulp_rte_parser_implicit_match_port_process(params);
123 * Function to handle the parsing of RTE Flows and placing
124 * the RTE flow actions into the ulp structures.
/*
 * Mirrors the header parser for the action list: each action is dispatched to
 * its registered handler from ulp_act_info[]; unsupported actions fail the
 * parse. Afterwards the implicit action port is filled in.
 * NOTE(review): listing has gaps; braces and the action_item advance are
 * missing from view.
 */
127 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
128 struct ulp_rte_parser_params *params)
130 const struct rte_flow_action *action_item = actions;
131 struct bnxt_ulp_rte_act_info *hdr_info;
133 /* Parse all the items in the pattern */
134 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
135 /* get the header information from the flow_hdr_info table */
136 hdr_info = &ulp_act_info[action_item->type];
137 if (hdr_info->act_type ==
138 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
140 "Truflow parser does not support act %u\n",
142 return BNXT_TF_RC_ERROR;
143 } else if (hdr_info->act_type ==
144 BNXT_ULP_ACT_TYPE_SUPPORTED) {
145 /* call the registered callback handler */
146 if (hdr_info->proto_act_func) {
147 if (hdr_info->proto_act_func(action_item,
149 BNXT_TF_RC_SUCCESS) {
150 return BNXT_TF_RC_ERROR;
156 /* update the implied port details */
157 ulp_rte_parser_implicit_act_port_process(params);
158 return BNXT_TF_RC_SUCCESS;
162 * Function to handle the post processing of the computed
163 * fields for the interface.
/*
 * For ingress flows only: maps the incoming dpdk port to a ulp ifindex, reads
 * the physical-port PARIF from the port DB, rebases it into the free-PARIF
 * range and stores it as a computed field.
 * NOTE(review): listing has gaps; the ifindex declaration, error returns and
 * closing braces are missing from view.
 */
166 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
169 uint16_t port_id, parif;
170 enum bnxt_ulp_direction_type dir;
172 /* get the direction details */
173 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
175 if (dir == BNXT_ULP_DIR_INGRESS) {
176 /* read the port id details */
177 port_id = ULP_COMP_FLD_IDX_RD(params,
178 BNXT_ULP_CF_IDX_INCOMING_IF);
179 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
182 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
186 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
187 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
188 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
191 /* Parif needs to be reset to a free partition */
192 parif += BNXT_ULP_FREE_PARIF_BASE;
193 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
199 * Function to handle the post processing of the parsing details
/*
 * After pattern+action parsing: stamps the flow direction into both bitmaps,
 * derives the VF-to-VF flag, converts ACTION_BIT_DEC_TTL into the tunnel or
 * plain dec-TTL computed field (tunnel form only when a VXLAN header matched
 * and no VXLAN decap action is present), folds hdr_fp_bit into hdr_bitmap and
 * refreshes the computed interface fields.
 * NOTE(review): listing has gaps; braces/else arms and the return are missing
 * from view.
 */
202 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
204 enum bnxt_ulp_direction_type dir;
205 enum bnxt_ulp_intf_type match_port_type, act_port_type;
206 uint32_t act_port_set;
208 /* Get the computed details */
209 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
210 match_port_type = ULP_COMP_FLD_IDX_RD(params,
211 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
212 act_port_type = ULP_COMP_FLD_IDX_RD(params,
213 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
214 act_port_set = ULP_COMP_FLD_IDX_RD(params,
215 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
217 /* set the flow direction in the proto and action header */
218 if (dir == BNXT_ULP_DIR_EGRESS) {
219 ULP_BITMAP_SET(params->hdr_bitmap.bits,
220 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
221 ULP_BITMAP_SET(params->act_bitmap.bits,
222 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
225 /* calculate the VF to VF flag */
226 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
227 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
228 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
230 /* Update the decrement ttl computational fields */
231 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
232 BNXT_ULP_ACTION_BIT_DEC_TTL)) {
234 * Check that vxlan proto is included and vxlan decap
235 * action is not set then decrement tunnel ttl.
236 * Similarly add GRE and NVGRE in future.
238 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
239 BNXT_ULP_HDR_BIT_T_VXLAN) &&
240 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
241 BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
242 ULP_COMP_FLD_IDX_WR(params,
243 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
245 ULP_COMP_FLD_IDX_WR(params,
246 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
250 /* Merge the hdr_fp_bit into the proto header bit */
251 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
253 /* Update the computed interface parameters */
254 bnxt_ulp_comp_fld_intf_update(params);
256 /* TBD: Handle the flow rejection scenarios */
261 * Function to compute the flow direction based on the match port details
/*
 * Direction rule: an ingress-attributed flow whose matched port is a VF
 * representor is treated as egress (traffic is headed to the VF); otherwise
 * the flow attribute's own direction is used.
 * NOTE(review): listing has gaps; the else keywords and braces pairing these
 * three writes are missing from view.
 */
264 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
266 enum bnxt_ulp_intf_type match_port_type;
268 /* Get the match port type */
269 match_port_type = ULP_COMP_FLD_IDX_RD(params,
270 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
272 /* If ingress flow and matchport is vf rep then dir is egress*/
273 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
274 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
275 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
276 BNXT_ULP_DIR_EGRESS);
278 /* Assign the input direction */
279 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
280 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
281 BNXT_ULP_DIR_INGRESS);
283 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
284 BNXT_ULP_DIR_EGRESS);
288 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolves the SVIF for the given ulp ifindex and writes it (big-endian) into
 * the dedicated SVIF hdr_field, also recording it as the SVIF_FLAG computed
 * field. Rejects a second source match. SVIF source selection: ingress ->
 * physical port SVIF; otherwise VF-rep -> VF func SVIF, else drv func SVIF.
 * FIX(review): restored '&params' — the listing had the mojibake '¶ms'
 * ('&para;' entity corruption). No other code bytes changed.
 * NOTE(review): listing has gaps; the svif/mask parameters and some braces
 * are missing from view.
 */
290 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
295 enum bnxt_ulp_direction_type dir;
296 struct ulp_rte_hdr_field *hdr_field;
297 enum bnxt_ulp_svif_type svif_type;
298 enum bnxt_ulp_intf_type port_type;
300 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
301 BNXT_ULP_INVALID_SVIF_VAL) {
303 "SVIF already set,multiple source not support'd\n");
304 return BNXT_TF_RC_ERROR;
307 /* Get port type details */
308 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
309 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
310 BNXT_TF_DBG(ERR, "Invalid port type\n");
311 return BNXT_TF_RC_ERROR;
314 /* Update the match port type */
315 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
317 /* compute the direction */
318 bnxt_ulp_rte_parser_direction_compute(params);
320 /* Get the computed direction */
321 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
322 if (dir == BNXT_ULP_DIR_INGRESS) {
323 svif_type = BNXT_ULP_PHY_PORT_SVIF;
325 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
326 svif_type = BNXT_ULP_VF_FUNC_SVIF;
328 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
330 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
332 svif = rte_cpu_to_be_16(svif);
333 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
334 memcpy(hdr_field->spec, &svif, sizeof(svif));
335 memcpy(hdr_field->mask, &mask, sizeof(mask));
336 hdr_field->size = sizeof(svif);
337 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
338 rte_be_to_cpu_16(svif));
339 return BNXT_TF_RC_SUCCESS;
342 /* Function to handle the parsing of the RTE port id */
/*
 * Applies the implicit SVIF match when no explicit source item set it:
 * converts the incoming dpdk port id to a ulp ifindex and delegates to
 * ulp_rte_parser_svif_set() with an all-ones mask.
 * NOTE(review): listing has gaps; the ifindex declaration and final return
 * of rc are missing from view.
 */
344 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
346 uint16_t port_id = 0;
347 uint16_t svif_mask = 0xFFFF;
349 int32_t rc = BNXT_TF_RC_ERROR;
351 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
352 BNXT_ULP_INVALID_SVIF_VAL)
353 return BNXT_TF_RC_SUCCESS;
355 /* SVIF not set. So get the port id */
356 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
358 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
361 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
365 /* Update the SVIF details */
366 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
370 /* Function to handle the implicit action port id */
/*
 * When no port action was specified, synthesizes a port_id action from the
 * incoming interface and runs it through the normal port-id action handler,
 * then clears ACT_PORT_IS_SET so the port is still treated as implicit.
 */
372 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
374 struct rte_flow_action action_item = {0};
375 struct rte_flow_action_port_id port_id = {0};
377 /* Read the action port set bit */
378 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
379 /* Already set, so just exit */
380 return BNXT_TF_RC_SUCCESS;
382 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
383 action_item.conf = &port_id;
385 /* Update the action port based on incoming port */
386 ulp_rte_port_id_act_handler(&action_item, params);
388 /* Reset the action port set bit */
389 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
390 return BNXT_TF_RC_SUCCESS;
393 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item handler: the PF item carries no data, so the match port is the
 * implicit incoming interface; converts it to a ulp ifindex and sets the
 * SVIF with an exact (all-ones) mask.
 * NOTE(review): listing has gaps; the ifindex declaration is missing.
 */
395 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
396 struct ulp_rte_parser_params *params)
398 uint16_t port_id = 0;
399 uint16_t svif_mask = 0xFFFF;
402 /* Get the implicit port id */
403 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
405 /* perform the conversion from dpdk port to bnxt ifindex */
406 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
409 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
410 return BNXT_TF_RC_ERROR;
413 /* Update the SVIF details */
414 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
417 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF item handler: validates the VF spec/mask, maps the VF function id to a
 * ulp ifindex and sets the SVIF match from it.
 * NOTE(review): listing has gaps; the spec/mask null checks, the 'mask'
 * assignment and error returns are missing from view.
 */
419 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
420 struct ulp_rte_parser_params *params)
422 const struct rte_flow_item_vf *vf_spec = item->spec;
423 const struct rte_flow_item_vf *vf_mask = item->mask;
426 int32_t rc = BNXT_TF_RC_PARSE_ERR;
428 /* Get VF rte_flow_item for Port details */
430 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
434 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
439 /* perform the conversion from VF Func id to bnxt ifindex */
440 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
443 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
446 /* Update the SVIF details */
447 return ulp_rte_parser_svif_set(params, ifindex, mask);
450 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * port_id item handler: validates spec/mask, converts the dpdk port id to a
 * ulp ifindex and sets the SVIF match using the item's mask.
 * NOTE(review): listing has gaps; the spec/mask null checks and error returns
 * are missing from view.
 */
452 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
453 struct ulp_rte_parser_params *params)
455 const struct rte_flow_item_port_id *port_spec = item->spec;
456 const struct rte_flow_item_port_id *port_mask = item->mask;
458 int32_t rc = BNXT_TF_RC_PARSE_ERR;
462 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
466 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
469 mask = port_mask->id;
471 /* perform the conversion from dpdk port to bnxt ifindex */
472 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
475 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
478 /* Update the SVIF details */
479 return ulp_rte_parser_svif_set(params, ifindex, mask);
482 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * phy_port item handler: validates spec/mask, forces the match port type to
 * PHY_PORT, computes direction (phy-port match is ingress-only), looks up the
 * physical-port SVIF from the port DB and writes it into the SVIF hdr_field.
 * FIX(review): restored '&params' — the listing had the mojibake '¶ms'
 * ('&para;' entity corruption). No other code bytes changed.
 * NOTE(review): listing has gaps; svif/mask declarations, null checks and
 * some braces are missing from view.
 */
484 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
485 struct ulp_rte_parser_params *params)
487 const struct rte_flow_item_phy_port *port_spec = item->spec;
488 const struct rte_flow_item_phy_port *port_mask = item->mask;
490 int32_t rc = BNXT_TF_RC_ERROR;
492 enum bnxt_ulp_direction_type dir;
493 struct ulp_rte_hdr_field *hdr_field;
495 /* Copy the rte_flow_item for phy port into hdr_field */
497 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
501 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
504 mask = port_mask->index;
506 /* Update the match port type */
507 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
508 BNXT_ULP_INTF_TYPE_PHY_PORT);
510 /* Compute the Hw direction */
511 bnxt_ulp_rte_parser_direction_compute(params);
513 /* Direction validation */
514 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
515 if (dir == BNXT_ULP_DIR_EGRESS) {
517 "Parse Err:Phy ports are valid only for ingress\n");
518 return BNXT_TF_RC_PARSE_ERR;
521 /* Get the physical port details from port db */
522 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
525 BNXT_TF_DBG(ERR, "Failed to get port details\n");
526 return BNXT_TF_RC_PARSE_ERR;
529 /* Update the SVIF details */
530 svif = rte_cpu_to_be_16(svif);
531 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
532 memcpy(hdr_field->spec, &svif, sizeof(svif));
533 memcpy(hdr_field->mask, &mask, sizeof(mask));
534 hdr_field->size = sizeof(svif);
535 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
536 rte_be_to_cpu_16(svif));
537 return BNXT_TF_RC_SUCCESS;
540 /* Function to handle the update of proto header based on field values */
/*
 * From an ethertype (big-endian, as carried in the item), sets the inner or
 * outer IPv4/IPv6 fast-path header bit and the corresponding L3 computed
 * field; 'in_flag' presumably selects inner vs outer — the branching lines
 * are missing from this view, confirm upstream.
 */
542 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
543 uint16_t type, uint32_t in_flag)
545 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
547 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
548 BNXT_ULP_HDR_BIT_I_IPV4);
549 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
551 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
552 BNXT_ULP_HDR_BIT_O_IPV4);
553 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
555 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
557 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
558 BNXT_ULP_HDR_BIT_I_IPV6);
559 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
561 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
562 BNXT_ULP_HDR_BIT_O_IPV6);
563 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
568 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Ethernet item handler: copies dst MAC, src MAC and ethertype spec/mask into
 * hdr_field, reserves the VLAN field slots, marks the outer (or inner, if an
 * outer ETH was already seen) ETH header bit and propagates the ethertype to
 * the L2 proto updater.
 * FIX(review): restored '&params' and '&eth_mask' — the listing had the
 * mojibake '¶ms' and 'ð_mask' ('&para;'/'&eth;' entity corruption).
 * No other code bytes changed.
 * NOTE(review): listing has gaps; spec/mask null guards, the 'size' variable
 * and inner_flag assignment are missing from view.
 */
570 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
571 struct ulp_rte_parser_params *params)
573 const struct rte_flow_item_eth *eth_spec = item->spec;
574 const struct rte_flow_item_eth *eth_mask = item->mask;
575 struct ulp_rte_hdr_field *field;
576 uint32_t idx = params->field_idx;
578 uint16_t eth_type = 0;
579 uint32_t inner_flag = 0;
582 * Copy the rte_flow_item for eth into hdr_field using ethernet
586 size = sizeof(eth_spec->dst.addr_bytes);
587 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
588 eth_spec->dst.addr_bytes,
590 size = sizeof(eth_spec->src.addr_bytes);
591 field = ulp_rte_parser_fld_copy(field,
592 eth_spec->src.addr_bytes,
594 field = ulp_rte_parser_fld_copy(field,
596 sizeof(eth_spec->type));
597 eth_type = eth_spec->type;
600 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
601 sizeof(eth_mask->dst.addr_bytes));
602 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
603 sizeof(eth_mask->src.addr_bytes));
604 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
605 sizeof(eth_mask->type));
607 /* Add number of vlan header elements */
608 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
609 params->vlan_idx = params->field_idx;
610 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
612 /* Update the protocol hdr bitmap */
613 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
614 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
617 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
619 /* Update the field protocol hdr bitmap */
620 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
622 return BNXT_TF_RC_SUCCESS;
625 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * VLAN item handler: splits the TCI into priority (top 3 bits) and tag
 * (low 12 bits) for spec and mask, widens an exact priority/tag mask to full
 * 16-bit exact-match form, then classifies the tag as OO/OI/IO/II VLAN based
 * on which ETH headers were already seen and the current vtag counts,
 * updating the computed one/two-vtag fields accordingly.
 * FIX(review): restored '&params' twice — the listing had the mojibake
 * '¶ms' ('&para;' entity corruption) — and corrected the error-message
 * typo "withtout" -> "without". No other code bytes changed.
 * NOTE(review): listing has gaps; null guards, htons of priority spec args
 * and several brace/else lines are missing from view.
 */
627 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
628 struct ulp_rte_parser_params *params)
630 const struct rte_flow_item_vlan *vlan_spec = item->spec;
631 const struct rte_flow_item_vlan *vlan_mask = item->mask;
632 struct ulp_rte_hdr_field *field;
633 struct ulp_rte_hdr_bitmap *hdr_bit;
634 uint32_t idx = params->vlan_idx;
635 uint16_t vlan_tag, priority;
636 uint32_t outer_vtag_num;
637 uint32_t inner_vtag_num;
638 uint16_t eth_type = 0;
639 uint32_t inner_flag = 0;
642 * Copy the rte_flow_item for vlan into hdr_field using Vlan
646 vlan_tag = ntohs(vlan_spec->tci);
647 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
648 vlan_tag &= ULP_VLAN_TAG_MASK;
649 vlan_tag = htons(vlan_tag);
651 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
654 field = ulp_rte_parser_fld_copy(field,
657 field = ulp_rte_parser_fld_copy(field,
658 &vlan_spec->inner_type,
659 sizeof(vlan_spec->inner_type));
660 eth_type = vlan_spec->inner_type;
664 vlan_tag = ntohs(vlan_mask->tci);
665 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
669 * the storage for priority and vlan tag is 2 bytes
670 * The mask of priority which is 3 bits if it is all 1's
671 * then make the rest bits 13 bits as 1's
672 * so that it is matched as exact match.
674 if (priority == ULP_VLAN_PRIORITY_MASK)
675 priority |= ~ULP_VLAN_PRIORITY_MASK;
676 if (vlan_tag == ULP_VLAN_TAG_MASK)
677 vlan_tag |= ~ULP_VLAN_TAG_MASK;
678 vlan_tag = htons(vlan_tag);
680 ulp_rte_prsr_mask_copy(params, &idx, &priority,
682 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
684 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
685 sizeof(vlan_mask->inner_type));
687 /* Set the vlan index to new incremented value */
688 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
690 /* Get the outer tag and inner tag counts */
691 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
692 BNXT_ULP_CF_IDX_O_VTAG_NUM);
693 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
694 BNXT_ULP_CF_IDX_I_VTAG_NUM);
696 /* Update the hdr_bitmap of the vlans */
697 hdr_bit = &params->hdr_bitmap;
698 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
699 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
701 /* Update the vlan tag num */
703 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
705 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
706 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
707 ULP_BITMAP_SET(params->hdr_bitmap.bits,
708 BNXT_ULP_HDR_BIT_OO_VLAN);
709 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
710 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
711 outer_vtag_num == 1) {
712 /* update the vlan tag num */
714 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
716 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
717 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
718 ULP_BITMAP_SET(params->hdr_bitmap.bits,
719 BNXT_ULP_HDR_BIT_OI_VLAN);
720 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
721 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
723 /* update the vlan tag num */
725 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
727 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
728 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
729 ULP_BITMAP_SET(params->hdr_bitmap.bits,
730 BNXT_ULP_HDR_BIT_IO_VLAN);
732 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
733 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
734 inner_vtag_num == 1) {
735 /* update the vlan tag num */
737 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
739 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
740 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
741 ULP_BITMAP_SET(params->hdr_bitmap.bits,
742 BNXT_ULP_HDR_BIT_II_VLAN);
745 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
746 return BNXT_TF_RC_ERROR;
748 /* Update the field protocol hdr bitmap */
749 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
750 return BNXT_TF_RC_SUCCESS;
753 /* Function to handle the update of proto header based on field values */
/*
 * From an IP protocol number, sets the inner or outer UDP/TCP fast-path
 * header bit and the corresponding L4 computed field; 'in_flag' presumably
 * selects inner vs outer — the branching lines are missing from this view,
 * confirm upstream.
 */
755 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
756 uint8_t proto, uint32_t in_flag)
758 if (proto == IPPROTO_UDP) {
760 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
761 BNXT_ULP_HDR_BIT_I_UDP);
762 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
764 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
765 BNXT_ULP_HDR_BIT_O_UDP);
766 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
768 } else if (proto == IPPROTO_TCP) {
770 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
771 BNXT_ULP_HDR_BIT_I_TCP);
772 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
774 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
775 BNXT_ULP_HDR_BIT_O_TCP);
776 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
781 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * IPv4 item handler: rejects a third L3 header, copies all ten ipv4 hdr
 * fields (spec and mask) into hdr_field, advances field_idx, marks the inner
 * or outer IPv4 header bit and L3 computed field, feeds next_proto_id to the
 * L3 proto updater, and bumps the L3 header count.
 * The ToS mask copy is compiled out unless ULP_DONT_IGNORE_TOS because OVS
 * wildcards it (per the retained comment below).
 * FIX(review): restored '&params' twice — the listing had the mojibake
 * '¶ms' ('&para;' entity corruption). No other code bytes changed.
 * NOTE(review): listing has gaps; null guards, local declarations (size,
 * proto, cnt), the cnt limit check and #else/#endif lines are missing.
 */
783 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
784 struct ulp_rte_parser_params *params)
786 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
787 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
788 struct ulp_rte_hdr_field *field;
789 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
790 uint32_t idx = params->field_idx;
793 uint32_t inner_flag = 0;
796 /* validate there are no 3rd L3 header */
797 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
799 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
800 return BNXT_TF_RC_ERROR;
804 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
808 size = sizeof(ipv4_spec->hdr.version_ihl);
809 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
810 &ipv4_spec->hdr.version_ihl,
812 size = sizeof(ipv4_spec->hdr.type_of_service);
813 field = ulp_rte_parser_fld_copy(field,
814 &ipv4_spec->hdr.type_of_service,
816 size = sizeof(ipv4_spec->hdr.total_length);
817 field = ulp_rte_parser_fld_copy(field,
818 &ipv4_spec->hdr.total_length,
820 size = sizeof(ipv4_spec->hdr.packet_id);
821 field = ulp_rte_parser_fld_copy(field,
822 &ipv4_spec->hdr.packet_id,
824 size = sizeof(ipv4_spec->hdr.fragment_offset);
825 field = ulp_rte_parser_fld_copy(field,
826 &ipv4_spec->hdr.fragment_offset,
828 size = sizeof(ipv4_spec->hdr.time_to_live);
829 field = ulp_rte_parser_fld_copy(field,
830 &ipv4_spec->hdr.time_to_live,
832 size = sizeof(ipv4_spec->hdr.next_proto_id);
833 field = ulp_rte_parser_fld_copy(field,
834 &ipv4_spec->hdr.next_proto_id,
836 proto = ipv4_spec->hdr.next_proto_id;
837 size = sizeof(ipv4_spec->hdr.hdr_checksum);
838 field = ulp_rte_parser_fld_copy(field,
839 &ipv4_spec->hdr.hdr_checksum,
841 size = sizeof(ipv4_spec->hdr.src_addr);
842 field = ulp_rte_parser_fld_copy(field,
843 &ipv4_spec->hdr.src_addr,
845 size = sizeof(ipv4_spec->hdr.dst_addr);
846 field = ulp_rte_parser_fld_copy(field,
847 &ipv4_spec->hdr.dst_addr,
851 ulp_rte_prsr_mask_copy(params, &idx,
852 &ipv4_mask->hdr.version_ihl,
853 sizeof(ipv4_mask->hdr.version_ihl));
854 #ifdef ULP_DONT_IGNORE_TOS
855 ulp_rte_prsr_mask_copy(params, &idx,
856 &ipv4_mask->hdr.type_of_service,
857 sizeof(ipv4_mask->hdr.type_of_service));
860 * The tos field is ignored since OVS is setting it as wild card
861 * match and it is not supported. This is a work around and
862 * shall be addressed in the future.
867 ulp_rte_prsr_mask_copy(params, &idx,
868 &ipv4_mask->hdr.total_length,
869 sizeof(ipv4_mask->hdr.total_length));
870 ulp_rte_prsr_mask_copy(params, &idx,
871 &ipv4_mask->hdr.packet_id,
872 sizeof(ipv4_mask->hdr.packet_id));
873 ulp_rte_prsr_mask_copy(params, &idx,
874 &ipv4_mask->hdr.fragment_offset,
875 sizeof(ipv4_mask->hdr.fragment_offset));
876 ulp_rte_prsr_mask_copy(params, &idx,
877 &ipv4_mask->hdr.time_to_live,
878 sizeof(ipv4_mask->hdr.time_to_live));
879 ulp_rte_prsr_mask_copy(params, &idx,
880 &ipv4_mask->hdr.next_proto_id,
881 sizeof(ipv4_mask->hdr.next_proto_id));
882 ulp_rte_prsr_mask_copy(params, &idx,
883 &ipv4_mask->hdr.hdr_checksum,
884 sizeof(ipv4_mask->hdr.hdr_checksum));
885 ulp_rte_prsr_mask_copy(params, &idx,
886 &ipv4_mask->hdr.src_addr,
887 sizeof(ipv4_mask->hdr.src_addr));
888 ulp_rte_prsr_mask_copy(params, &idx,
889 &ipv4_mask->hdr.dst_addr,
890 sizeof(ipv4_mask->hdr.dst_addr));
892 /* Add the number of ipv4 header elements */
893 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
895 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
896 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
897 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
898 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
899 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
902 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
903 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
906 /* Update the field protocol hdr bitmap */
907 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
908 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
909 return BNXT_TF_RC_SUCCESS;
912 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPv6 item handler: rejects a third L3 header, splits vtc_flow into
 * version/TC/flow-label sub-fields (spec and mask) via the BNXT_ULP_GET_IPV6_*
 * extractors, copies the remaining ipv6 hdr fields, advances field_idx, marks
 * the inner or outer IPv6 header bit and L3 computed field, feeds 'proto' to
 * the L3 proto updater, and bumps the L3 header count.
 * FIX(review): restored '&params' twice — the listing had the mojibake
 * '¶ms' ('&para;' entity corruption). No other code bytes changed.
 * NOTE(review): listing has gaps; null guards, local declarations and the
 * vtcf copy-size arguments are missing from view.
 */
914 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
915 struct ulp_rte_parser_params *params)
917 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
918 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
919 struct ulp_rte_hdr_field *field;
920 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
921 uint32_t idx = params->field_idx;
923 uint32_t vtcf, vtcf_mask;
925 uint32_t inner_flag = 0;
928 /* validate there are no 3rd L3 header */
929 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
931 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
932 return BNXT_TF_RC_ERROR;
936 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
940 size = sizeof(ipv6_spec->hdr.vtc_flow);
942 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
943 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
947 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
948 field = ulp_rte_parser_fld_copy(field,
952 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
953 field = ulp_rte_parser_fld_copy(field,
957 size = sizeof(ipv6_spec->hdr.payload_len);
958 field = ulp_rte_parser_fld_copy(field,
959 &ipv6_spec->hdr.payload_len,
961 size = sizeof(ipv6_spec->hdr.proto);
962 field = ulp_rte_parser_fld_copy(field,
963 &ipv6_spec->hdr.proto,
965 proto = ipv6_spec->hdr.proto;
966 size = sizeof(ipv6_spec->hdr.hop_limits);
967 field = ulp_rte_parser_fld_copy(field,
968 &ipv6_spec->hdr.hop_limits,
970 size = sizeof(ipv6_spec->hdr.src_addr);
971 field = ulp_rte_parser_fld_copy(field,
972 &ipv6_spec->hdr.src_addr,
974 size = sizeof(ipv6_spec->hdr.dst_addr);
975 field = ulp_rte_parser_fld_copy(field,
976 &ipv6_spec->hdr.dst_addr,
980 size = sizeof(ipv6_mask->hdr.vtc_flow);
982 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
983 ulp_rte_prsr_mask_copy(params, &idx,
987 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
988 ulp_rte_prsr_mask_copy(params, &idx,
993 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
994 ulp_rte_prsr_mask_copy(params, &idx,
998 ulp_rte_prsr_mask_copy(params, &idx,
999 &ipv6_mask->hdr.payload_len,
1000 sizeof(ipv6_mask->hdr.payload_len));
1001 ulp_rte_prsr_mask_copy(params, &idx,
1002 &ipv6_mask->hdr.proto,
1003 sizeof(ipv6_mask->hdr.proto));
1004 ulp_rte_prsr_mask_copy(params, &idx,
1005 &ipv6_mask->hdr.hop_limits,
1006 sizeof(ipv6_mask->hdr.hop_limits));
1007 ulp_rte_prsr_mask_copy(params, &idx,
1008 &ipv6_mask->hdr.src_addr,
1009 sizeof(ipv6_mask->hdr.src_addr));
1010 ulp_rte_prsr_mask_copy(params, &idx,
1011 &ipv6_mask->hdr.dst_addr,
1012 sizeof(ipv6_mask->hdr.dst_addr));
1014 /* add number of ipv6 header elements */
1015 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1017 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1018 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1019 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1020 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1021 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1024 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1025 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1028 /* Update the field protocol hdr bitmap */
1029 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1030 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1032 return BNXT_TF_RC_SUCCESS;
1035 /* Function to handle the update of proto header based on field values */
/*
 * If the L4 destination port equals the IANA VXLAN port (4789, compared in
 * big-endian), marks the tunnel-VXLAN fast-path header bit.
 * NOTE(review): listing has gaps; the parameter list tail and braces are
 * missing from view.
 */
1037 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1040 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1041 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1042 BNXT_ULP_HDR_BIT_T_VXLAN);
1045 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * UDP item handler: rejects a third L4 header, copies src/dst port, length
 * and checksum (spec and mask) into hdr_field, advances field_idx, marks the
 * inner or outer UDP header bit and L4 computed field, and for the outer case
 * checks the dst port for the VXLAN tunnel bit; bumps the L4 header count.
 * FIX(review): restored '&params' twice — the listing had the mojibake
 * '¶ms' ('&para;' entity corruption). No other code bytes changed.
 * NOTE(review): listing has gaps; null guards, local declarations (size, cnt)
 * and the cnt limit check are missing from view.
 */
1047 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1048 struct ulp_rte_parser_params *params)
1050 const struct rte_flow_item_udp *udp_spec = item->spec;
1051 const struct rte_flow_item_udp *udp_mask = item->mask;
1052 struct ulp_rte_hdr_field *field;
1053 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1054 uint32_t idx = params->field_idx;
1056 uint16_t dst_port = 0;
1059 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1061 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1062 return BNXT_TF_RC_ERROR;
1066 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1070 size = sizeof(udp_spec->hdr.src_port);
1071 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1072 &udp_spec->hdr.src_port,
1074 size = sizeof(udp_spec->hdr.dst_port);
1075 field = ulp_rte_parser_fld_copy(field,
1076 &udp_spec->hdr.dst_port,
1078 dst_port = udp_spec->hdr.dst_port;
1079 size = sizeof(udp_spec->hdr.dgram_len);
1080 field = ulp_rte_parser_fld_copy(field,
1081 &udp_spec->hdr.dgram_len,
1083 size = sizeof(udp_spec->hdr.dgram_cksum);
1084 field = ulp_rte_parser_fld_copy(field,
1085 &udp_spec->hdr.dgram_cksum,
1089 ulp_rte_prsr_mask_copy(params, &idx,
1090 &udp_mask->hdr.src_port,
1091 sizeof(udp_mask->hdr.src_port));
1092 ulp_rte_prsr_mask_copy(params, &idx,
1093 &udp_mask->hdr.dst_port,
1094 sizeof(udp_mask->hdr.dst_port));
1095 ulp_rte_prsr_mask_copy(params, &idx,
1096 &udp_mask->hdr.dgram_len,
1097 sizeof(udp_mask->hdr.dgram_len));
1098 ulp_rte_prsr_mask_copy(params, &idx,
1099 &udp_mask->hdr.dgram_cksum,
1100 sizeof(udp_mask->hdr.dgram_cksum));
1103 /* Add number of UDP header elements */
1104 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1106 /* Set the udp header bitmap and computed l4 header bitmaps */
1107 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1108 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1109 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1110 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1112 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1113 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1114 /* Update the field protocol hdr bitmap */
1115 ulp_rte_l4_proto_type_update(params, dst_port);
1117 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1118 return BNXT_TF_RC_SUCCESS;
1121 /* Function to handle the parsing of RTE Flow item TCP Header. */
1123 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1124 struct ulp_rte_parser_params *params)
1126 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1127 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1128 struct ulp_rte_hdr_field *field;
1129 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1130 uint32_t idx = params->field_idx;
1134 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1136 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1137 return BNXT_TF_RC_ERROR;
1141 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1145 size = sizeof(tcp_spec->hdr.src_port);
1146 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1147 &tcp_spec->hdr.src_port,
1149 size = sizeof(tcp_spec->hdr.dst_port);
1150 field = ulp_rte_parser_fld_copy(field,
1151 &tcp_spec->hdr.dst_port,
1153 size = sizeof(tcp_spec->hdr.sent_seq);
1154 field = ulp_rte_parser_fld_copy(field,
1155 &tcp_spec->hdr.sent_seq,
1157 size = sizeof(tcp_spec->hdr.recv_ack);
1158 field = ulp_rte_parser_fld_copy(field,
1159 &tcp_spec->hdr.recv_ack,
1161 size = sizeof(tcp_spec->hdr.data_off);
1162 field = ulp_rte_parser_fld_copy(field,
1163 &tcp_spec->hdr.data_off,
1165 size = sizeof(tcp_spec->hdr.tcp_flags);
1166 field = ulp_rte_parser_fld_copy(field,
1167 &tcp_spec->hdr.tcp_flags,
1169 size = sizeof(tcp_spec->hdr.rx_win);
1170 field = ulp_rte_parser_fld_copy(field,
1171 &tcp_spec->hdr.rx_win,
1173 size = sizeof(tcp_spec->hdr.cksum);
1174 field = ulp_rte_parser_fld_copy(field,
1175 &tcp_spec->hdr.cksum,
1177 size = sizeof(tcp_spec->hdr.tcp_urp);
1178 field = ulp_rte_parser_fld_copy(field,
1179 &tcp_spec->hdr.tcp_urp,
1182 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1186 ulp_rte_prsr_mask_copy(params, &idx,
1187 &tcp_mask->hdr.src_port,
1188 sizeof(tcp_mask->hdr.src_port));
1189 ulp_rte_prsr_mask_copy(params, &idx,
1190 &tcp_mask->hdr.dst_port,
1191 sizeof(tcp_mask->hdr.dst_port));
1192 ulp_rte_prsr_mask_copy(params, &idx,
1193 &tcp_mask->hdr.sent_seq,
1194 sizeof(tcp_mask->hdr.sent_seq));
1195 ulp_rte_prsr_mask_copy(params, &idx,
1196 &tcp_mask->hdr.recv_ack,
1197 sizeof(tcp_mask->hdr.recv_ack));
1198 ulp_rte_prsr_mask_copy(params, &idx,
1199 &tcp_mask->hdr.data_off,
1200 sizeof(tcp_mask->hdr.data_off));
1201 ulp_rte_prsr_mask_copy(params, &idx,
1202 &tcp_mask->hdr.tcp_flags,
1203 sizeof(tcp_mask->hdr.tcp_flags));
1204 ulp_rte_prsr_mask_copy(params, &idx,
1205 &tcp_mask->hdr.rx_win,
1206 sizeof(tcp_mask->hdr.rx_win));
1207 ulp_rte_prsr_mask_copy(params, &idx,
1208 &tcp_mask->hdr.cksum,
1209 sizeof(tcp_mask->hdr.cksum));
1210 ulp_rte_prsr_mask_copy(params, &idx,
1211 &tcp_mask->hdr.tcp_urp,
1212 sizeof(tcp_mask->hdr.tcp_urp));
1214 /* add number of TCP header elements */
1215 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1217 /* Set the udp header bitmap and computed l4 header bitmaps */
1218 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1219 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1220 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1221 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1223 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1224 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1226 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1227 return BNXT_TF_RC_SUCCESS;
1230 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1232 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1233 struct ulp_rte_parser_params *params)
1235 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1236 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1237 struct ulp_rte_hdr_field *field;
1238 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1239 uint32_t idx = params->field_idx;
1243 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1247 size = sizeof(vxlan_spec->flags);
1248 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1251 size = sizeof(vxlan_spec->rsvd0);
1252 field = ulp_rte_parser_fld_copy(field,
1255 size = sizeof(vxlan_spec->vni);
1256 field = ulp_rte_parser_fld_copy(field,
1259 size = sizeof(vxlan_spec->rsvd1);
1260 field = ulp_rte_parser_fld_copy(field,
1265 ulp_rte_prsr_mask_copy(params, &idx,
1267 sizeof(vxlan_mask->flags));
1268 ulp_rte_prsr_mask_copy(params, &idx,
1270 sizeof(vxlan_mask->rsvd0));
1271 ulp_rte_prsr_mask_copy(params, &idx,
1273 sizeof(vxlan_mask->vni));
1274 ulp_rte_prsr_mask_copy(params, &idx,
1276 sizeof(vxlan_mask->rsvd1));
1278 /* Add number of vxlan header elements */
1279 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1281 /* Update the hdr_bitmap with vxlan */
1282 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1283 return BNXT_TF_RC_SUCCESS;
1286 /* Function to handle the parsing of RTE Flow item void Header */
1288 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1289 struct ulp_rte_parser_params *params __rte_unused)
1291 return BNXT_TF_RC_SUCCESS;
1294 /* Function to handle the parsing of RTE Flow action void Header. */
1296 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1297 struct ulp_rte_parser_params *params __rte_unused)
1299 return BNXT_TF_RC_SUCCESS;
1302 /* Function to handle the parsing of RTE Flow action Mark Header. */
1304 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1305 struct ulp_rte_parser_params *param)
1307 const struct rte_flow_action_mark *mark;
1308 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1311 mark = action_item->conf;
1313 mark_id = tfp_cpu_to_be_32(mark->id);
1314 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1315 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1317 /* Update the hdr_bitmap with vxlan */
1318 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1319 return BNXT_TF_RC_SUCCESS;
1321 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1322 return BNXT_TF_RC_ERROR;
1325 /* Function to handle the parsing of RTE Flow action RSS Header. */
1327 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1328 struct ulp_rte_parser_params *param)
1330 const struct rte_flow_action_rss *rss = action_item->conf;
1333 /* Update the hdr_bitmap with vxlan */
1334 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1335 return BNXT_TF_RC_SUCCESS;
1337 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1338 return BNXT_TF_RC_ERROR;
1341 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1343 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1344 struct ulp_rte_parser_params *params)
1346 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1347 const struct rte_flow_item *item;
1348 const struct rte_flow_item_eth *eth_spec;
1349 const struct rte_flow_item_ipv4 *ipv4_spec;
1350 const struct rte_flow_item_ipv6 *ipv6_spec;
1351 struct rte_flow_item_vxlan vxlan_spec;
1352 uint32_t vlan_num = 0, vlan_size = 0;
1353 uint32_t ip_size = 0, ip_type = 0;
1354 uint32_t vxlan_size = 0;
1356 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1357 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1359 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1360 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1362 vxlan_encap = action_item->conf;
1364 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1365 return BNXT_TF_RC_ERROR;
1368 item = vxlan_encap->definition;
1370 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1371 return BNXT_TF_RC_ERROR;
1374 if (!ulp_rte_item_skip_void(&item, 0))
1375 return BNXT_TF_RC_ERROR;
1377 /* must have ethernet header */
1378 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1379 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1380 return BNXT_TF_RC_ERROR;
1382 eth_spec = item->spec;
1383 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1384 ulp_encap_buffer_copy(buff,
1385 eth_spec->dst.addr_bytes,
1386 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1388 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1389 ulp_encap_buffer_copy(buff,
1390 eth_spec->src.addr_bytes,
1391 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1393 /* Goto the next item */
1394 if (!ulp_rte_item_skip_void(&item, 1))
1395 return BNXT_TF_RC_ERROR;
1397 /* May have vlan header */
1398 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1400 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1401 ulp_encap_buffer_copy(buff,
1403 sizeof(struct rte_flow_item_vlan));
1405 if (!ulp_rte_item_skip_void(&item, 1))
1406 return BNXT_TF_RC_ERROR;
1409 /* may have two vlan headers */
1410 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1412 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1413 sizeof(struct rte_flow_item_vlan)],
1415 sizeof(struct rte_flow_item_vlan));
1416 if (!ulp_rte_item_skip_void(&item, 1))
1417 return BNXT_TF_RC_ERROR;
1419 /* Update the vlan count and size of more than one */
1421 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1422 vlan_num = tfp_cpu_to_be_32(vlan_num);
1423 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1426 vlan_size = tfp_cpu_to_be_32(vlan_size);
1427 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1432 /* L3 must be IPv4, IPv6 */
1433 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1434 ipv4_spec = item->spec;
1435 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1437 /* copy the ipv4 details */
1438 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1439 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1440 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1441 ulp_encap_buffer_copy(buff,
1443 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1444 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1446 const uint8_t *tmp_buff;
1448 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1449 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1450 ulp_encap_buffer_copy(buff,
1452 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1453 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1454 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1455 ulp_encap_buffer_copy(buff,
1456 &ipv4_spec->hdr.version_ihl,
1457 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1459 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1460 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1461 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1462 ulp_encap_buffer_copy(buff,
1463 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1464 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1466 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1467 ulp_encap_buffer_copy(buff,
1468 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1469 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1471 /* Update the ip size details */
1472 ip_size = tfp_cpu_to_be_32(ip_size);
1473 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1474 &ip_size, sizeof(uint32_t));
1476 /* update the ip type */
1477 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1478 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1479 &ip_type, sizeof(uint32_t));
1481 /* update the computed field to notify it is ipv4 header */
1482 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1485 if (!ulp_rte_item_skip_void(&item, 1))
1486 return BNXT_TF_RC_ERROR;
1487 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1488 ipv6_spec = item->spec;
1489 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1491 /* copy the ipv4 details */
1492 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1493 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1495 /* Update the ip size details */
1496 ip_size = tfp_cpu_to_be_32(ip_size);
1497 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1498 &ip_size, sizeof(uint32_t));
1500 /* update the ip type */
1501 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1502 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1503 &ip_type, sizeof(uint32_t));
1505 /* update the computed field to notify it is ipv6 header */
1506 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1509 if (!ulp_rte_item_skip_void(&item, 1))
1510 return BNXT_TF_RC_ERROR;
1512 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1513 return BNXT_TF_RC_ERROR;
1517 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1518 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1519 return BNXT_TF_RC_ERROR;
1521 /* copy the udp details */
1522 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1523 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1525 if (!ulp_rte_item_skip_void(&item, 1))
1526 return BNXT_TF_RC_ERROR;
1529 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1530 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1531 return BNXT_TF_RC_ERROR;
1533 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1534 /* copy the vxlan details */
1535 memcpy(&vxlan_spec, item->spec, vxlan_size);
1536 vxlan_spec.flags = 0x08;
1537 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1538 (const uint8_t *)&vxlan_spec,
1540 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1541 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1542 &vxlan_size, sizeof(uint32_t));
1544 /* update the hdr_bitmap with vxlan */
1545 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1546 return BNXT_TF_RC_SUCCESS;
1549 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1551 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1553 struct ulp_rte_parser_params *params)
1555 /* update the hdr_bitmap with vxlan */
1556 ULP_BITMAP_SET(params->act_bitmap.bits,
1557 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1558 return BNXT_TF_RC_SUCCESS;
1561 /* Function to handle the parsing of RTE Flow action drop Header. */
1563 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1564 struct ulp_rte_parser_params *params)
1566 /* Update the hdr_bitmap with drop */
1567 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1568 return BNXT_TF_RC_SUCCESS;
1571 /* Function to handle the parsing of RTE Flow action count. */
1573 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1574 struct ulp_rte_parser_params *params)
1577 const struct rte_flow_action_count *act_count;
1578 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1580 act_count = action_item->conf;
1582 if (act_count->shared) {
1584 "Parse Error:Shared count not supported\n");
1585 return BNXT_TF_RC_PARSE_ERR;
1587 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1589 BNXT_ULP_ACT_PROP_SZ_COUNT);
1592 /* Update the hdr_bitmap with count */
1593 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1594 return BNXT_TF_RC_SUCCESS;
1597 /* Function to handle the parsing of action ports. */
1599 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1602 enum bnxt_ulp_direction_type dir;
1605 struct ulp_rte_act_prop *act = ¶m->act_prop;
1606 enum bnxt_ulp_intf_type port_type;
1609 /* Get the direction */
1610 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1611 if (dir == BNXT_ULP_DIR_EGRESS) {
1612 /* For egress direction, fill vport */
1613 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1614 return BNXT_TF_RC_ERROR;
1617 pid = rte_cpu_to_be_32(pid);
1618 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1619 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1621 /* For ingress direction, fill vnic */
1622 port_type = ULP_COMP_FLD_IDX_RD(param,
1623 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1624 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1625 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1627 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1629 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1631 return BNXT_TF_RC_ERROR;
1634 pid = rte_cpu_to_be_32(pid);
1635 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1636 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1639 /* Update the action port set bit */
1640 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1641 return BNXT_TF_RC_SUCCESS;
1644 /* Function to handle the parsing of RTE Flow action PF. */
1646 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1647 struct ulp_rte_parser_params *params)
1651 enum bnxt_ulp_intf_type intf_type;
1653 /* Get the port id of the current device */
1654 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1656 /* Get the port db ifindex */
1657 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1659 BNXT_TF_DBG(ERR, "Invalid port id\n");
1660 return BNXT_TF_RC_ERROR;
1663 /* Check the port is PF port */
1664 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1665 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1666 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1667 return BNXT_TF_RC_ERROR;
1669 /* Update the action properties */
1670 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1671 return ulp_rte_parser_act_port_set(params, ifindex);
1674 /* Function to handle the parsing of RTE Flow action VF. */
1676 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1677 struct ulp_rte_parser_params *params)
1679 const struct rte_flow_action_vf *vf_action;
1681 enum bnxt_ulp_intf_type intf_type;
1683 vf_action = action_item->conf;
1685 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1686 return BNXT_TF_RC_PARSE_ERR;
1689 if (vf_action->original) {
1690 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1691 return BNXT_TF_RC_PARSE_ERR;
1694 /* Check the port is VF port */
1695 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1697 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1698 return BNXT_TF_RC_ERROR;
1700 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1701 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1702 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1703 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1704 return BNXT_TF_RC_ERROR;
1707 /* Update the action properties */
1708 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1709 return ulp_rte_parser_act_port_set(params, ifindex);
1712 /* Function to handle the parsing of RTE Flow action port_id. */
1714 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1715 struct ulp_rte_parser_params *param)
1717 const struct rte_flow_action_port_id *port_id = act_item->conf;
1719 enum bnxt_ulp_intf_type intf_type;
1723 "ParseErr: Invalid Argument\n");
1724 return BNXT_TF_RC_PARSE_ERR;
1726 if (port_id->original) {
1728 "ParseErr:Portid Original not supported\n");
1729 return BNXT_TF_RC_PARSE_ERR;
1732 /* Get the port db ifindex */
1733 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1735 BNXT_TF_DBG(ERR, "Invalid port id\n");
1736 return BNXT_TF_RC_ERROR;
1739 /* Get the intf type */
1740 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1742 BNXT_TF_DBG(ERR, "Invalid port type\n");
1743 return BNXT_TF_RC_ERROR;
1746 /* Set the action port */
1747 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1748 return ulp_rte_parser_act_port_set(param, ifindex);
1751 /* Function to handle the parsing of RTE Flow action phy_port. */
1753 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1754 struct ulp_rte_parser_params *prm)
1756 const struct rte_flow_action_phy_port *phy_port;
1760 enum bnxt_ulp_direction_type dir;
1762 phy_port = action_item->conf;
1765 "ParseErr: Invalid Argument\n");
1766 return BNXT_TF_RC_PARSE_ERR;
1769 if (phy_port->original) {
1771 "Parse Err:Port Original not supported\n");
1772 return BNXT_TF_RC_PARSE_ERR;
1774 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1775 if (dir != BNXT_ULP_DIR_EGRESS) {
1777 "Parse Err:Phy ports are valid only for egress\n");
1778 return BNXT_TF_RC_PARSE_ERR;
1780 /* Get the physical port details from port db */
1781 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1784 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1789 pid = rte_cpu_to_be_32(pid);
1790 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1791 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1793 /* Update the action port set bit */
1794 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1795 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1796 BNXT_ULP_INTF_TYPE_PHY_PORT);
1797 return BNXT_TF_RC_SUCCESS;
1800 /* Function to handle the parsing of RTE Flow action pop vlan. */
1802 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1803 struct ulp_rte_parser_params *params)
1805 /* Update the act_bitmap with pop */
1806 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1807 return BNXT_TF_RC_SUCCESS;
1810 /* Function to handle the parsing of RTE Flow action push vlan. */
1812 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1813 struct ulp_rte_parser_params *params)
1815 const struct rte_flow_action_of_push_vlan *push_vlan;
1817 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1819 push_vlan = action_item->conf;
1821 ethertype = push_vlan->ethertype;
1822 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1824 "Parse Err: Ethertype not supported\n");
1825 return BNXT_TF_RC_PARSE_ERR;
1827 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1828 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1829 /* Update the hdr_bitmap with push vlan */
1830 ULP_BITMAP_SET(params->act_bitmap.bits,
1831 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1832 return BNXT_TF_RC_SUCCESS;
1834 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1835 return BNXT_TF_RC_ERROR;
1838 /* Function to handle the parsing of RTE Flow action set vlan id. */
1840 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1841 struct ulp_rte_parser_params *params)
1843 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1845 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1847 vlan_vid = action_item->conf;
1848 if (vlan_vid && vlan_vid->vlan_vid) {
1849 vid = vlan_vid->vlan_vid;
1850 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1851 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1852 /* Update the hdr_bitmap with vlan vid */
1853 ULP_BITMAP_SET(params->act_bitmap.bits,
1854 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1855 return BNXT_TF_RC_SUCCESS;
1857 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1858 return BNXT_TF_RC_ERROR;
1861 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1863 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1864 struct ulp_rte_parser_params *params)
1866 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1868 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1870 vlan_pcp = action_item->conf;
1872 pcp = vlan_pcp->vlan_pcp;
1873 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1874 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1875 /* Update the hdr_bitmap with vlan vid */
1876 ULP_BITMAP_SET(params->act_bitmap.bits,
1877 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1878 return BNXT_TF_RC_SUCCESS;
1880 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1881 return BNXT_TF_RC_ERROR;
1884 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
1886 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
1887 struct ulp_rte_parser_params *params)
1889 const struct rte_flow_action_set_ipv4 *set_ipv4;
1890 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1892 set_ipv4 = action_item->conf;
1894 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
1895 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
1896 /* Update the hdr_bitmap with set ipv4 src */
1897 ULP_BITMAP_SET(params->act_bitmap.bits,
1898 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
1899 return BNXT_TF_RC_SUCCESS;
1901 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
1902 return BNXT_TF_RC_ERROR;
1905 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
1907 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
1908 struct ulp_rte_parser_params *params)
1910 const struct rte_flow_action_set_ipv4 *set_ipv4;
1911 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1913 set_ipv4 = action_item->conf;
1915 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
1916 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
1917 /* Update the hdr_bitmap with set ipv4 dst */
1918 ULP_BITMAP_SET(params->act_bitmap.bits,
1919 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
1920 return BNXT_TF_RC_SUCCESS;
1922 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
1923 return BNXT_TF_RC_ERROR;
1926 /* Function to handle the parsing of RTE Flow action set tp src.*/
1928 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
1929 struct ulp_rte_parser_params *params)
1931 const struct rte_flow_action_set_tp *set_tp;
1932 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1934 set_tp = action_item->conf;
1936 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
1937 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
1938 /* Update the hdr_bitmap with set tp src */
1939 ULP_BITMAP_SET(params->act_bitmap.bits,
1940 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
1941 return BNXT_TF_RC_SUCCESS;
1944 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1945 return BNXT_TF_RC_ERROR;
1948 /* Function to handle the parsing of RTE Flow action set tp dst.*/
1950 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
1951 struct ulp_rte_parser_params *params)
1953 const struct rte_flow_action_set_tp *set_tp;
1954 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1956 set_tp = action_item->conf;
1958 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
1959 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
1960 /* Update the hdr_bitmap with set tp dst */
1961 ULP_BITMAP_SET(params->act_bitmap.bits,
1962 BNXT_ULP_ACTION_BIT_SET_TP_DST);
1963 return BNXT_TF_RC_SUCCESS;
1966 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1967 return BNXT_TF_RC_ERROR;
1970 /* Function to handle the parsing of RTE Flow action dec ttl.*/
1972 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
1973 struct ulp_rte_parser_params *params)
1975 /* Update the act_bitmap with dec ttl */
1976 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
1977 return BNXT_TF_RC_SUCCESS;