1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Local defines for the parsing functions */
16 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
/*
 * NOTE(review): the PCP value after the 13-bit right shift is 0x7; the
 * 0x700 constant is compared against the htons()-swapped priority in the
 * vlan handler below, so this mask presumably encodes the byte-swapped
 * form on little-endian hosts — verify on big-endian targets.
 */
17 #define ULP_VLAN_PRIORITY_MASK 0x700
18 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
/* IANA-assigned VXLAN UDP destination port (RFC 7348). */
19 #define ULP_UDP_PORT_VXLAN 4789
21 /* Utility function to skip the void items. */
/*
 * Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries so the
 * caller lands on the next meaningful pattern item.
 * NOTE(review): interior source lines (return type, body braces, use of
 * the 'increment' argument) are missing from this excerpt; code untouched.
 */
23 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
29 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
36 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] in the params field bitmap when its mask is
 * non-zero; a partial (not all-ones) mask additionally sets the
 * wildcard-match flag. A zero mask clears the index bit instead.
 * NOTE(review): "¶ms" below is a mojibake of "&params" (HTML-entity
 * corruption); also interior lines are missing from this excerpt.
 */
38 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
41 struct ulp_rte_hdr_field *field;
43 field = ¶ms->hdr_field[idx];
44 if (ulp_bitmap_notzero(field->mask, field->size)) {
45 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial mask => hardware must use a wildcard match entry. */
47 if (!ulp_bitmap_is_ones(field->mask, field->size))
48 ULP_BITMAP_SET(params->fld_bitmap.bits,
49 BNXT_ULP_MATCH_TYPE_BITMASK_WM)
51 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
55 /* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec (field->size bytes) and — judging by
 * how every caller chains the return value — presumably returns the next
 * hdr_field slot; the return statement is not visible in this excerpt.
 */
56 static struct ulp_rte_hdr_field *
57 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
62 memcpy(field->spec, buffer, field->size);
67 /* Utility function to copy field masks items */
/*
 * Copies 'buffer' into hdr_field[*idx].mask and updates the field bitmap
 * for that index. NOTE(review): "¶ms" is a mojibake of "&params";
 * the *idx increment expected by callers is not visible in this excerpt.
 */
69 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
74 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
76 memcpy(field->mask, buffer, size);
77 ulp_rte_parser_field_bitmap_update(params, *idx);
81 /* Utility function to ignore field masks items */
/*
 * Deliberate no-op counterpart of ulp_rte_prsr_mask_copy(): used for
 * fields (e.g. VLAN priority, IPv4 TOS) whose masks are intentionally
 * not programmed into the match; presumably still advances *idx so the
 * field layout stays aligned — the body is not visible in this excerpt.
 */
83 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
85 const void *buffer __rte_unused,
86 uint32_t size __rte_unused)
92 * Function to handle the parsing of RTE Flows and placing
93 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow pattern array until RTE_FLOW_ITEM_TYPE_END,
 * dispatching each item to its registered handler from ulp_hdr_info[].
 * Returns BNXT_TF_RC_PARSE_ERR for unsupported item types,
 * BNXT_TF_RC_ERROR on handler failure, otherwise the result of the
 * implicit-SVIF match processing. Interior lines (static return type,
 * braces, item advance) are missing from this excerpt; code untouched.
 */
96 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
97 struct ulp_rte_parser_params *params)
99 const struct rte_flow_item *item = pattern;
100 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots for the SVIF entry. */
102 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
104 /* Set the computed flags for no vlan tags before parsing */
105 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
106 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
108 /* Parse all the items in the pattern */
109 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
110 /* get the header information from the flow_hdr_info table */
111 hdr_info = &ulp_hdr_info[item->type];
112 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
114 "Truflow parser does not support type %d\n",
116 return BNXT_TF_RC_PARSE_ERR;
117 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
118 /* call the registered callback handler */
119 if (hdr_info->proto_hdr_func) {
120 if (hdr_info->proto_hdr_func(item, params) !=
121 BNXT_TF_RC_SUCCESS) {
122 return BNXT_TF_RC_ERROR;
128 /* update the implied SVIF */
129 return ulp_rte_parser_implicit_match_port_process(params);
133 * Function to handle the parsing of RTE Flows and placing
134 * the RTE flow actions into the ulp structures.
/*
 * Action-side twin of bnxt_ulp_rte_parser_hdr_parse(): walks the actions
 * array until RTE_FLOW_ACTION_TYPE_END, dispatching via ulp_act_info[].
 * Returns BNXT_TF_RC_ERROR for unsupported or failing actions, then
 * applies the implicit action-port and returns BNXT_TF_RC_SUCCESS.
 * NOTE(review): the local is named 'hdr_info' although it holds action
 * info — candidate rename in a follow-up. Interior lines are missing
 * from this excerpt; code untouched.
 */
137 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
138 struct ulp_rte_parser_params *params)
140 const struct rte_flow_action *action_item = actions;
141 struct bnxt_ulp_rte_act_info *hdr_info;
143 /* Parse all the items in the pattern */
144 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
145 /* get the header information from the flow_hdr_info table */
146 hdr_info = &ulp_act_info[action_item->type];
147 if (hdr_info->act_type ==
148 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
150 "Truflow parser does not support act %u\n",
152 return BNXT_TF_RC_ERROR;
153 } else if (hdr_info->act_type ==
154 BNXT_ULP_ACT_TYPE_SUPPORTED) {
155 /* call the registered callback handler */
156 if (hdr_info->proto_act_func) {
157 if (hdr_info->proto_act_func(action_item,
159 BNXT_TF_RC_SUCCESS) {
160 return BNXT_TF_RC_ERROR;
166 /* update the implied port details */
167 ulp_rte_parser_implicit_act_port_process(params);
168 return BNXT_TF_RC_SUCCESS;
172 * Function to handle the post processing of the computed
173 * fields for the interface.
/*
 * Resolves the incoming dpdk port to a bnxt ifindex, then for ingress
 * flows programs the PARIF computed fields: PHY_PORT_PARIF always; for a
 * VF-rep match port additionally VF_FUNC_PARIF, the VF-rep flag and the
 * loopback PARIF; otherwise (non-ingress path, else-arm lines not
 * visible here) the DRV_FUNC_PARIF. Interior lines (error returns,
 * braces, value arguments of several writes) are missing from this
 * excerpt; code untouched.
 */
176 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
179 uint16_t port_id, parif;
181 enum bnxt_ulp_direction_type dir;
183 /* get the direction details */
184 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
186 /* read the port id details */
187 port_id = ULP_COMP_FLD_IDX_RD(params,
188 BNXT_ULP_CF_IDX_INCOMING_IF);
189 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
192 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
196 if (dir == BNXT_ULP_DIR_INGRESS) {
/* Physical-port PARIF is always needed on the ingress path. */
198 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
199 BNXT_ULP_PHY_PORT_PARIF, &parif)) {
200 BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
203 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
206 /* Get the match port type */
207 mtype = ULP_COMP_FLD_IDX_RD(params,
208 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
209 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
210 ULP_COMP_FLD_IDX_WR(params,
211 BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
213 /* Set VF func PARIF */
214 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
215 BNXT_ULP_VF_FUNC_PARIF,
218 "ParseErr:ifindex is not valid\n");
221 ULP_COMP_FLD_IDX_WR(params,
222 BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
225 /* populate the loopback parif */
226 ULP_COMP_FLD_IDX_WR(params,
227 BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
228 BNXT_ULP_SYM_VF_FUNC_PARIF);
231 /* Set DRV func PARIF */
232 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
233 BNXT_ULP_DRV_FUNC_PARIF,
236 "ParseErr:ifindex is not valid\n");
239 ULP_COMP_FLD_IDX_WR(params,
240 BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
247 * Function to handle the post processing of the parsing details
/*
 * Post-parse fixups executed once per flow:
 *  - stamps the egress direction bit into both hdr and act bitmaps;
 *  - sets the VF-to-VF computed field when a VF-rep matches to a VF-rep;
 *  - for DEC_TTL: chooses tunnel-TTL decrement when a VXLAN header is
 *    matched without a VXLAN decap action, else (else-arm lines not
 *    visible) the plain TTL decrement field;
 *  - merges hdr_fp_bit into hdr_bitmap and updates interface PARIFs.
 * Interior lines are missing from this excerpt; code untouched.
 */
250 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
252 enum bnxt_ulp_direction_type dir;
253 enum bnxt_ulp_intf_type match_port_type, act_port_type;
254 uint32_t act_port_set;
256 /* Get the computed details */
257 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
258 match_port_type = ULP_COMP_FLD_IDX_RD(params,
259 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
260 act_port_type = ULP_COMP_FLD_IDX_RD(params,
261 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
262 act_port_set = ULP_COMP_FLD_IDX_RD(params,
263 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
265 /* set the flow direction in the proto and action header */
266 if (dir == BNXT_ULP_DIR_EGRESS) {
267 ULP_BITMAP_SET(params->hdr_bitmap.bits,
268 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
269 ULP_BITMAP_SET(params->act_bitmap.bits,
270 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
273 /* calculate the VF to VF flag */
274 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
275 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
276 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
278 /* Update the decrement ttl computational fields */
279 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
280 BNXT_ULP_ACTION_BIT_DEC_TTL)) {
282 * Check that vxlan proto is included and vxlan decap
283 * action is not set then decrement tunnel ttl.
284 * Similarly add GRE and NVGRE in future.
286 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
287 BNXT_ULP_HDR_BIT_T_VXLAN) &&
288 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
289 BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
290 ULP_COMP_FLD_IDX_WR(params,
291 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
293 ULP_COMP_FLD_IDX_WR(params,
294 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
298 /* Merge the hdr_fp_bit into the proto header bit */
299 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
301 /* Update the computed interface parameters */
302 bnxt_ulp_comp_fld_intf_update(params);
304 /* TBD: Handle the flow rejection scenarios */
309 * Function to compute the flow direction based on the match port details
/*
 * Writes BNXT_ULP_CF_IDX_DIRECTION: an ingress-attributed flow matching
 * a VF representor is treated as EGRESS (traffic leaves toward the VF);
 * otherwise the direction follows the flow attribute, defaulting to
 * EGRESS. Interior lines (return/else structure) are missing from this
 * excerpt; code untouched.
 */
312 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
314 enum bnxt_ulp_intf_type match_port_type;
316 /* Get the match port type */
317 match_port_type = ULP_COMP_FLD_IDX_RD(params,
318 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
320 /* If ingress flow and matchport is vf rep then dir is egress*/
321 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
322 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
323 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
324 BNXT_ULP_DIR_EGRESS);
326 /* Assign the input direction */
327 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
328 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
329 BNXT_ULP_DIR_INGRESS);
331 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
332 BNXT_ULP_DIR_EGRESS);
336 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolves and stores the source VIF (SVIF) match for the flow:
 * rejects a second SVIF source, records the match port type, computes
 * the direction, selects the SVIF type (phy port for ingress; VF-func
 * vs drv-func otherwise), copies the big-endian SVIF/mask into the
 * dedicated hdr_field slot and caches the CPU-order SVIF in the
 * SVIF_FLAG computed field. NOTE(review): "¶ms" is a mojibake of
 * "&params". Interior lines are missing from this excerpt.
 */
338 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
343 enum bnxt_ulp_direction_type dir;
344 struct ulp_rte_hdr_field *hdr_field;
345 enum bnxt_ulp_svif_type svif_type;
346 enum bnxt_ulp_intf_type port_type;
348 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
349 BNXT_ULP_INVALID_SVIF_VAL) {
351 "SVIF already set,multiple source not support'd\n");
352 return BNXT_TF_RC_ERROR;
355 /* Get port type details */
356 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
357 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
358 BNXT_TF_DBG(ERR, "Invalid port type\n");
359 return BNXT_TF_RC_ERROR;
362 /* Update the match port type */
363 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
365 /* compute the direction */
366 bnxt_ulp_rte_parser_direction_compute(params);
368 /* Get the computed direction */
369 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
370 if (dir == BNXT_ULP_DIR_INGRESS) {
371 svif_type = BNXT_ULP_PHY_PORT_SVIF;
373 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
374 svif_type = BNXT_ULP_VF_FUNC_SVIF;
376 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
378 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* Store SVIF in network byte order to match the on-wire field. */
380 svif = rte_cpu_to_be_16(svif);
381 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
382 memcpy(hdr_field->spec, &svif, sizeof(svif));
383 memcpy(hdr_field->mask, &mask, sizeof(mask));
384 hdr_field->size = sizeof(svif);
385 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
386 rte_be_to_cpu_16(svif));
387 return BNXT_TF_RC_SUCCESS;
390 /* Function to handle the parsing of the RTE port id */
/*
 * If no explicit SVIF was matched by the pattern, derives one implicitly
 * from the incoming interface: converts the dpdk port id to a bnxt
 * ifindex and installs a full-mask (0xFFFF) SVIF match. Returns success
 * immediately when an SVIF is already set. Interior lines (error return,
 * final return of rc) are missing from this excerpt; code untouched.
 */
392 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
394 uint16_t port_id = 0;
395 uint16_t svif_mask = 0xFFFF;
397 int32_t rc = BNXT_TF_RC_ERROR;
399 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
400 BNXT_ULP_INVALID_SVIF_VAL)
401 return BNXT_TF_RC_SUCCESS;
403 /* SVIF not set. So get the port id */
404 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
406 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
409 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
413 /* Update the SVIF details */
414 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
418 /* Function to handle the implicit action port id */
/*
 * If the flow's actions did not set a destination port, synthesizes a
 * port-id action from the incoming interface and feeds it through the
 * regular port-id action handler, then clears ACT_PORT_IS_SET so the
 * port is still treated as implicit. Interior lines are missing from
 * this excerpt; code untouched.
 */
420 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
422 struct rte_flow_action action_item = {0};
423 struct rte_flow_action_port_id port_id = {0};
425 /* Read the action port set bit */
426 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
427 /* Already set, so just exit */
428 return BNXT_TF_RC_SUCCESS;
430 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
431 action_item.conf = &port_id;
433 /* Update the action port based on incoming port */
434 ulp_rte_port_id_act_handler(&action_item, params);
436 /* Reset the action port set bit */
437 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
438 return BNXT_TF_RC_SUCCESS;
441 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item handler: takes the implicit incoming port, converts the dpdk
 * port id to a bnxt ifindex and programs a full-mask SVIF match.
 * Interior lines (ifindex declaration, braces) are missing from this
 * excerpt; code untouched.
 */
443 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
444 struct ulp_rte_parser_params *params)
446 uint16_t port_id = 0;
447 uint16_t svif_mask = 0xFFFF;
450 /* Get the implicit port id */
451 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
453 /* perform the conversion from dpdk port to bnxt ifindex */
454 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
457 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
458 return BNXT_TF_RC_ERROR;
461 /* Update the SVIF details */
462 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
465 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF item handler: validates the VF spec/mask, converts the VF function
 * id to a bnxt ifindex via the port database, and installs the SVIF
 * match with the item-provided mask. Interior lines (the spec/mask
 * null checks, mask extraction) are missing from this excerpt.
 */
467 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
468 struct ulp_rte_parser_params *params)
470 const struct rte_flow_item_vf *vf_spec = item->spec;
471 const struct rte_flow_item_vf *vf_mask = item->mask;
474 int32_t rc = BNXT_TF_RC_PARSE_ERR;
476 /* Get VF rte_flow_item for Port details */
478 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
482 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
487 /* perform the conversion from VF Func id to bnxt ifindex */
488 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
491 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
494 /* Update the SVIF details */
495 return ulp_rte_parser_svif_set(params, ifindex, mask);
498 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * PORT_ID item handler: validates spec/mask, translates the dpdk port
 * id to a bnxt ifindex and installs the SVIF match with port_mask->id
 * as the mask. NOTE(review): the second error string says "Phy Port
 * mask" although this is the port-id handler — likely copy/paste from
 * the phy-port handler; string untouched here. Interior lines are
 * missing from this excerpt.
 */
500 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
501 struct ulp_rte_parser_params *params)
503 const struct rte_flow_item_port_id *port_spec = item->spec;
504 const struct rte_flow_item_port_id *port_mask = item->mask;
506 int32_t rc = BNXT_TF_RC_PARSE_ERR;
510 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
514 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
517 mask = port_mask->id;
519 /* perform the conversion from dpdk port to bnxt ifindex */
520 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
523 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
526 /* Update the SVIF details */
527 return ulp_rte_parser_svif_set(params, ifindex, mask);
530 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * PHY_PORT item handler: validates spec/mask, forces the match port
 * type to PHY_PORT, computes direction and rejects egress (phy-port
 * matches are ingress-only), looks up the SVIF for the physical port
 * index and writes the big-endian SVIF/mask into the SVIF hdr_field
 * slot, mirroring ulp_rte_parser_svif_set(). NOTE(review): "¶ms" is
 * a mojibake of "&params". Interior lines are missing from this
 * excerpt; code untouched.
 */
532 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
533 struct ulp_rte_parser_params *params)
535 const struct rte_flow_item_phy_port *port_spec = item->spec;
536 const struct rte_flow_item_phy_port *port_mask = item->mask;
538 int32_t rc = BNXT_TF_RC_ERROR;
540 enum bnxt_ulp_direction_type dir;
541 struct ulp_rte_hdr_field *hdr_field;
543 /* Copy the rte_flow_item for phy port into hdr_field */
545 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
549 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
552 mask = port_mask->index;
554 /* Update the match port type */
555 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
556 BNXT_ULP_INTF_TYPE_PHY_PORT);
558 /* Compute the Hw direction */
559 bnxt_ulp_rte_parser_direction_compute(params);
561 /* Direction validation */
562 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
563 if (dir == BNXT_ULP_DIR_EGRESS) {
565 "Parse Err:Phy ports are valid only for ingress\n");
566 return BNXT_TF_RC_PARSE_ERR;
569 /* Get the physical port details from port db */
570 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
573 BNXT_TF_DBG(ERR, "Failed to get port details\n");
574 return BNXT_TF_RC_PARSE_ERR;
577 /* Update the SVIF details */
578 svif = rte_cpu_to_be_16(svif);
579 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
580 memcpy(hdr_field->spec, &svif, sizeof(svif));
581 memcpy(hdr_field->mask, &mask, sizeof(mask));
582 hdr_field->size = sizeof(svif);
583 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
584 rte_be_to_cpu_16(svif));
585 return BNXT_TF_RC_SUCCESS;
588 /* Function to handle the update of proto header based on field values */
/*
 * Maps an L2 ethertype (big-endian, as carried in the item) to the
 * corresponding inner/outer IPv4/IPv6 fast-path header bit and sets the
 * matching I_L3/O_L3 computed field. 'in_flag' selects the inner
 * variants — presumably non-zero for an inner header; the branch lines
 * testing it are missing from this excerpt.
 */
590 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
591 uint16_t type, uint32_t in_flag)
593 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
595 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
596 BNXT_ULP_HDR_BIT_I_IPV4);
597 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
599 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
600 BNXT_ULP_HDR_BIT_O_IPV4);
601 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
603 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
605 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
606 BNXT_ULP_HDR_BIT_I_IPV6);
607 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
609 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
610 BNXT_ULP_HDR_BIT_O_IPV6);
611 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
616 /* Internal Function to identify broadcast or multicast packets */
/*
 * Returns truthy when the MAC is multicast or broadcast — such addresses
 * are rejected by the eth handler because offload is not supported for
 * them. The return statements are missing from this excerpt.
 */
618 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
620 if (rte_is_multicast_ether_addr(eth_addr) ||
621 rte_is_broadcast_ether_addr(eth_addr)) {
623 "No support for bcast or mcast addr offload\n");
629 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * ETH item handler: copies dst MAC, src MAC and ethertype spec/mask
 * into consecutive hdr_field slots (rejecting bcast/mcast addresses),
 * reserves the VLAN field slots that may follow, sets the inner or
 * outer ETH header bit depending on whether an outer ETH was already
 * seen, and propagates the ethertype to the L3 fast-path bits.
 * NOTE(review): "¶ms"/"ð" are mojibake of "&params"/"&eth"
 * (HTML-entity corruption). Interior lines (size/eth_type declarations,
 * mask null-check, inner_flag assignment) are missing from this
 * excerpt; code untouched.
 */
631 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
632 struct ulp_rte_parser_params *params)
634 const struct rte_flow_item_eth *eth_spec = item->spec;
635 const struct rte_flow_item_eth *eth_mask = item->mask;
636 struct ulp_rte_hdr_field *field;
637 uint32_t idx = params->field_idx;
639 uint16_t eth_type = 0;
640 uint32_t inner_flag = 0;
643 * Copy the rte_flow_item for eth into hdr_field using ethernet
647 size = sizeof(eth_spec->dst.addr_bytes);
648 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
649 eth_spec->dst.addr_bytes,
651 /* Todo: work around to avoid multicast and broadcast addr */
652 if (ulp_rte_parser_is_bcmc_addr(ð_spec->dst))
653 return BNXT_TF_RC_PARSE_ERR;
655 size = sizeof(eth_spec->src.addr_bytes);
656 field = ulp_rte_parser_fld_copy(field,
657 eth_spec->src.addr_bytes,
659 /* Todo: work around to avoid multicast and broadcast addr */
660 if (ulp_rte_parser_is_bcmc_addr(ð_spec->src))
661 return BNXT_TF_RC_PARSE_ERR;
663 field = ulp_rte_parser_fld_copy(field,
665 sizeof(eth_spec->type));
666 eth_type = eth_spec->type;
669 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
670 sizeof(eth_mask->dst.addr_bytes));
671 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
672 sizeof(eth_mask->src.addr_bytes));
673 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
674 sizeof(eth_mask->type));
676 /* Add number of vlan header elements */
677 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
/* Remember where VLAN fields start; VLAN items fill these slots. */
678 params->vlan_idx = params->field_idx;
679 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
681 /* Update the protocol hdr bitmap */
682 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
683 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
686 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
688 /* Update the field protocol hdr bitmap */
689 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
691 return BNXT_TF_RC_SUCCESS;
694 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * VLAN item handler: splits the TCI into priority (top 3 bits) and tag
 * (low 12 bits), stores the tag and inner ethertype spec/mask in the
 * VLAN hdr_field slots (priority mask deliberately ignored — OVS sets
 * it wildcard), then classifies the tag as outer-outer / outer-inner /
 * inner-outer / inner-inner based on which ETH headers have been seen
 * and the current vtag counts, updating the corresponding computed
 * fields and header bits. Errors out if no ETH header preceded the
 * VLAN item. NOTE(review): "¶ms" is a mojibake of "&params";
 * "withtout" in the final log string is a typo ("without") — a string
 * fix belongs in a separate behavioral change. Interior lines are
 * missing from this excerpt; code untouched.
 */
696 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
697 struct ulp_rte_parser_params *params)
699 const struct rte_flow_item_vlan *vlan_spec = item->spec;
700 const struct rte_flow_item_vlan *vlan_mask = item->mask;
701 struct ulp_rte_hdr_field *field;
702 struct ulp_rte_hdr_bitmap *hdr_bit;
703 uint32_t idx = params->vlan_idx;
704 uint16_t vlan_tag, priority;
705 uint32_t outer_vtag_num;
706 uint32_t inner_vtag_num;
707 uint16_t eth_type = 0;
708 uint32_t inner_flag = 0;
711 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* Split TCI: convert to CPU order, extract PCP then 12-bit VID. */
715 vlan_tag = ntohs(vlan_spec->tci);
716 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
717 vlan_tag &= ULP_VLAN_TAG_MASK;
718 vlan_tag = htons(vlan_tag);
720 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
723 field = ulp_rte_parser_fld_copy(field,
726 field = ulp_rte_parser_fld_copy(field,
727 &vlan_spec->inner_type,
728 sizeof(vlan_spec->inner_type));
729 eth_type = vlan_spec->inner_type;
733 vlan_tag = ntohs(vlan_mask->tci);
734 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
738 * the storage for priority and vlan tag is 2 bytes
739 * The mask of priority which is 3 bits if it is all 1's
740 * then make the rest bits 13 bits as 1's
741 * so that it is matched as exact match.
743 if (priority == ULP_VLAN_PRIORITY_MASK)
744 priority |= ~ULP_VLAN_PRIORITY_MASK;
745 if (vlan_tag == ULP_VLAN_TAG_MASK)
746 vlan_tag |= ~ULP_VLAN_TAG_MASK;
747 vlan_tag = htons(vlan_tag);
750 * The priority field is ignored since OVS is setting it as
751 * wild card match and it is not supported. This is a work
752 * around and shall be addressed in the future.
754 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
757 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
759 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
760 sizeof(vlan_mask->inner_type));
762 /* Set the vlan index to new incremented value */
763 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
765 /* Get the outer tag and inner tag counts */
766 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
767 BNXT_ULP_CF_IDX_O_VTAG_NUM);
768 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
769 BNXT_ULP_CF_IDX_I_VTAG_NUM);
771 /* Update the hdr_bitmap of the vlans */
772 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first tag after the outer ETH only => outer-outer VLAN. */
773 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
774 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
776 /* Update the vlan tag num */
778 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
780 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
781 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
782 ULP_BITMAP_SET(params->hdr_bitmap.bits,
783 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Case 2: second tag on the outer ETH => outer-inner VLAN. */
784 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
785 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
786 outer_vtag_num == 1) {
787 /* update the vlan tag num */
789 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
791 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
792 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
793 ULP_BITMAP_SET(params->hdr_bitmap.bits,
794 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Case 3: first tag on the inner ETH => inner-outer VLAN. */
795 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
796 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
798 /* update the vlan tag num */
800 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
802 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
803 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
804 ULP_BITMAP_SET(params->hdr_bitmap.bits,
805 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Case 4: second tag on the inner ETH => inner-inner VLAN. */
807 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
808 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
809 inner_vtag_num == 1) {
810 /* update the vlan tag num */
812 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
814 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
815 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
816 ULP_BITMAP_SET(params->hdr_bitmap.bits,
817 BNXT_ULP_HDR_BIT_II_VLAN);
820 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
821 return BNXT_TF_RC_ERROR;
823 /* Update the field protocol hdr bitmap */
824 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
825 return BNXT_TF_RC_SUCCESS;
828 /* Function to handle the update of proto header based on field values */
/*
 * Maps an IP next-protocol value to the inner/outer UDP or TCP fast-path
 * header bit and sets the matching I_L4/O_L4 computed field. 'in_flag'
 * selects the inner variants — presumably non-zero for inner headers;
 * the branch lines testing it are missing from this excerpt.
 */
830 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
831 uint8_t proto, uint32_t in_flag)
833 if (proto == IPPROTO_UDP) {
835 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
836 BNXT_ULP_HDR_BIT_I_UDP);
837 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
839 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
840 BNXT_ULP_HDR_BIT_O_UDP);
841 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
843 } else if (proto == IPPROTO_TCP) {
845 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
846 BNXT_ULP_HDR_BIT_I_TCP);
847 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
849 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
850 BNXT_ULP_HDR_BIT_O_TCP);
851 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
856 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * IPV4 item handler: rejects a third L3 header, copies every ipv4 hdr
 * field spec (version_ihl .. dst_addr) into consecutive hdr_field
 * slots, copies the corresponding masks (TOS mask deliberately ignored
 * — OVS wildcard workaround), advances field_idx, sets the inner or
 * outer IPV4 header bit depending on whether an outer L3 was already
 * seen, propagates next_proto_id to the L4 fast-path bits and bumps
 * the L3 header count. NOTE(review): "¶ms" is a mojibake of
 * "&params". Interior lines (spec/mask null checks, size/proto/cnt
 * declarations) are missing from this excerpt; code untouched.
 */
858 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
859 struct ulp_rte_parser_params *params)
861 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
862 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
863 struct ulp_rte_hdr_field *field;
864 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
865 uint32_t idx = params->field_idx;
868 uint32_t inner_flag = 0;
871 /* validate there are no 3rd L3 header */
872 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
874 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
875 return BNXT_TF_RC_ERROR;
879 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
883 size = sizeof(ipv4_spec->hdr.version_ihl);
884 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
885 &ipv4_spec->hdr.version_ihl,
887 size = sizeof(ipv4_spec->hdr.type_of_service);
888 field = ulp_rte_parser_fld_copy(field,
889 &ipv4_spec->hdr.type_of_service,
891 size = sizeof(ipv4_spec->hdr.total_length);
892 field = ulp_rte_parser_fld_copy(field,
893 &ipv4_spec->hdr.total_length,
895 size = sizeof(ipv4_spec->hdr.packet_id);
896 field = ulp_rte_parser_fld_copy(field,
897 &ipv4_spec->hdr.packet_id,
899 size = sizeof(ipv4_spec->hdr.fragment_offset);
900 field = ulp_rte_parser_fld_copy(field,
901 &ipv4_spec->hdr.fragment_offset,
903 size = sizeof(ipv4_spec->hdr.time_to_live);
904 field = ulp_rte_parser_fld_copy(field,
905 &ipv4_spec->hdr.time_to_live,
907 size = sizeof(ipv4_spec->hdr.next_proto_id);
908 field = ulp_rte_parser_fld_copy(field,
909 &ipv4_spec->hdr.next_proto_id,
/* Remember the L4 protocol for the fast-path bit update below. */
911 proto = ipv4_spec->hdr.next_proto_id;
912 size = sizeof(ipv4_spec->hdr.hdr_checksum);
913 field = ulp_rte_parser_fld_copy(field,
914 &ipv4_spec->hdr.hdr_checksum,
916 size = sizeof(ipv4_spec->hdr.src_addr);
917 field = ulp_rte_parser_fld_copy(field,
918 &ipv4_spec->hdr.src_addr,
920 size = sizeof(ipv4_spec->hdr.dst_addr);
921 field = ulp_rte_parser_fld_copy(field,
922 &ipv4_spec->hdr.dst_addr,
926 ulp_rte_prsr_mask_copy(params, &idx,
927 &ipv4_mask->hdr.version_ihl,
928 sizeof(ipv4_mask->hdr.version_ihl));
930 * The tos field is ignored since OVS is setting it as wild card
931 * match and it is not supported. This is a work around and
932 * shall be addressed in the future.
934 ulp_rte_prsr_mask_ignore(params, &idx,
935 &ipv4_mask->hdr.type_of_service,
936 sizeof(ipv4_mask->hdr.type_of_service)
939 ulp_rte_prsr_mask_copy(params, &idx,
940 &ipv4_mask->hdr.total_length,
941 sizeof(ipv4_mask->hdr.total_length));
942 ulp_rte_prsr_mask_copy(params, &idx,
943 &ipv4_mask->hdr.packet_id,
944 sizeof(ipv4_mask->hdr.packet_id));
945 ulp_rte_prsr_mask_copy(params, &idx,
946 &ipv4_mask->hdr.fragment_offset,
947 sizeof(ipv4_mask->hdr.fragment_offset));
948 ulp_rte_prsr_mask_copy(params, &idx,
949 &ipv4_mask->hdr.time_to_live,
950 sizeof(ipv4_mask->hdr.time_to_live));
951 ulp_rte_prsr_mask_copy(params, &idx,
952 &ipv4_mask->hdr.next_proto_id,
953 sizeof(ipv4_mask->hdr.next_proto_id));
954 ulp_rte_prsr_mask_copy(params, &idx,
955 &ipv4_mask->hdr.hdr_checksum,
956 sizeof(ipv4_mask->hdr.hdr_checksum));
957 ulp_rte_prsr_mask_copy(params, &idx,
958 &ipv4_mask->hdr.src_addr,
959 sizeof(ipv4_mask->hdr.src_addr));
960 ulp_rte_prsr_mask_copy(params, &idx,
961 &ipv4_mask->hdr.dst_addr,
962 sizeof(ipv4_mask->hdr.dst_addr));
964 /* Add the number of ipv4 header elements */
965 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
967 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
968 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
969 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
970 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
971 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
974 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
975 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
978 /* Update the field protocol hdr bitmap */
979 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
980 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
981 return BNXT_TF_RC_SUCCESS;
984 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPV6 item handler: rejects a third L3 header, decomposes vtc_flow
 * into version / traffic-class / flow-label hdr_field entries, copies
 * the remaining ipv6 hdr field specs and masks (TC and flow-label
 * masks deliberately ignored — OVS wildcard workaround), advances
 * field_idx, sets the inner or outer IPV6 header bit, propagates the
 * next-header 'proto' to the L4 fast-path bits and bumps the L3 header
 * count. NOTE(review): "¶ms" is a mojibake of "&params". Interior
 * lines (spec/mask null checks, declarations) are missing from this
 * excerpt; code untouched.
 */
986 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
987 struct ulp_rte_parser_params *params)
989 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
990 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
991 struct ulp_rte_hdr_field *field;
992 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
993 uint32_t idx = params->field_idx;
995 uint32_t vtcf, vtcf_mask;
997 uint32_t inner_flag = 0;
1000 /* validate there are no 3rd L3 header */
1001 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1003 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1004 return BNXT_TF_RC_ERROR;
1008 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1012 size = sizeof(ipv6_spec->hdr.vtc_flow);
/* vtc_flow is split into three separate match fields. */
1014 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1015 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1019 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1020 field = ulp_rte_parser_fld_copy(field,
1024 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1025 field = ulp_rte_parser_fld_copy(field,
1029 size = sizeof(ipv6_spec->hdr.payload_len);
1030 field = ulp_rte_parser_fld_copy(field,
1031 &ipv6_spec->hdr.payload_len,
1033 size = sizeof(ipv6_spec->hdr.proto);
1034 field = ulp_rte_parser_fld_copy(field,
1035 &ipv6_spec->hdr.proto,
/* Remember the L4 protocol for the fast-path bit update below. */
1037 proto = ipv6_spec->hdr.proto;
1038 size = sizeof(ipv6_spec->hdr.hop_limits);
1039 field = ulp_rte_parser_fld_copy(field,
1040 &ipv6_spec->hdr.hop_limits,
1042 size = sizeof(ipv6_spec->hdr.src_addr);
1043 field = ulp_rte_parser_fld_copy(field,
1044 &ipv6_spec->hdr.src_addr,
1046 size = sizeof(ipv6_spec->hdr.dst_addr);
1047 field = ulp_rte_parser_fld_copy(field,
1048 &ipv6_spec->hdr.dst_addr,
1052 size = sizeof(ipv6_mask->hdr.vtc_flow);
1054 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1055 ulp_rte_prsr_mask_copy(params, &idx,
1059 * The TC and flow label field are ignored since OVS is
1060 * setting it for match and it is not supported.
1061 * This is a work around and
1062 * shall be addressed in the future.
1064 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1065 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1067 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1068 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1070 ulp_rte_prsr_mask_copy(params, &idx,
1071 &ipv6_mask->hdr.payload_len,
1072 sizeof(ipv6_mask->hdr.payload_len));
1073 ulp_rte_prsr_mask_copy(params, &idx,
1074 &ipv6_mask->hdr.proto,
1075 sizeof(ipv6_mask->hdr.proto));
1076 ulp_rte_prsr_mask_copy(params, &idx,
1077 &ipv6_mask->hdr.hop_limits,
1078 sizeof(ipv6_mask->hdr.hop_limits));
1079 ulp_rte_prsr_mask_copy(params, &idx,
1080 &ipv6_mask->hdr.src_addr,
1081 sizeof(ipv6_mask->hdr.src_addr));
1082 ulp_rte_prsr_mask_copy(params, &idx,
1083 &ipv6_mask->hdr.dst_addr,
1084 sizeof(ipv6_mask->hdr.dst_addr));
1086 /* add number of ipv6 header elements */
1087 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1089 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1090 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1091 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1092 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1093 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1096 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1097 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1100 /* Update the field protocol hdr bitmap */
1101 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1102 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1104 return BNXT_TF_RC_SUCCESS;
1107 /* Function to handle the update of proto header based on field values */
/*
 * Flags a tunnel VXLAN header when the UDP destination port equals the
 * IANA VXLAN port (4789, compared in big-endian as stored in the item).
 * Interior lines (second parameter, braces) are missing from this
 * excerpt; code untouched.
 */
1109 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1112 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1113 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1114 BNXT_ULP_HDR_BIT_T_VXLAN);
1117 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies the UDP spec/mask fields into params->hdr_field, advances
 * params->field_idx, sets the outer/inner UDP bits in the header bitmap
 * and bumps the L4 header count.  Returns BNXT_TF_RC_SUCCESS, or
 * BNXT_TF_RC_ERROR when a third L4 header is seen.
 * NOTE(review): several original lines (return type, null checks, braces)
 * are elided from this excerpt — confirm against the full source.
 */
1119 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1120 struct ulp_rte_parser_params *params)
1122 const struct rte_flow_item_udp *udp_spec = item->spec;
1123 const struct rte_flow_item_udp *udp_mask = item->mask;
1124 struct ulp_rte_hdr_field *field;
1125 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1126 uint32_t idx = params->field_idx;
1128 uint16_t dst_port = 0;
/* Only two L4 headers (outer + inner) are supported. */
1131 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1133 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1134 return BNXT_TF_RC_ERROR;
1138 * Copy the rte_flow_item for udp into hdr_field using udp
1142 size = sizeof(udp_spec->hdr.src_port);
1143 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1144 &udp_spec->hdr.src_port,
1146 size = sizeof(udp_spec->hdr.dst_port);
1147 field = ulp_rte_parser_fld_copy(field,
1148 &udp_spec->hdr.dst_port,
/* Keep the (big-endian) dst port to detect a VXLAN tunnel below. */
1150 dst_port = udp_spec->hdr.dst_port;
1151 size = sizeof(udp_spec->hdr.dgram_len);
1152 field = ulp_rte_parser_fld_copy(field,
1153 &udp_spec->hdr.dgram_len,
1155 size = sizeof(udp_spec->hdr.dgram_cksum);
1156 field = ulp_rte_parser_fld_copy(field,
1157 &udp_spec->hdr.dgram_cksum,
1161 ulp_rte_prsr_mask_copy(params, &idx,
1162 &udp_mask->hdr.src_port,
1163 sizeof(udp_mask->hdr.src_port));
1164 ulp_rte_prsr_mask_copy(params, &idx,
1165 &udp_mask->hdr.dst_port,
1166 sizeof(udp_mask->hdr.dst_port));
1167 ulp_rte_prsr_mask_copy(params, &idx,
1168 &udp_mask->hdr.dgram_len,
1169 sizeof(udp_mask->hdr.dgram_len));
1170 ulp_rte_prsr_mask_copy(params, &idx,
1171 &udp_mask->hdr.dgram_cksum,
1172 sizeof(udp_mask->hdr.dgram_cksum));
1175 /* Add number of UDP header elements */
1176 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1178 /* Set the udp header bitmap and computed l4 header bitmaps */
/* An outer UDP/TCP already present means this header is the inner one. */
1179 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1180 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1181 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1182 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1184 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1185 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1186 /* Update the field protocol hdr bitmap */
1187 ulp_rte_l4_proto_type_update(params, dst_port);
1189 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1190 return BNXT_TF_RC_SUCCESS;
1193 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies the TCP spec/mask fields into params->hdr_field, advances
 * params->field_idx, sets the outer/inner TCP bits in the header bitmap
 * and bumps the L4 header count.  Returns BNXT_TF_RC_SUCCESS, or
 * BNXT_TF_RC_ERROR when a third L4 header is seen.
 * NOTE(review): several original lines (return type, null checks, braces)
 * are elided from this excerpt — confirm against the full source.
 */
1195 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1196 struct ulp_rte_parser_params *params)
1198 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1199 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1200 struct ulp_rte_hdr_field *field;
1201 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1202 uint32_t idx = params->field_idx;
/* Only two L4 headers (outer + inner) are supported. */
1206 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1208 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1209 return BNXT_TF_RC_ERROR;
1213 * Copy the rte_flow_item for tcp into hdr_field using tcp
1217 size = sizeof(tcp_spec->hdr.src_port);
1218 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1219 &tcp_spec->hdr.src_port,
1221 size = sizeof(tcp_spec->hdr.dst_port);
1222 field = ulp_rte_parser_fld_copy(field,
1223 &tcp_spec->hdr.dst_port,
1225 size = sizeof(tcp_spec->hdr.sent_seq);
1226 field = ulp_rte_parser_fld_copy(field,
1227 &tcp_spec->hdr.sent_seq,
1229 size = sizeof(tcp_spec->hdr.recv_ack);
1230 field = ulp_rte_parser_fld_copy(field,
1231 &tcp_spec->hdr.recv_ack,
1233 size = sizeof(tcp_spec->hdr.data_off);
1234 field = ulp_rte_parser_fld_copy(field,
1235 &tcp_spec->hdr.data_off,
1237 size = sizeof(tcp_spec->hdr.tcp_flags);
1238 field = ulp_rte_parser_fld_copy(field,
1239 &tcp_spec->hdr.tcp_flags,
1241 size = sizeof(tcp_spec->hdr.rx_win);
1242 field = ulp_rte_parser_fld_copy(field,
1243 &tcp_spec->hdr.rx_win,
1245 size = sizeof(tcp_spec->hdr.cksum);
1246 field = ulp_rte_parser_fld_copy(field,
1247 &tcp_spec->hdr.cksum,
1249 size = sizeof(tcp_spec->hdr.tcp_urp);
1250 field = ulp_rte_parser_fld_copy(field,
1251 &tcp_spec->hdr.tcp_urp,
/* presumably the no-spec path skips the field slots — TODO confirm the
 * elided branch around original line 1254 against the full source. */
1254 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1258 ulp_rte_prsr_mask_copy(params, &idx,
1259 &tcp_mask->hdr.src_port,
1260 sizeof(tcp_mask->hdr.src_port));
1261 ulp_rte_prsr_mask_copy(params, &idx,
1262 &tcp_mask->hdr.dst_port,
1263 sizeof(tcp_mask->hdr.dst_port));
1264 ulp_rte_prsr_mask_copy(params, &idx,
1265 &tcp_mask->hdr.sent_seq,
1266 sizeof(tcp_mask->hdr.sent_seq));
1267 ulp_rte_prsr_mask_copy(params, &idx,
1268 &tcp_mask->hdr.recv_ack,
1269 sizeof(tcp_mask->hdr.recv_ack));
1270 ulp_rte_prsr_mask_copy(params, &idx,
1271 &tcp_mask->hdr.data_off,
1272 sizeof(tcp_mask->hdr.data_off));
1273 ulp_rte_prsr_mask_copy(params, &idx,
1274 &tcp_mask->hdr.tcp_flags,
1275 sizeof(tcp_mask->hdr.tcp_flags));
1276 ulp_rte_prsr_mask_copy(params, &idx,
1277 &tcp_mask->hdr.rx_win,
1278 sizeof(tcp_mask->hdr.rx_win));
1279 ulp_rte_prsr_mask_copy(params, &idx,
1280 &tcp_mask->hdr.cksum,
1281 sizeof(tcp_mask->hdr.cksum));
1282 ulp_rte_prsr_mask_copy(params, &idx,
1283 &tcp_mask->hdr.tcp_urp,
1284 sizeof(tcp_mask->hdr.tcp_urp));
1286 /* add number of TCP header elements */
1287 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1289 /* Set the tcp header bitmap and computed l4 header bitmaps */
1290 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1291 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1292 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1293 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1295 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1296 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1298 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1299 return BNXT_TF_RC_SUCCESS;
1302 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies the VXLAN spec/mask (flags, rsvd0, vni, rsvd1) into
 * params->hdr_field, advances params->field_idx and sets the VXLAN
 * tunnel bit in the header bitmap.  Always returns BNXT_TF_RC_SUCCESS.
 * NOTE(review): null checks / braces are elided from this excerpt.
 */
1304 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1305 struct ulp_rte_parser_params *params)
1307 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1308 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1309 struct ulp_rte_hdr_field *field;
1310 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1311 uint32_t idx = params->field_idx;
1315 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1319 size = sizeof(vxlan_spec->flags);
1320 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1323 size = sizeof(vxlan_spec->rsvd0);
1324 field = ulp_rte_parser_fld_copy(field,
1327 size = sizeof(vxlan_spec->vni);
1328 field = ulp_rte_parser_fld_copy(field,
1331 size = sizeof(vxlan_spec->rsvd1);
1332 field = ulp_rte_parser_fld_copy(field,
1337 ulp_rte_prsr_mask_copy(params, &idx,
1339 sizeof(vxlan_mask->flags));
1340 ulp_rte_prsr_mask_copy(params, &idx,
1342 sizeof(vxlan_mask->rsvd0));
1343 ulp_rte_prsr_mask_copy(params, &idx,
1345 sizeof(vxlan_mask->vni));
1346 ulp_rte_prsr_mask_copy(params, &idx,
1348 sizeof(vxlan_mask->rsvd1));
1350 /* Add number of vxlan header elements */
1351 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1353 /* Update the hdr_bitmap with vxlan */
1354 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1355 return BNXT_TF_RC_SUCCESS;
1358 /* Function to handle the parsing of RTE Flow item void Header */
/* No-op: VOID items carry no match data; always succeeds. */
1360 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1361 struct ulp_rte_parser_params *params __rte_unused)
1363 return BNXT_TF_RC_SUCCESS;
1366 /* Function to handle the parsing of RTE Flow action void Header. */
/* No-op: VOID actions carry no configuration; always succeeds. */
1368 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1369 struct ulp_rte_parser_params *params __rte_unused)
1371 return BNXT_TF_RC_SUCCESS;
1374 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the mark id (converted to big endian) in the action properties
 * and sets the MARK action bit.  Returns BNXT_TF_RC_ERROR when the
 * action's conf pointer is invalid (check elided from this excerpt).
 */
1376 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1377 struct ulp_rte_parser_params *param)
1379 const struct rte_flow_action_mark *mark;
1380 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1383 mark = action_item->conf;
1385 mark_id = tfp_cpu_to_be_32(mark->id);
1386 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1387 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1389 /* Update the act_bitmap with mark */
1390 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1391 return BNXT_TF_RC_SUCCESS;
1393 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1394 return BNXT_TF_RC_ERROR;
1397 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Only records that an RSS action is present (sets the RSS action bit);
 * the rss configuration itself is not copied here.  Errors out when the
 * conf pointer is invalid (check elided from this excerpt).
 */
1399 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1400 struct ulp_rte_parser_params *param)
1402 const struct rte_flow_action_rss *rss = action_item->conf;
1405 /* Update the act_bitmap with rss */
1406 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1407 return BNXT_TF_RC_SUCCESS;
1409 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1410 return BNXT_TF_RC_ERROR;
1413 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walks the vxlan_encap->definition item list (ETH, optional VLAN(s),
 * IPv4 or IPv6, UDP, VXLAN) and serializes each header into the encap
 * action-property buffers (8/16-byte aligned for the hardware template).
 * Sets the VXLAN_ENCAP action bit on success; returns BNXT_TF_RC_ERROR
 * on any missing/unexpected item.
 * NOTE(review): many original lines (null checks, braces, some copy
 * arguments) are elided from this excerpt — confirm against the full
 * source before relying on the exact buffer layout described below.
 */
1415 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1416 struct ulp_rte_parser_params *params)
1418 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1419 const struct rte_flow_item *item;
1420 const struct rte_flow_item_eth *eth_spec;
1421 const struct rte_flow_item_ipv4 *ipv4_spec;
1422 const struct rte_flow_item_ipv6 *ipv6_spec;
1423 struct rte_flow_item_vxlan vxlan_spec;
1424 uint32_t vlan_num = 0, vlan_size = 0;
1425 uint32_t ip_size = 0, ip_type = 0;
1426 uint32_t vxlan_size = 0;
1428 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1429 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1431 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1432 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1434 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1435 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1436 const uint8_t *tmp_buff;
1438 vxlan_encap = action_item->conf;
1440 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1441 return BNXT_TF_RC_ERROR;
1444 item = vxlan_encap->definition;
1446 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1447 return BNXT_TF_RC_ERROR;
1450 if (!ulp_rte_item_skip_void(&item, 0))
1451 return BNXT_TF_RC_ERROR;
1453 /* must have ethernet header */
1454 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1455 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1456 return BNXT_TF_RC_ERROR;
1458 eth_spec = item->spec;
1459 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1460 ulp_encap_buffer_copy(buff,
1461 eth_spec->dst.addr_bytes,
1462 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1463 ULP_BUFFER_ALIGN_8_BYTE);
1465 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1466 ulp_encap_buffer_copy(buff,
1467 eth_spec->src.addr_bytes,
1468 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1469 ULP_BUFFER_ALIGN_8_BYTE);
1471 /* Goto the next item */
1472 if (!ulp_rte_item_skip_void(&item, 1))
1473 return BNXT_TF_RC_ERROR;
1475 /* May have vlan header */
1476 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1478 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1479 ulp_encap_buffer_copy(buff,
1481 sizeof(struct rte_flow_item_vlan),
1482 ULP_BUFFER_ALIGN_8_BYTE);
1484 if (!ulp_rte_item_skip_void(&item, 1))
1485 return BNXT_TF_RC_ERROR;
1488 /* may have two vlan headers */
1489 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
/* Second tag goes right after the first in the VTAG buffer. */
1491 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1492 sizeof(struct rte_flow_item_vlan)],
1494 sizeof(struct rte_flow_item_vlan));
1495 if (!ulp_rte_item_skip_void(&item, 1))
1496 return BNXT_TF_RC_ERROR;
1498 /* Update the vlan count and size of more than one */
1500 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1501 vlan_num = tfp_cpu_to_be_32(vlan_num);
1502 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1505 vlan_size = tfp_cpu_to_be_32(vlan_size);
1506 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1511 /* L3 must be IPv4, IPv6 */
1512 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1513 ipv4_spec = item->spec;
1514 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1516 /* copy the ipv4 details */
/* Empty ver/hlen+TOS in the spec -> fall back to the default header. */
1517 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1518 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1519 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1520 ulp_encap_buffer_copy(buff,
1522 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1523 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1524 ULP_BUFFER_ALIGN_8_BYTE);
1526 /* Total length being ignored in the ip hdr. */
1527 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1528 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1529 ulp_encap_buffer_copy(buff,
1531 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1532 ULP_BUFFER_ALIGN_8_BYTE);
1533 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1534 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1535 ulp_encap_buffer_copy(buff,
1536 &ipv4_spec->hdr.version_ihl,
1537 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1538 ULP_BUFFER_ALIGN_8_BYTE);
1541 /* Update the dst ip address in ip encap buffer */
1542 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1543 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1544 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1545 ulp_encap_buffer_copy(buff,
1546 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1547 sizeof(ipv4_spec->hdr.dst_addr),
1548 ULP_BUFFER_ALIGN_8_BYTE);
1550 /* Update the src ip address */
1551 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1552 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1553 sizeof(ipv4_spec->hdr.src_addr)];
1554 ulp_encap_buffer_copy(buff,
1555 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1556 sizeof(ipv4_spec->hdr.src_addr),
1557 ULP_BUFFER_ALIGN_8_BYTE);
1559 /* Update the ip size details */
1560 ip_size = tfp_cpu_to_be_32(ip_size);
1561 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1562 &ip_size, sizeof(uint32_t));
1564 /* update the ip type */
1565 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1566 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1567 &ip_type, sizeof(uint32_t));
1569 /* update the computed field to notify it is ipv4 header */
1570 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1573 if (!ulp_rte_item_skip_void(&item, 1))
1574 return BNXT_TF_RC_ERROR;
1575 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1576 ipv6_spec = item->spec;
1577 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1579 /* copy the ipv6 details */
/* Empty vtc_flow in the spec -> fall back to the default header. */
1580 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1581 if (ulp_buffer_is_empty(tmp_buff,
1582 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1583 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1584 ulp_encap_buffer_copy(buff,
1586 sizeof(def_ipv6_hdr),
1587 ULP_BUFFER_ALIGN_8_BYTE);
1589 /* The payload length being ignored in the ip hdr. */
1590 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1591 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1592 ulp_encap_buffer_copy(buff,
1594 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1595 ULP_BUFFER_ALIGN_8_BYTE);
1596 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1597 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1598 BNXT_ULP_ENCAP_IPV6_DO];
1599 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1600 ulp_encap_buffer_copy(buff,
1602 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1603 ULP_BUFFER_ALIGN_8_BYTE);
1605 /* Update the dst ip address in ip encap buffer */
1606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1607 sizeof(def_ipv6_hdr)];
1608 ulp_encap_buffer_copy(buff,
1609 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1610 sizeof(ipv6_spec->hdr.dst_addr),
1611 ULP_BUFFER_ALIGN_8_BYTE);
1613 /* Update the src ip address */
1614 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1615 ulp_encap_buffer_copy(buff,
1616 (const uint8_t *)ipv6_spec->hdr.src_addr,
1617 sizeof(ipv6_spec->hdr.src_addr),
1618 ULP_BUFFER_ALIGN_16_BYTE);
1620 /* Update the ip size details */
1621 ip_size = tfp_cpu_to_be_32(ip_size);
1622 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1623 &ip_size, sizeof(uint32_t));
1625 /* update the ip type */
1626 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1627 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1628 &ip_type, sizeof(uint32_t));
1630 /* update the computed field to notify it is ipv6 header */
1631 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1634 if (!ulp_rte_item_skip_void(&item, 1))
1635 return BNXT_TF_RC_ERROR;
1637 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1638 return BNXT_TF_RC_ERROR;
1642 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1643 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1644 return BNXT_TF_RC_ERROR;
1646 /* copy the udp details */
1647 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1648 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1649 ULP_BUFFER_ALIGN_8_BYTE);
1651 if (!ulp_rte_item_skip_void(&item, 1))
1652 return BNXT_TF_RC_ERROR;
1655 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1656 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1657 return BNXT_TF_RC_ERROR;
1659 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1660 /* copy the vxlan details */
1661 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* Force the VXLAN "valid VNI" flag bit (RFC 7348 I-flag). */
1662 vxlan_spec.flags = 0x08;
1663 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
/* IPv6 path splits the vxlan header into two half copies — presumably an
 * alignment requirement of the encap template; TODO confirm. */
1664 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1665 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1666 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1668 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1669 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1670 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1671 (const uint8_t *)&vxlan_spec.vni,
1672 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1674 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1675 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1676 &vxlan_size, sizeof(uint32_t));
1678 /* update the act_bitmap with vxlan encap */
1679 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1680 return BNXT_TF_RC_SUCCESS;
1683 /* Function to handle the parsing of RTE Flow action vxlan_decap Header */
/* Only records the decap request; no action properties are needed. */
1685 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1687 struct ulp_rte_parser_params *params)
1689 /* update the act_bitmap with vxlan decap */
1690 ULP_BITMAP_SET(params->act_bitmap.bits,
1691 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1692 return BNXT_TF_RC_SUCCESS;
1695 /* Function to handle the parsing of RTE Flow action drop Header. */
/* DROP takes no configuration; only the action bit is recorded. */
1697 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1698 struct ulp_rte_parser_params *params)
1700 /* Update the act_bitmap with drop */
1701 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1702 return BNXT_TF_RC_SUCCESS;
1705 /* Function to handle the parsing of RTE Flow action count. */
/*
 * Copies the count action configuration into the action properties and
 * sets the COUNT action bit.  Shared counters are rejected with
 * BNXT_TF_RC_PARSE_ERR.
 */
1707 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1708 struct ulp_rte_parser_params *params)
1711 const struct rte_flow_action_count *act_count;
1712 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1714 act_count = action_item->conf;
1716 if (act_count->shared) {
1718 "Parse Error:Shared count not supported\n");
1719 return BNXT_TF_RC_PARSE_ERR;
1721 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1723 BNXT_ULP_ACT_PROP_SZ_COUNT);
1726 /* Update the act_bitmap with count */
1727 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1728 return BNXT_TF_RC_SUCCESS;
1731 /* Function to handle the parsing of action ports. */
/*
 * Resolves the destination for a port action from the port database:
 * egress flows get the vport, ingress flows get the default VNIC
 * (VF-rep ports use the VF function VNIC, others the driver function
 * VNIC).  The resolved id is stored big-endian in the action properties
 * and the ACT_PORT_IS_SET computed field is raised.
 * NOTE(review): error-check braces and some declarations are elided
 * from this excerpt.
 */
1733 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1736 enum bnxt_ulp_direction_type dir;
1739 struct ulp_rte_act_prop *act = ¶m->act_prop;
1740 enum bnxt_ulp_intf_type port_type;
1743 /* Get the direction */
1744 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1745 if (dir == BNXT_ULP_DIR_EGRESS) {
1746 /* For egress direction, fill vport */
1747 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1748 return BNXT_TF_RC_ERROR;
1751 pid = rte_cpu_to_be_32(pid);
1752 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1753 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1755 /* For ingress direction, fill vnic */
1756 port_type = ULP_COMP_FLD_IDX_RD(param,
1757 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1758 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1759 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1761 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1763 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1765 return BNXT_TF_RC_ERROR;
1768 pid = rte_cpu_to_be_32(pid);
1769 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1770 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1773 /* Update the action port set bit */
1774 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1775 return BNXT_TF_RC_SUCCESS;
1778 /* Function to handle the parsing of RTE Flow action PF. */
/*
 * Redirects to the PF of the incoming interface: looks up the current
 * device's port-db ifindex, verifies it is a PF, records the port type
 * and delegates to ulp_rte_parser_act_port_set().
 */
1780 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1781 struct ulp_rte_parser_params *params)
1785 enum bnxt_ulp_intf_type intf_type;
1787 /* Get the port id of the current device */
1788 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1790 /* Get the port db ifindex */
1791 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1793 BNXT_TF_DBG(ERR, "Invalid port id\n");
1794 return BNXT_TF_RC_ERROR;
1797 /* Check the port is PF port */
1798 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1799 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1800 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1801 return BNXT_TF_RC_ERROR;
1803 /* Update the action properties */
1804 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1805 return ulp_rte_parser_act_port_set(params, ifindex);
1808 /* Function to handle the parsing of RTE Flow action VF. */
/*
 * Redirects to the VF identified by vf_action->id: maps the function id
 * to a port-db ifindex, verifies the port is a (possibly trusted) VF,
 * records the port type and delegates to ulp_rte_parser_act_port_set().
 * The "original" attribute is not supported.
 */
1810 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1811 struct ulp_rte_parser_params *params)
1813 const struct rte_flow_action_vf *vf_action;
1815 enum bnxt_ulp_intf_type intf_type;
1817 vf_action = action_item->conf;
1819 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1820 return BNXT_TF_RC_PARSE_ERR;
1823 if (vf_action->original) {
1824 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1825 return BNXT_TF_RC_PARSE_ERR;
1828 /* Check the port is VF port */
1829 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1831 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1832 return BNXT_TF_RC_ERROR;
1834 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1835 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1836 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1837 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1838 return BNXT_TF_RC_ERROR;
1841 /* Update the action properties */
1842 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1843 return ulp_rte_parser_act_port_set(params, ifindex);
1846 /* Function to handle the parsing of RTE Flow action port_id. */
/*
 * Redirects to the DPDK port identified by port_id->id: maps it to a
 * port-db ifindex, records the interface type and delegates to
 * ulp_rte_parser_act_port_set().  The "original" attribute is not
 * supported.
 */
1848 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1849 struct ulp_rte_parser_params *param)
1851 const struct rte_flow_action_port_id *port_id = act_item->conf;
1853 enum bnxt_ulp_intf_type intf_type;
1857 "ParseErr: Invalid Argument\n");
1858 return BNXT_TF_RC_PARSE_ERR;
1860 if (port_id->original) {
1862 "ParseErr:Portid Original not supported\n");
1863 return BNXT_TF_RC_PARSE_ERR;
1866 /* Get the port db ifindex */
1867 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1869 BNXT_TF_DBG(ERR, "Invalid port id\n");
1870 return BNXT_TF_RC_ERROR;
1873 /* Get the intf type */
1874 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1876 BNXT_TF_DBG(ERR, "Invalid port type\n");
1877 return BNXT_TF_RC_ERROR;
1880 /* Set the action port */
1881 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1882 return ulp_rte_parser_act_port_set(param, ifindex);
1885 /* Function to handle the parsing of RTE Flow action phy_port. */
/*
 * Redirects to a physical port (egress only): resolves the vport for
 * phy_port->index from the port database, stores it big-endian in the
 * VPORT action property and raises ACT_PORT_IS_SET with port type
 * PHY_PORT.  "original" is not supported; ingress use is rejected.
 */
1887 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1888 struct ulp_rte_parser_params *prm)
1890 const struct rte_flow_action_phy_port *phy_port;
1894 enum bnxt_ulp_direction_type dir;
1896 phy_port = action_item->conf;
1899 "ParseErr: Invalid Argument\n");
1900 return BNXT_TF_RC_PARSE_ERR;
1903 if (phy_port->original) {
1905 "Parse Err:Port Original not supported\n");
1906 return BNXT_TF_RC_PARSE_ERR;
1908 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1909 if (dir != BNXT_ULP_DIR_EGRESS) {
1911 "Parse Err:Phy ports are valid only for egress\n");
1912 return BNXT_TF_RC_PARSE_ERR;
1914 /* Get the physical port details from port db */
1915 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1918 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1923 pid = rte_cpu_to_be_32(pid);
1924 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1925 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1927 /* Update the action port set bit */
1928 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1929 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1930 BNXT_ULP_INTF_TYPE_PHY_PORT);
1931 return BNXT_TF_RC_SUCCESS;
1934 /* Function to handle the parsing of RTE Flow action pop vlan. */
/* POP_VLAN takes no configuration; only the action bit is recorded. */
1936 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1937 struct ulp_rte_parser_params *params)
1939 /* Update the act_bitmap with pop */
1940 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1941 return BNXT_TF_RC_SUCCESS;
1944 /* Function to handle the parsing of RTE Flow action push vlan. */
/*
 * Stores the push-vlan ethertype in the action properties and sets the
 * PUSH_VLAN action bit.  Only 0x8100 (RTE_ETHER_TYPE_VLAN) is accepted.
 * NOTE(review): the comparison converts the (big-endian wire-format)
 * ethertype with tfp_cpu_to_be_16 before comparing against the host-order
 * constant — confirm the intended byte order against the full source.
 */
1946 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1947 struct ulp_rte_parser_params *params)
1949 const struct rte_flow_action_of_push_vlan *push_vlan;
1951 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1953 push_vlan = action_item->conf;
1955 ethertype = push_vlan->ethertype;
1956 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1958 "Parse Err: Ethertype not supported\n");
1959 return BNXT_TF_RC_PARSE_ERR;
1961 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1962 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1963 /* Update the act_bitmap with push vlan */
1964 ULP_BITMAP_SET(params->act_bitmap.bits,
1965 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1966 return BNXT_TF_RC_SUCCESS;
1968 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1969 return BNXT_TF_RC_ERROR;
1972 /* Function to handle the parsing of RTE Flow action set vlan id. */
/*
 * Stores the VLAN VID in the action properties and sets the
 * SET_VLAN_VID action bit.
 * NOTE(review): the `vlan_vid->vlan_vid` truthiness check also rejects
 * a legitimate VID of 0 — confirm whether that is intentional.
 */
1974 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1975 struct ulp_rte_parser_params *params)
1977 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1979 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1981 vlan_vid = action_item->conf;
1982 if (vlan_vid && vlan_vid->vlan_vid) {
1983 vid = vlan_vid->vlan_vid;
1984 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1985 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1986 /* Update the act_bitmap with vlan vid */
1987 ULP_BITMAP_SET(params->act_bitmap.bits,
1988 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1989 return BNXT_TF_RC_SUCCESS;
1991 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1992 return BNXT_TF_RC_ERROR;
1995 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
/*
 * Stores the VLAN PCP in the action properties and sets the
 * SET_VLAN_PCP action bit; errors out on an invalid conf pointer
 * (check elided from this excerpt).
 */
1997 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1998 struct ulp_rte_parser_params *params)
2000 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2002 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2004 vlan_pcp = action_item->conf;
2006 pcp = vlan_pcp->vlan_pcp;
2007 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2008 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2009 /* Update the act_bitmap with vlan pcp */
2010 ULP_BITMAP_SET(params->act_bitmap.bits,
2011 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
2012 return BNXT_TF_RC_SUCCESS;
2014 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2015 return BNXT_TF_RC_ERROR;
2018 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
/*
 * Stores the new IPv4 source address in the action properties and sets
 * the SET_IPV4_SRC action bit; errors out on an invalid conf pointer
 * (check elided from this excerpt).
 */
2020 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2021 struct ulp_rte_parser_params *params)
2023 const struct rte_flow_action_set_ipv4 *set_ipv4;
2024 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2026 set_ipv4 = action_item->conf;
2028 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2029 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2030 /* Update the act_bitmap with set ipv4 src */
2031 ULP_BITMAP_SET(params->act_bitmap.bits,
2032 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
2033 return BNXT_TF_RC_SUCCESS;
2035 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2036 return BNXT_TF_RC_ERROR;
2039 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
/*
 * Stores the new IPv4 destination address in the action properties and
 * sets the SET_IPV4_DST action bit; errors out on an invalid conf
 * pointer (check elided from this excerpt).
 */
2041 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2042 struct ulp_rte_parser_params *params)
2044 const struct rte_flow_action_set_ipv4 *set_ipv4;
2045 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2047 set_ipv4 = action_item->conf;
2049 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2050 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2051 /* Update the act_bitmap with set ipv4 dst */
2052 ULP_BITMAP_SET(params->act_bitmap.bits,
2053 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
2054 return BNXT_TF_RC_SUCCESS;
2056 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2057 return BNXT_TF_RC_ERROR;
2060 /* Function to handle the parsing of RTE Flow action set tp src.*/
/*
 * Stores the new L4 source port in the action properties and sets the
 * SET_TP_SRC action bit; errors out on an invalid conf pointer
 * (check elided from this excerpt).
 */
2062 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2063 struct ulp_rte_parser_params *params)
2065 const struct rte_flow_action_set_tp *set_tp;
2066 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2068 set_tp = action_item->conf;
2070 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2071 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2072 /* Update the act_bitmap with set tp src */
2073 ULP_BITMAP_SET(params->act_bitmap.bits,
2074 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
2075 return BNXT_TF_RC_SUCCESS;
2078 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2079 return BNXT_TF_RC_ERROR;
2082 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2084 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2085 struct ulp_rte_parser_params *params)
2087 const struct rte_flow_action_set_tp *set_tp;
2088 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2090 set_tp = action_item->conf;
2092 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2093 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2094 /* Update the hdr_bitmap with set tp dst */
2095 ULP_BITMAP_SET(params->act_bitmap.bits,
2096 BNXT_ULP_ACTION_BIT_SET_TP_DST);
2097 return BNXT_TF_RC_SUCCESS;
2100 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2101 return BNXT_TF_RC_ERROR;
2104 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2106 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2107 struct ulp_rte_parser_params *params)
2109 /* Update the act_bitmap with dec ttl */
2110 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2111 return BNXT_TF_RC_SUCCESS;