1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Local defines for the parsing functions */
16 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
17 #define ULP_VLAN_PRIORITY_MASK 0x700
18 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
20 /* Utility function to skip the void items. */
22 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
28 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
35 /* Utility function to update the field_bitmap */
37 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
40 struct ulp_rte_hdr_field *field;
42 field = ¶ms->hdr_field[idx];
43 if (ulp_bitmap_notzero(field->mask, field->size)) {
44 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
46 if (!ulp_bitmap_is_ones(field->mask, field->size))
47 ULP_BITMAP_SET(params->fld_bitmap.bits,
48 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
50 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
54 /* Utility function to copy field spec items */
55 static struct ulp_rte_hdr_field *
56 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
61 memcpy(field->spec, buffer, field->size);
66 /* Utility function to copy field masks items */
68 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
73 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
75 memcpy(field->mask, buffer, size);
76 ulp_rte_parser_field_bitmap_update(params, *idx);
81 * Function to handle the parsing of RTE Flows and placing
82 * the RTE flow items into the ulp structures.
85 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
86 struct ulp_rte_parser_params *params)
88 const struct rte_flow_item *item = pattern;
89 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots for the implicit SVIF match. */
91 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
93 /* Set the computed flags for no vlan tags before parsing */
94 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
95 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
97 /* Parse all the items in the pattern */
98 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
99 /* get the header information from the flow_hdr_info table */
100 hdr_info = &ulp_hdr_info[item->type];
101 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
103 "Truflow parser does not support type %d\n",
105 return BNXT_TF_RC_PARSE_ERR;
106 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
107 /* call the registered callback handler */
108 if (hdr_info->proto_hdr_func) {
109 if (hdr_info->proto_hdr_func(item, params) !=
110 BNXT_TF_RC_SUCCESS) {
111 return BNXT_TF_RC_ERROR;
/*
 * All items parsed; fall through to set the implicit source port
 * match when no explicit SVIF item was present.
 */
117 /* update the implied SVIF */
118 return ulp_rte_parser_implicit_match_port_process(params);
122 * Function to handle the parsing of RTE Flows and placing
123 * the RTE flow actions into the ulp structures.
126 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
127 struct ulp_rte_parser_params *params)
129 const struct rte_flow_action *action_item = actions;
130 struct bnxt_ulp_rte_act_info *hdr_info;
132 /* Parse all the items in the pattern */
133 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
134 /* get the header information from the flow_hdr_info table */
135 hdr_info = &ulp_act_info[action_item->type];
136 if (hdr_info->act_type ==
137 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
139 "Truflow parser does not support act %u\n",
/* NOTE(review): unsupported actions return BNXT_TF_RC_ERROR,
 * while unsupported header items return BNXT_TF_RC_PARSE_ERR.
 * Confirm this asymmetry is intentional.
 */
141 return BNXT_TF_RC_ERROR;
142 } else if (hdr_info->act_type ==
143 BNXT_ULP_ACT_TYPE_SUPPORTED) {
144 /* call the registered callback handler */
145 if (hdr_info->proto_act_func) {
146 if (hdr_info->proto_act_func(action_item,
148 BNXT_TF_RC_SUCCESS) {
149 return BNXT_TF_RC_ERROR;
155 /* update the implied port details */
156 ulp_rte_parser_implicit_act_port_process(params);
157 return BNXT_TF_RC_SUCCESS;
161 * Function to handle the post processing of the parsing details
164 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
166 enum bnxt_ulp_direction_type dir;
167 enum bnxt_ulp_intf_type match_port_type, act_port_type;
168 uint32_t act_port_set;
170 /* Get the computed details */
171 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
172 match_port_type = ULP_COMP_FLD_IDX_RD(params,
173 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
174 act_port_type = ULP_COMP_FLD_IDX_RD(params,
175 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
176 act_port_set = ULP_COMP_FLD_IDX_RD(params,
177 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
179 /* set the flow direction in the proto and action header */
180 if (dir == BNXT_ULP_DIR_EGRESS) {
181 ULP_BITMAP_SET(params->hdr_bitmap.bits,
182 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
183 ULP_BITMAP_SET(params->act_bitmap.bits,
184 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
/* VF-to-VF only when both match and action ports are VF representors. */
187 /* calculate the VF to VF flag */
188 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
189 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
190 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
192 /* Update the decrement ttl computational fields */
193 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
194 BNXT_ULP_ACTION_BIT_DEC_TTL)) {
196 * Check that vxlan proto is included and vxlan decap
197 * action is not set then decrement tunnel ttl.
198 * Similarly add GRE and NVGRE in future.
200 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
201 BNXT_ULP_HDR_BIT_T_VXLAN) &&
202 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
203 BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
204 ULP_COMP_FLD_IDX_WR(params,
205 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
207 ULP_COMP_FLD_IDX_WR(params,
208 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
212 /* TBD: Handle the flow rejection scenarios */
217 * Function to compute the flow direction based on the match port details
220 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
222 enum bnxt_ulp_intf_type match_port_type;
224 /* Get the match port type */
225 match_port_type = ULP_COMP_FLD_IDX_RD(params,
226 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
228 /* If ingress flow and matchport is vf rep then dir is egress*/
229 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
230 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
/* VF-rep ingress is flipped: hardware sees it as an egress flow. */
231 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
232 BNXT_ULP_DIR_EGRESS);
234 /* Assign the input direction */
235 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
236 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
237 BNXT_ULP_DIR_INGRESS);
239 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
240 BNXT_ULP_DIR_EGRESS);
244 /* Function to handle the parsing of RTE Flow item PF Header. */
246 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
251 enum bnxt_ulp_direction_type dir;
252 struct ulp_rte_hdr_field *hdr_field;
253 enum bnxt_ulp_svif_type svif_type;
254 enum bnxt_ulp_intf_type port_type;
/* Only one source-port match is allowed per flow. */
256 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
257 BNXT_ULP_INVALID_SVIF_VAL) {
259 "SVIF already set,multiple source not support'd\n");
260 return BNXT_TF_RC_ERROR;
263 /* Get port type details */
264 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
265 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
266 BNXT_TF_DBG(ERR, "Invalid port type\n");
267 return BNXT_TF_RC_ERROR;
270 /* Update the match port type */
271 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
273 /* compute the direction */
274 bnxt_ulp_rte_parser_direction_compute(params);
276 /* Get the computed direction */
277 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
278 if (dir == BNXT_ULP_DIR_INGRESS) {
279 svif_type = BNXT_ULP_PHY_PORT_SVIF;
281 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
282 svif_type = BNXT_ULP_VF_FUNC_SVIF;
284 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
286 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* SVIF is stored big-endian in the spec field. */
288 svif = rte_cpu_to_be_16(svif);
/* NOTE(review): "¶ms" below appears to be mojibake for "&params" —
 * confirm against the upstream source.
 */
289 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
290 memcpy(hdr_field->spec, &svif, sizeof(svif));
291 memcpy(hdr_field->mask, &mask, sizeof(mask));
292 hdr_field->size = sizeof(svif);
293 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
294 rte_be_to_cpu_16(svif));
295 return BNXT_TF_RC_SUCCESS;
298 /* Function to handle the parsing of the RTE port id */
300 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
302 uint16_t port_id = 0;
303 uint16_t svif_mask = 0xFFFF;
305 int32_t rc = BNXT_TF_RC_ERROR;
/* If an explicit SVIF match was already parsed there is nothing to do. */
307 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
308 BNXT_ULP_INVALID_SVIF_VAL)
309 return BNXT_TF_RC_SUCCESS;
311 /* SVIF not set. So get the port id */
312 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
/* Translate the DPDK port id into the driver's interface index. */
314 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
317 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
321 /* Update the SVIF details */
322 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
326 /* Function to handle the implicit action port id */
328 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
330 struct rte_flow_action action_item = {0};
331 struct rte_flow_action_port_id port_id = {0};
333 /* Read the action port set bit */
334 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
335 /* Already set, so just exit */
336 return BNXT_TF_RC_SUCCESS;
/* No explicit destination: default the action port to the ingress port. */
338 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
339 action_item.conf = &port_id;
341 /* Update the action port based on incoming port */
342 ulp_rte_port_id_act_handler(&action_item, params);
344 /* Reset the action port set bit */
345 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
346 return BNXT_TF_RC_SUCCESS;
349 /* Function to handle the parsing of RTE Flow item PF Header. */
351 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
352 struct ulp_rte_parser_params *params)
354 uint16_t port_id = 0;
355 uint16_t svif_mask = 0xFFFF;
358 /* Get the implicit port id */
359 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
361 /* perform the conversion from dpdk port to bnxt ifindex */
362 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
365 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
366 return BNXT_TF_RC_ERROR;
369 /* Update the SVIF details */
370 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
373 /* Function to handle the parsing of RTE Flow item VF Header. */
375 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
376 struct ulp_rte_parser_params *params)
378 const struct rte_flow_item_vf *vf_spec = item->spec;
379 const struct rte_flow_item_vf *vf_mask = item->mask;
382 int32_t rc = BNXT_TF_RC_PARSE_ERR;
384 /* Get VF rte_flow_item for Port details */
386 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
390 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
395 /* perform the conversion from VF Func id to bnxt ifindex */
396 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
399 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
402 /* Update the SVIF details */
403 return ulp_rte_parser_svif_set(params, ifindex, mask);
406 /* Function to handle the parsing of RTE Flow item port id Header. */
408 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
409 struct ulp_rte_parser_params *params)
411 const struct rte_flow_item_port_id *port_spec = item->spec;
412 const struct rte_flow_item_port_id *port_mask = item->mask;
414 int32_t rc = BNXT_TF_RC_PARSE_ERR;
418 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
422 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
425 mask = port_mask->id;
427 /* perform the conversion from dpdk port to bnxt ifindex */
428 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
431 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
434 /* Update the SVIF details */
435 return ulp_rte_parser_svif_set(params, ifindex, mask);
438 /* Function to handle the parsing of RTE Flow item phy port Header. */
440 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
441 struct ulp_rte_parser_params *params)
443 const struct rte_flow_item_phy_port *port_spec = item->spec;
444 const struct rte_flow_item_phy_port *port_mask = item->mask;
446 int32_t rc = BNXT_TF_RC_ERROR;
448 enum bnxt_ulp_direction_type dir;
449 struct ulp_rte_hdr_field *hdr_field;
451 /* Copy the rte_flow_item for phy port into hdr_field */
453 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
457 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
460 mask = port_mask->index;
462 /* Update the match port type */
463 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
464 BNXT_ULP_INTF_TYPE_PHY_PORT);
466 /* Compute the Hw direction */
467 bnxt_ulp_rte_parser_direction_compute(params);
469 /* Direction validation */
470 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
471 if (dir == BNXT_ULP_DIR_EGRESS) {
473 "Parse Err:Phy ports are valid only for ingress\n");
474 return BNXT_TF_RC_PARSE_ERR;
477 /* Get the physical port details from port db */
478 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
481 BNXT_TF_DBG(ERR, "Failed to get port details\n");
482 return BNXT_TF_RC_PARSE_ERR;
485 /* Update the SVIF details */
/* SVIF is stored big-endian in the spec field. */
486 svif = rte_cpu_to_be_16(svif);
/* NOTE(review): "¶ms" below appears to be mojibake for "&params" —
 * confirm against the upstream source.
 */
487 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
488 memcpy(hdr_field->spec, &svif, sizeof(svif));
489 memcpy(hdr_field->mask, &mask, sizeof(mask));
490 hdr_field->size = sizeof(svif);
491 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
492 rte_be_to_cpu_16(svif));
493 return BNXT_TF_RC_SUCCESS;
496 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
498 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
499 struct ulp_rte_parser_params *params)
501 const struct rte_flow_item_eth *eth_spec = item->spec;
502 const struct rte_flow_item_eth *eth_mask = item->mask;
503 struct ulp_rte_hdr_field *field;
504 uint32_t idx = params->field_idx;
505 uint64_t set_flag = 0;
509 * Copy the rte_flow_item for eth into hdr_field using ethernet
/* Fields are copied in dst MAC, src MAC, ether-type order. */
513 size = sizeof(eth_spec->dst.addr_bytes);
/* NOTE(review): "¶ms" below appears to be mojibake for "&params" —
 * confirm against the upstream source.
 */
514 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
515 eth_spec->dst.addr_bytes,
517 size = sizeof(eth_spec->src.addr_bytes);
518 field = ulp_rte_parser_fld_copy(field,
519 eth_spec->src.addr_bytes,
521 field = ulp_rte_parser_fld_copy(field,
523 sizeof(eth_spec->type));
526 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
527 sizeof(eth_mask->dst.addr_bytes));
528 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
529 sizeof(eth_mask->src.addr_bytes));
530 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
531 sizeof(eth_mask->type));
533 /* Add number of vlan header elements */
534 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
/* Pre-reserve hdr_field slots for any VLAN tags that may follow. */
535 params->vlan_idx = params->field_idx;
536 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
538 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
/* A second ETH item (outer bit already set) is treated as the inner header. */
539 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
540 BNXT_ULP_HDR_BIT_O_ETH);
542 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
544 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
545 BNXT_ULP_HDR_BIT_I_ETH);
547 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
548 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
550 return BNXT_TF_RC_SUCCESS;
553 /* Function to handle the parsing of RTE Flow item Vlan Header. */
555 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
556 struct ulp_rte_parser_params *params)
558 const struct rte_flow_item_vlan *vlan_spec = item->spec;
559 const struct rte_flow_item_vlan *vlan_mask = item->mask;
560 struct ulp_rte_hdr_field *field;
561 struct ulp_rte_hdr_bitmap *hdr_bit;
562 uint32_t idx = params->vlan_idx;
563 uint16_t vlan_tag, priority;
564 uint32_t outer_vtag_num;
565 uint32_t inner_vtag_num;
568 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* Split the 16-bit TCI into 3-bit priority and 12-bit VLAN id. */
572 vlan_tag = ntohs(vlan_spec->tci);
573 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
574 vlan_tag &= ULP_VLAN_TAG_MASK;
575 vlan_tag = htons(vlan_tag);
/* NOTE(review): "¶ms" below appears to be mojibake for "&params" —
 * confirm against the upstream source.
 */
577 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
580 field = ulp_rte_parser_fld_copy(field,
583 field = ulp_rte_parser_fld_copy(field,
584 &vlan_spec->inner_type,
585 sizeof(vlan_spec->inner_type));
589 vlan_tag = ntohs(vlan_mask->tci);
590 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
594 * the storage for priority and vlan tag is 2 bytes
595 * The mask of priority which is 3 bits if it is all 1's
596 * then make the rest bits 13 bits as 1's
597 * so that it is matched as exact match.
599 if (priority == ULP_VLAN_PRIORITY_MASK)
600 priority |= ~ULP_VLAN_PRIORITY_MASK;
601 if (vlan_tag == ULP_VLAN_TAG_MASK)
602 vlan_tag |= ~ULP_VLAN_TAG_MASK;
603 vlan_tag = htons(vlan_tag);
605 ulp_rte_prsr_mask_copy(params, &idx, &priority,
607 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
609 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
610 sizeof(vlan_mask->inner_type));
612 /* Set the vlan index to new incremented value */
613 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
615 /* Get the outer tag and inner tag counts */
616 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
617 BNXT_ULP_CF_IDX_O_VTAG_NUM);
618 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
619 BNXT_ULP_CF_IDX_I_VTAG_NUM);
621 /* Update the hdr_bitmap of the vlans */
/*
 * Four legal placements, distinguished by which ETH headers have been
 * seen and how many tags are already counted:
 *   outer-ETH only, 0 tags  -> outer-outer VLAN (OO)
 *   outer-ETH only, 1 tag   -> outer-inner VLAN (OI)
 *   inner-ETH seen, 0 tags  -> inner-outer VLAN (IO)
 *   inner-ETH seen, 1 tag   -> inner-inner VLAN (II)
 * Anything else is a parse error (VLAN without a preceding ETH).
 */
622 hdr_bit = &params->hdr_bitmap;
623 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
624 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
626 /* Update the vlan tag num */
628 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
630 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
631 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
632 ULP_BITMAP_SET(params->hdr_bitmap.bits,
633 BNXT_ULP_HDR_BIT_OO_VLAN);
634 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
635 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
636 outer_vtag_num == 1) {
637 /* update the vlan tag num */
639 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
641 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
642 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
643 ULP_BITMAP_SET(params->hdr_bitmap.bits,
644 BNXT_ULP_HDR_BIT_OI_VLAN);
645 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
646 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
648 /* update the vlan tag num */
650 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
652 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
653 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
654 ULP_BITMAP_SET(params->hdr_bitmap.bits,
655 BNXT_ULP_HDR_BIT_IO_VLAN);
656 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
657 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
658 inner_vtag_num == 1) {
659 /* update the vlan tag num */
661 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
663 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
664 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
665 ULP_BITMAP_SET(params->hdr_bitmap.bits,
666 BNXT_ULP_HDR_BIT_II_VLAN);
668 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
669 return BNXT_TF_RC_ERROR;
671 return BNXT_TF_RC_SUCCESS;
674 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
676 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
677 struct ulp_rte_parser_params *params)
679 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
680 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
681 struct ulp_rte_hdr_field *field;
/* NOTE(review): "¶ms" below appears to be mojibake for "&params" —
 * confirm against the upstream source.
 */
682 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
683 uint32_t idx = params->field_idx;
685 uint32_t inner_l3, outer_l3;
/* At most two L3 headers (outer + inner) are supported. */
687 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
689 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
690 return BNXT_TF_RC_ERROR;
694 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
/* Copy every IPv4 header field in wire order into hdr_field. */
698 size = sizeof(ipv4_spec->hdr.version_ihl);
699 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
700 &ipv4_spec->hdr.version_ihl,
702 size = sizeof(ipv4_spec->hdr.type_of_service);
703 field = ulp_rte_parser_fld_copy(field,
704 &ipv4_spec->hdr.type_of_service,
706 size = sizeof(ipv4_spec->hdr.total_length);
707 field = ulp_rte_parser_fld_copy(field,
708 &ipv4_spec->hdr.total_length,
710 size = sizeof(ipv4_spec->hdr.packet_id);
711 field = ulp_rte_parser_fld_copy(field,
712 &ipv4_spec->hdr.packet_id,
714 size = sizeof(ipv4_spec->hdr.fragment_offset);
715 field = ulp_rte_parser_fld_copy(field,
716 &ipv4_spec->hdr.fragment_offset,
718 size = sizeof(ipv4_spec->hdr.time_to_live);
719 field = ulp_rte_parser_fld_copy(field,
720 &ipv4_spec->hdr.time_to_live,
722 size = sizeof(ipv4_spec->hdr.next_proto_id);
723 field = ulp_rte_parser_fld_copy(field,
724 &ipv4_spec->hdr.next_proto_id,
726 size = sizeof(ipv4_spec->hdr.hdr_checksum);
727 field = ulp_rte_parser_fld_copy(field,
728 &ipv4_spec->hdr.hdr_checksum,
730 size = sizeof(ipv4_spec->hdr.src_addr);
731 field = ulp_rte_parser_fld_copy(field,
732 &ipv4_spec->hdr.src_addr,
734 size = sizeof(ipv4_spec->hdr.dst_addr);
735 field = ulp_rte_parser_fld_copy(field,
736 &ipv4_spec->hdr.dst_addr,
740 ulp_rte_prsr_mask_copy(params, &idx,
741 &ipv4_mask->hdr.version_ihl,
742 sizeof(ipv4_mask->hdr.version_ihl));
743 #ifdef ULP_DONT_IGNORE_TOS
744 ulp_rte_prsr_mask_copy(params, &idx,
745 &ipv4_mask->hdr.type_of_service,
746 sizeof(ipv4_mask->hdr.type_of_service));
749 * The tos field is ignored since OVS is setting it as wild card
750 * match and it is not supported. This is a work around and
751 * shall be addressed in the future.
756 ulp_rte_prsr_mask_copy(params, &idx,
757 &ipv4_mask->hdr.total_length,
758 sizeof(ipv4_mask->hdr.total_length));
759 ulp_rte_prsr_mask_copy(params, &idx,
760 &ipv4_mask->hdr.packet_id,
761 sizeof(ipv4_mask->hdr.packet_id));
762 ulp_rte_prsr_mask_copy(params, &idx,
763 &ipv4_mask->hdr.fragment_offset,
764 sizeof(ipv4_mask->hdr.fragment_offset));
765 ulp_rte_prsr_mask_copy(params, &idx,
766 &ipv4_mask->hdr.time_to_live,
767 sizeof(ipv4_mask->hdr.time_to_live));
768 ulp_rte_prsr_mask_copy(params, &idx,
769 &ipv4_mask->hdr.next_proto_id,
770 sizeof(ipv4_mask->hdr.next_proto_id));
771 ulp_rte_prsr_mask_copy(params, &idx,
772 &ipv4_mask->hdr.hdr_checksum,
773 sizeof(ipv4_mask->hdr.hdr_checksum));
774 ulp_rte_prsr_mask_copy(params, &idx,
775 &ipv4_mask->hdr.src_addr,
776 sizeof(ipv4_mask->hdr.src_addr));
777 ulp_rte_prsr_mask_copy(params, &idx,
778 &ipv4_mask->hdr.dst_addr,
779 sizeof(ipv4_mask->hdr.dst_addr));
781 /* Add the number of ipv4 header elements */
782 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
784 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
/* If an outer L3 header already exists, this one is the inner header. */
785 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
787 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
788 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
789 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
791 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
793 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
795 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
797 return BNXT_TF_RC_SUCCESS;
800 /* Function to handle the parsing of RTE Flow item IPV6 Header */
802 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
803 struct ulp_rte_parser_params *params)
805 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
806 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
807 struct ulp_rte_hdr_field *field;
/* NOTE(review): "¶ms" below appears to be mojibake for "&params" —
 * confirm against the upstream source.
 */
808 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
809 uint32_t idx = params->field_idx;
811 uint32_t inner_l3, outer_l3;
812 uint32_t vtcf, vtcf_mask;
/* At most two L3 headers (outer + inner) are supported. */
814 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
816 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
817 return BNXT_TF_RC_ERROR;
821 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/*
 * The 32-bit vtc_flow word is decomposed into three separate fields:
 * version, traffic class and flow label.
 */
825 size = sizeof(ipv6_spec->hdr.vtc_flow);
827 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
828 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
832 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
833 field = ulp_rte_parser_fld_copy(field,
837 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
838 field = ulp_rte_parser_fld_copy(field,
842 size = sizeof(ipv6_spec->hdr.payload_len);
843 field = ulp_rte_parser_fld_copy(field,
844 &ipv6_spec->hdr.payload_len,
846 size = sizeof(ipv6_spec->hdr.proto);
847 field = ulp_rte_parser_fld_copy(field,
848 &ipv6_spec->hdr.proto,
850 size = sizeof(ipv6_spec->hdr.hop_limits);
851 field = ulp_rte_parser_fld_copy(field,
852 &ipv6_spec->hdr.hop_limits,
854 size = sizeof(ipv6_spec->hdr.src_addr);
855 field = ulp_rte_parser_fld_copy(field,
856 &ipv6_spec->hdr.src_addr,
858 size = sizeof(ipv6_spec->hdr.dst_addr);
859 field = ulp_rte_parser_fld_copy(field,
860 &ipv6_spec->hdr.dst_addr,
864 size = sizeof(ipv6_mask->hdr.vtc_flow);
866 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
867 ulp_rte_prsr_mask_copy(params, &idx,
871 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
872 ulp_rte_prsr_mask_copy(params, &idx,
877 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
878 ulp_rte_prsr_mask_copy(params, &idx,
882 ulp_rte_prsr_mask_copy(params, &idx,
883 &ipv6_mask->hdr.payload_len,
884 sizeof(ipv6_mask->hdr.payload_len));
885 ulp_rte_prsr_mask_copy(params, &idx,
886 &ipv6_mask->hdr.proto,
887 sizeof(ipv6_mask->hdr.proto));
888 ulp_rte_prsr_mask_copy(params, &idx,
889 &ipv6_mask->hdr.hop_limits,
890 sizeof(ipv6_mask->hdr.hop_limits));
891 ulp_rte_prsr_mask_copy(params, &idx,
892 &ipv6_mask->hdr.src_addr,
893 sizeof(ipv6_mask->hdr.src_addr));
894 ulp_rte_prsr_mask_copy(params, &idx,
895 &ipv6_mask->hdr.dst_addr,
896 sizeof(ipv6_mask->hdr.dst_addr));
898 /* add number of ipv6 header elements */
899 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
901 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
/* If an outer L3 header already exists, this one is the inner header. */
902 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
904 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
905 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
906 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
907 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
909 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
910 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
912 return BNXT_TF_RC_SUCCESS;
915 /* Function to handle the parsing of RTE Flow item UDP Header. */
917 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
918 struct ulp_rte_parser_params *params)
920 const struct rte_flow_item_udp *udp_spec = item->spec;
921 const struct rte_flow_item_udp *udp_mask = item->mask;
922 struct ulp_rte_hdr_field *field;
/* NOTE(review): "¶ms" below appears to be mojibake for "&params" —
 * confirm against the upstream source.
 */
923 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
924 uint32_t idx = params->field_idx;
926 uint32_t inner_l4, outer_l4;
/* At most two L4 headers (outer + inner) are supported. */
928 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
930 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
931 return BNXT_TF_RC_ERROR;
935 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
939 size = sizeof(udp_spec->hdr.src_port);
940 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
941 &udp_spec->hdr.src_port,
943 size = sizeof(udp_spec->hdr.dst_port);
944 field = ulp_rte_parser_fld_copy(field,
945 &udp_spec->hdr.dst_port,
947 size = sizeof(udp_spec->hdr.dgram_len);
948 field = ulp_rte_parser_fld_copy(field,
949 &udp_spec->hdr.dgram_len,
951 size = sizeof(udp_spec->hdr.dgram_cksum);
952 field = ulp_rte_parser_fld_copy(field,
953 &udp_spec->hdr.dgram_cksum,
957 ulp_rte_prsr_mask_copy(params, &idx,
958 &udp_mask->hdr.src_port,
959 sizeof(udp_mask->hdr.src_port));
960 ulp_rte_prsr_mask_copy(params, &idx,
961 &udp_mask->hdr.dst_port,
962 sizeof(udp_mask->hdr.dst_port));
963 ulp_rte_prsr_mask_copy(params, &idx,
964 &udp_mask->hdr.dgram_len,
965 sizeof(udp_mask->hdr.dgram_len));
966 ulp_rte_prsr_mask_copy(params, &idx,
967 &udp_mask->hdr.dgram_cksum,
968 sizeof(udp_mask->hdr.dgram_cksum));
971 /* Add number of UDP header elements */
972 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
974 /* Set the udp header bitmap and computed l4 header bitmaps */
/* If an outer L4 header already exists, this one is the inner header. */
975 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
977 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
978 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
979 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
980 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
982 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
983 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
985 return BNXT_TF_RC_SUCCESS;
988 /* Function to handle the parsing of RTE Flow item TCP Header. */
990 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
991 struct ulp_rte_parser_params *params)
993 const struct rte_flow_item_tcp *tcp_spec = item->spec;
994 const struct rte_flow_item_tcp *tcp_mask = item->mask;
995 struct ulp_rte_hdr_field *field;
/* NOTE(review): "¶ms" below appears to be mojibake for "&params" —
 * confirm against the upstream source.
 */
996 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
997 uint32_t idx = params->field_idx;
999 uint32_t inner_l4, outer_l4;
/* At most two L4 headers (outer + inner) are supported. */
1001 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
1003 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
1004 return BNXT_TF_RC_ERROR;
1008 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1012 size = sizeof(tcp_spec->hdr.src_port);
1013 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1014 &tcp_spec->hdr.src_port,
1016 size = sizeof(tcp_spec->hdr.dst_port);
1017 field = ulp_rte_parser_fld_copy(field,
1018 &tcp_spec->hdr.dst_port,
1020 size = sizeof(tcp_spec->hdr.sent_seq);
1021 field = ulp_rte_parser_fld_copy(field,
1022 &tcp_spec->hdr.sent_seq,
1024 size = sizeof(tcp_spec->hdr.recv_ack);
1025 field = ulp_rte_parser_fld_copy(field,
1026 &tcp_spec->hdr.recv_ack,
1028 size = sizeof(tcp_spec->hdr.data_off);
1029 field = ulp_rte_parser_fld_copy(field,
1030 &tcp_spec->hdr.data_off,
1032 size = sizeof(tcp_spec->hdr.tcp_flags);
1033 field = ulp_rte_parser_fld_copy(field,
1034 &tcp_spec->hdr.tcp_flags,
1036 size = sizeof(tcp_spec->hdr.rx_win);
1037 field = ulp_rte_parser_fld_copy(field,
1038 &tcp_spec->hdr.rx_win,
1040 size = sizeof(tcp_spec->hdr.cksum);
1041 field = ulp_rte_parser_fld_copy(field,
1042 &tcp_spec->hdr.cksum,
1044 size = sizeof(tcp_spec->hdr.tcp_urp);
1045 field = ulp_rte_parser_fld_copy(field,
1046 &tcp_spec->hdr.tcp_urp,
/* When no spec is given, skip idx forward past all TCP fields. */
1049 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1053 ulp_rte_prsr_mask_copy(params, &idx,
1054 &tcp_mask->hdr.src_port,
1055 sizeof(tcp_mask->hdr.src_port));
1056 ulp_rte_prsr_mask_copy(params, &idx,
1057 &tcp_mask->hdr.dst_port,
1058 sizeof(tcp_mask->hdr.dst_port));
1059 ulp_rte_prsr_mask_copy(params, &idx,
1060 &tcp_mask->hdr.sent_seq,
1061 sizeof(tcp_mask->hdr.sent_seq));
1062 ulp_rte_prsr_mask_copy(params, &idx,
1063 &tcp_mask->hdr.recv_ack,
1064 sizeof(tcp_mask->hdr.recv_ack));
1065 ulp_rte_prsr_mask_copy(params, &idx,
1066 &tcp_mask->hdr.data_off,
1067 sizeof(tcp_mask->hdr.data_off));
1068 ulp_rte_prsr_mask_copy(params, &idx,
1069 &tcp_mask->hdr.tcp_flags,
1070 sizeof(tcp_mask->hdr.tcp_flags));
1071 ulp_rte_prsr_mask_copy(params, &idx,
1072 &tcp_mask->hdr.rx_win,
1073 sizeof(tcp_mask->hdr.rx_win));
1074 ulp_rte_prsr_mask_copy(params, &idx,
1075 &tcp_mask->hdr.cksum,
1076 sizeof(tcp_mask->hdr.cksum));
1077 ulp_rte_prsr_mask_copy(params, &idx,
1078 &tcp_mask->hdr.tcp_urp,
1079 sizeof(tcp_mask->hdr.tcp_urp));
1081 /* add number of TCP header elements */
1082 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1084 /* Set the udp header bitmap and computed l4 header bitmaps */
/* If an outer L4 header already exists, this one is the inner header. */
1085 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
1087 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1088 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1089 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1090 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1092 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1093 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1095 return BNXT_TF_RC_SUCCESS;
1098 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1100 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1101 struct ulp_rte_parser_params *params)
1103 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1104 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1105 struct ulp_rte_hdr_field *field;
1106 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1107 uint32_t idx = params->field_idx;
1111 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1115 size = sizeof(vxlan_spec->flags);
1116 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1119 size = sizeof(vxlan_spec->rsvd0);
1120 field = ulp_rte_parser_fld_copy(field,
1123 size = sizeof(vxlan_spec->vni);
1124 field = ulp_rte_parser_fld_copy(field,
1127 size = sizeof(vxlan_spec->rsvd1);
1128 field = ulp_rte_parser_fld_copy(field,
1133 ulp_rte_prsr_mask_copy(params, &idx,
1135 sizeof(vxlan_mask->flags));
1136 ulp_rte_prsr_mask_copy(params, &idx,
1138 sizeof(vxlan_mask->rsvd0));
1139 ulp_rte_prsr_mask_copy(params, &idx,
1141 sizeof(vxlan_mask->vni));
1142 ulp_rte_prsr_mask_copy(params, &idx,
1144 sizeof(vxlan_mask->rsvd1));
1146 /* Add number of vxlan header elements */
1147 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1149 /* Update the hdr_bitmap with vxlan */
1150 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1151 return BNXT_TF_RC_SUCCESS;
1154 /* Function to handle the parsing of RTE Flow item void Header */
1156 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1157 struct ulp_rte_parser_params *params __rte_unused)
1159 return BNXT_TF_RC_SUCCESS;
1162 /* Function to handle the parsing of RTE Flow action void Header. */
1164 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1165 struct ulp_rte_parser_params *params __rte_unused)
1167 return BNXT_TF_RC_SUCCESS;
1170 /* Function to handle the parsing of RTE Flow action Mark Header. */
1172 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1173 struct ulp_rte_parser_params *param)
1175 const struct rte_flow_action_mark *mark;
1176 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1179 mark = action_item->conf;
1181 mark_id = tfp_cpu_to_be_32(mark->id);
1182 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1183 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1185 /* Update the hdr_bitmap with vxlan */
1186 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1187 return BNXT_TF_RC_SUCCESS;
1189 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1190 return BNXT_TF_RC_ERROR;
1193 /* Function to handle the parsing of RTE Flow action RSS Header. */
1195 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1196 struct ulp_rte_parser_params *param)
1198 const struct rte_flow_action_rss *rss = action_item->conf;
1201 /* Update the hdr_bitmap with vxlan */
1202 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1203 return BNXT_TF_RC_SUCCESS;
1205 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1206 return BNXT_TF_RC_ERROR;
1209 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1211 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1212 struct ulp_rte_parser_params *params)
1214 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1215 const struct rte_flow_item *item;
1216 const struct rte_flow_item_eth *eth_spec;
1217 const struct rte_flow_item_ipv4 *ipv4_spec;
1218 const struct rte_flow_item_ipv6 *ipv6_spec;
1219 struct rte_flow_item_vxlan vxlan_spec;
1220 uint32_t vlan_num = 0, vlan_size = 0;
1221 uint32_t ip_size = 0, ip_type = 0;
1222 uint32_t vxlan_size = 0;
1224 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1225 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1227 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1228 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1230 vxlan_encap = action_item->conf;
1232 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1233 return BNXT_TF_RC_ERROR;
1236 item = vxlan_encap->definition;
1238 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1239 return BNXT_TF_RC_ERROR;
1242 if (!ulp_rte_item_skip_void(&item, 0))
1243 return BNXT_TF_RC_ERROR;
1245 /* must have ethernet header */
1246 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1247 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1248 return BNXT_TF_RC_ERROR;
1250 eth_spec = item->spec;
1251 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1252 ulp_encap_buffer_copy(buff,
1253 eth_spec->dst.addr_bytes,
1254 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1256 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1257 ulp_encap_buffer_copy(buff,
1258 eth_spec->src.addr_bytes,
1259 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1261 /* Goto the next item */
1262 if (!ulp_rte_item_skip_void(&item, 1))
1263 return BNXT_TF_RC_ERROR;
1265 /* May have vlan header */
1266 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1268 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1269 ulp_encap_buffer_copy(buff,
1271 sizeof(struct rte_flow_item_vlan));
1273 if (!ulp_rte_item_skip_void(&item, 1))
1274 return BNXT_TF_RC_ERROR;
1277 /* may have two vlan headers */
1278 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1280 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1281 sizeof(struct rte_flow_item_vlan)],
1283 sizeof(struct rte_flow_item_vlan));
1284 if (!ulp_rte_item_skip_void(&item, 1))
1285 return BNXT_TF_RC_ERROR;
1287 /* Update the vlan count and size of more than one */
1289 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1290 vlan_num = tfp_cpu_to_be_32(vlan_num);
1291 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1294 vlan_size = tfp_cpu_to_be_32(vlan_size);
1295 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1300 /* L3 must be IPv4, IPv6 */
1301 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1302 ipv4_spec = item->spec;
1303 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1305 /* copy the ipv4 details */
1306 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1307 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1308 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1309 ulp_encap_buffer_copy(buff,
1311 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1312 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1314 const uint8_t *tmp_buff;
1316 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1317 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1318 ulp_encap_buffer_copy(buff,
1320 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1321 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1322 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1323 ulp_encap_buffer_copy(buff,
1324 &ipv4_spec->hdr.version_ihl,
1325 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1327 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1328 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1329 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1330 ulp_encap_buffer_copy(buff,
1331 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1332 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1334 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1335 ulp_encap_buffer_copy(buff,
1336 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1337 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1339 /* Update the ip size details */
1340 ip_size = tfp_cpu_to_be_32(ip_size);
1341 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1342 &ip_size, sizeof(uint32_t));
1344 /* update the ip type */
1345 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1346 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1347 &ip_type, sizeof(uint32_t));
1349 /* update the computed field to notify it is ipv4 header */
1350 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1353 if (!ulp_rte_item_skip_void(&item, 1))
1354 return BNXT_TF_RC_ERROR;
1355 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1356 ipv6_spec = item->spec;
1357 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1359 /* copy the ipv4 details */
1360 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1361 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1363 /* Update the ip size details */
1364 ip_size = tfp_cpu_to_be_32(ip_size);
1365 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1366 &ip_size, sizeof(uint32_t));
1368 /* update the ip type */
1369 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1370 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1371 &ip_type, sizeof(uint32_t));
1373 /* update the computed field to notify it is ipv6 header */
1374 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1377 if (!ulp_rte_item_skip_void(&item, 1))
1378 return BNXT_TF_RC_ERROR;
1380 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1381 return BNXT_TF_RC_ERROR;
1385 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1386 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1387 return BNXT_TF_RC_ERROR;
1389 /* copy the udp details */
1390 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1391 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1393 if (!ulp_rte_item_skip_void(&item, 1))
1394 return BNXT_TF_RC_ERROR;
1397 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1398 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1399 return BNXT_TF_RC_ERROR;
1401 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1402 /* copy the vxlan details */
1403 memcpy(&vxlan_spec, item->spec, vxlan_size);
1404 vxlan_spec.flags = 0x08;
1405 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1406 (const uint8_t *)&vxlan_spec,
1408 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1409 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1410 &vxlan_size, sizeof(uint32_t));
1412 /* update the hdr_bitmap with vxlan */
1413 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1414 return BNXT_TF_RC_SUCCESS;
1417 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1419 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1421 struct ulp_rte_parser_params *params)
1423 /* update the hdr_bitmap with vxlan */
1424 ULP_BITMAP_SET(params->act_bitmap.bits,
1425 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1426 return BNXT_TF_RC_SUCCESS;
1429 /* Function to handle the parsing of RTE Flow action drop Header. */
1431 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1432 struct ulp_rte_parser_params *params)
1434 /* Update the hdr_bitmap with drop */
1435 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1436 return BNXT_TF_RC_SUCCESS;
1439 /* Function to handle the parsing of RTE Flow action count. */
1441 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1442 struct ulp_rte_parser_params *params)
1445 const struct rte_flow_action_count *act_count;
1446 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1448 act_count = action_item->conf;
1450 if (act_count->shared) {
1452 "Parse Error:Shared count not supported\n");
1453 return BNXT_TF_RC_PARSE_ERR;
1455 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1457 BNXT_ULP_ACT_PROP_SZ_COUNT);
1460 /* Update the hdr_bitmap with count */
1461 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1462 return BNXT_TF_RC_SUCCESS;
1465 /* Function to handle the parsing of action ports. */
1467 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1470 enum bnxt_ulp_direction_type dir;
1473 struct ulp_rte_act_prop *act = ¶m->act_prop;
1474 enum bnxt_ulp_intf_type port_type;
1477 /* Get the direction */
1478 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1479 if (dir == BNXT_ULP_DIR_EGRESS) {
1480 /* For egress direction, fill vport */
1481 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1482 return BNXT_TF_RC_ERROR;
1485 pid = rte_cpu_to_be_32(pid);
1486 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1487 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1489 /* For ingress direction, fill vnic */
1490 port_type = ULP_COMP_FLD_IDX_RD(param,
1491 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1492 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1493 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1495 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1497 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1499 return BNXT_TF_RC_ERROR;
1502 pid = rte_cpu_to_be_32(pid);
1503 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1504 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1507 /* Update the action port set bit */
1508 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1509 return BNXT_TF_RC_SUCCESS;
1512 /* Function to handle the parsing of RTE Flow action PF. */
1514 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1515 struct ulp_rte_parser_params *params)
1519 enum bnxt_ulp_intf_type intf_type;
1521 /* Get the port id of the current device */
1522 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1524 /* Get the port db ifindex */
1525 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1527 BNXT_TF_DBG(ERR, "Invalid port id\n");
1528 return BNXT_TF_RC_ERROR;
1531 /* Check the port is PF port */
1532 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1533 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1534 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1535 return BNXT_TF_RC_ERROR;
1537 /* Update the action properties */
1538 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1539 return ulp_rte_parser_act_port_set(params, ifindex);
1542 /* Function to handle the parsing of RTE Flow action VF. */
1544 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1545 struct ulp_rte_parser_params *params)
1547 const struct rte_flow_action_vf *vf_action;
1549 enum bnxt_ulp_intf_type intf_type;
1551 vf_action = action_item->conf;
1553 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1554 return BNXT_TF_RC_PARSE_ERR;
1557 if (vf_action->original) {
1558 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1559 return BNXT_TF_RC_PARSE_ERR;
1562 /* Check the port is VF port */
1563 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1565 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1566 return BNXT_TF_RC_ERROR;
1568 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1569 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1570 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1571 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1572 return BNXT_TF_RC_ERROR;
1575 /* Update the action properties */
1576 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1577 return ulp_rte_parser_act_port_set(params, ifindex);
1580 /* Function to handle the parsing of RTE Flow action port_id. */
1582 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1583 struct ulp_rte_parser_params *param)
1585 const struct rte_flow_action_port_id *port_id = act_item->conf;
1587 enum bnxt_ulp_intf_type intf_type;
1591 "ParseErr: Invalid Argument\n");
1592 return BNXT_TF_RC_PARSE_ERR;
1594 if (port_id->original) {
1596 "ParseErr:Portid Original not supported\n");
1597 return BNXT_TF_RC_PARSE_ERR;
1600 /* Get the port db ifindex */
1601 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1603 BNXT_TF_DBG(ERR, "Invalid port id\n");
1604 return BNXT_TF_RC_ERROR;
1607 /* Get the intf type */
1608 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1610 BNXT_TF_DBG(ERR, "Invalid port type\n");
1611 return BNXT_TF_RC_ERROR;
1614 /* Set the action port */
1615 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1616 return ulp_rte_parser_act_port_set(param, ifindex);
1619 /* Function to handle the parsing of RTE Flow action phy_port. */
1621 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1622 struct ulp_rte_parser_params *prm)
1624 const struct rte_flow_action_phy_port *phy_port;
1628 enum bnxt_ulp_direction_type dir;
1630 phy_port = action_item->conf;
1633 "ParseErr: Invalid Argument\n");
1634 return BNXT_TF_RC_PARSE_ERR;
1637 if (phy_port->original) {
1639 "Parse Err:Port Original not supported\n");
1640 return BNXT_TF_RC_PARSE_ERR;
1642 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1643 if (dir != BNXT_ULP_DIR_EGRESS) {
1645 "Parse Err:Phy ports are valid only for egress\n");
1646 return BNXT_TF_RC_PARSE_ERR;
1648 /* Get the physical port details from port db */
1649 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1652 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1657 pid = rte_cpu_to_be_32(pid);
1658 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1659 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1661 /* Update the action port set bit */
1662 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1663 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1664 BNXT_ULP_INTF_TYPE_PHY_PORT);
1665 return BNXT_TF_RC_SUCCESS;
1668 /* Function to handle the parsing of RTE Flow action pop vlan. */
1670 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1671 struct ulp_rte_parser_params *params)
1673 /* Update the act_bitmap with pop */
1674 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1675 return BNXT_TF_RC_SUCCESS;
1678 /* Function to handle the parsing of RTE Flow action push vlan. */
1680 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1681 struct ulp_rte_parser_params *params)
1683 const struct rte_flow_action_of_push_vlan *push_vlan;
1685 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1687 push_vlan = action_item->conf;
1689 ethertype = push_vlan->ethertype;
1690 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1692 "Parse Err: Ethertype not supported\n");
1693 return BNXT_TF_RC_PARSE_ERR;
1695 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1696 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1697 /* Update the hdr_bitmap with push vlan */
1698 ULP_BITMAP_SET(params->act_bitmap.bits,
1699 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1700 return BNXT_TF_RC_SUCCESS;
1702 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1703 return BNXT_TF_RC_ERROR;
1706 /* Function to handle the parsing of RTE Flow action set vlan id. */
1708 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1709 struct ulp_rte_parser_params *params)
1711 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1713 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1715 vlan_vid = action_item->conf;
1716 if (vlan_vid && vlan_vid->vlan_vid) {
1717 vid = vlan_vid->vlan_vid;
1718 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1719 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1720 /* Update the hdr_bitmap with vlan vid */
1721 ULP_BITMAP_SET(params->act_bitmap.bits,
1722 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1723 return BNXT_TF_RC_SUCCESS;
1725 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1726 return BNXT_TF_RC_ERROR;
1729 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1731 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1732 struct ulp_rte_parser_params *params)
1734 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1736 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1738 vlan_pcp = action_item->conf;
1740 pcp = vlan_pcp->vlan_pcp;
1741 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1742 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1743 /* Update the hdr_bitmap with vlan vid */
1744 ULP_BITMAP_SET(params->act_bitmap.bits,
1745 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1746 return BNXT_TF_RC_SUCCESS;
1748 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1749 return BNXT_TF_RC_ERROR;
1752 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
1754 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
1755 struct ulp_rte_parser_params *params)
1757 const struct rte_flow_action_set_ipv4 *set_ipv4;
1758 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1760 set_ipv4 = action_item->conf;
1762 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
1763 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
1764 /* Update the hdr_bitmap with set ipv4 src */
1765 ULP_BITMAP_SET(params->act_bitmap.bits,
1766 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
1767 return BNXT_TF_RC_SUCCESS;
1769 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
1770 return BNXT_TF_RC_ERROR;
1773 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
1775 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
1776 struct ulp_rte_parser_params *params)
1778 const struct rte_flow_action_set_ipv4 *set_ipv4;
1779 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1781 set_ipv4 = action_item->conf;
1783 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
1784 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
1785 /* Update the hdr_bitmap with set ipv4 dst */
1786 ULP_BITMAP_SET(params->act_bitmap.bits,
1787 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
1788 return BNXT_TF_RC_SUCCESS;
1790 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
1791 return BNXT_TF_RC_ERROR;
1794 /* Function to handle the parsing of RTE Flow action set tp src.*/
1796 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
1797 struct ulp_rte_parser_params *params)
1799 const struct rte_flow_action_set_tp *set_tp;
1800 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1802 set_tp = action_item->conf;
1804 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
1805 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
1806 /* Update the hdr_bitmap with set tp src */
1807 ULP_BITMAP_SET(params->act_bitmap.bits,
1808 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
1809 return BNXT_TF_RC_SUCCESS;
1812 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1813 return BNXT_TF_RC_ERROR;
1816 /* Function to handle the parsing of RTE Flow action set tp dst.*/
1818 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
1819 struct ulp_rte_parser_params *params)
1821 const struct rte_flow_action_set_tp *set_tp;
1822 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1824 set_tp = action_item->conf;
1826 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
1827 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
1828 /* Update the hdr_bitmap with set tp dst */
1829 ULP_BITMAP_SET(params->act_bitmap.bits,
1830 BNXT_ULP_ACTION_BIT_SET_TP_DST);
1831 return BNXT_TF_RC_SUCCESS;
1834 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1835 return BNXT_TF_RC_ERROR;
1838 /* Function to handle the parsing of RTE Flow action dec ttl.*/
1840 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
1841 struct ulp_rte_parser_params *params)
1843 /* Update the act_bitmap with dec ttl */
1844 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
1845 return BNXT_TF_RC_SUCCESS;