1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Local defines for the parsing functions */
/* PCP/priority occupies the top 3 bits of the 16-bit VLAN TCI. */
16 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
/* Priority mask as it appears after the htons() byte swap in the vlan handler. */
17 #define ULP_VLAN_PRIORITY_MASK 0x700
18 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
/* Well-known UDP destination port for VXLAN (RFC 7348). */
19 #define ULP_UDP_PORT_VXLAN 4789
21 /* Utility function to skip the void items. */
/*
 * Advance *item past any consecutive RTE_FLOW_ITEM_TYPE_VOID entries.
 * NOTE(review): body is truncated in this view — the statement that steps
 * *item by 'increment' inside the loop is not visible; confirm in full file.
 */
23 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
29 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
36 /* Utility function to update the field_bitmap */
/*
 * Set the bit for hdr_field[idx] in params->fld_bitmap when the field's
 * mask is non-zero; a partial (not all-ones) mask additionally sets the
 * wildcard-match bit so the template selects a wildcard match type.
 * A zero mask clears the index bit instead.
 * NOTE(review): '¶ms' below is mojibake for '&params' — restore before build.
 */
38 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
41 struct ulp_rte_hdr_field *field;
43 field = ¶ms->hdr_field[idx];
44 if (ulp_bitmap_notzero(field->mask, field->size)) {
45 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Any mask that is not all-ones means this is a wildcard match. */
47 if (!ulp_bitmap_is_ones(field->mask, field->size))
48 ULP_BITMAP_SET(params->fld_bitmap.bits,
49 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
51 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
55 /* Utility function to copy field spec items */
/*
 * Copy 'buffer' into field->spec (field->size bytes) and return a
 * ulp_rte_hdr_field pointer — presumably the next field slot, so callers
 * can chain copies; TODO confirm the truncated return statement.
 */
56 static struct ulp_rte_hdr_field *
57 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
62 memcpy(field->spec, buffer, field->size);
67 /* Utility function to copy field masks items */
/*
 * Copy 'size' bytes of 'buffer' into hdr_field[*idx].mask and refresh the
 * field bitmap for that index via ulp_rte_parser_field_bitmap_update().
 * NOTE(review): '¶ms' below is mojibake for '&params' — restore before build.
 */
69 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
74 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
76 memcpy(field->mask, buffer, size);
77 ulp_rte_parser_field_bitmap_update(params, *idx);
82 * Function to handle the parsing of RTE Flows and placing
83 * the RTE flow items into the ulp structures.
/*
 * Walk the rte_flow item array until RTE_FLOW_ITEM_TYPE_END, dispatching
 * each item to its registered handler from the ulp_hdr_info table.
 * Returns BNXT_TF_RC_PARSE_ERR for unsupported item types,
 * BNXT_TF_RC_ERROR if a handler fails, otherwise the result of the
 * implicit match-port processing.
 */
86 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
87 struct ulp_rte_parser_params *params)
89 const struct rte_flow_item *item = pattern;
90 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots for the SVIF entries. */
92 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
94 /* Set the computed flags for no vlan tags before parsing */
95 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
96 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
98 /* Parse all the items in the pattern */
99 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
100 /* get the header information from the flow_hdr_info table */
101 hdr_info = &ulp_hdr_info[item->type];
102 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
104 "Truflow parser does not support type %d\n",
106 return BNXT_TF_RC_PARSE_ERR;
107 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
108 /* call the registered callback handler */
109 if (hdr_info->proto_hdr_func) {
110 if (hdr_info->proto_hdr_func(item, params) !=
111 BNXT_TF_RC_SUCCESS) {
112 return BNXT_TF_RC_ERROR;
118 /* update the implied SVIF */
119 return ulp_rte_parser_implicit_match_port_process(params);
123 * Function to handle the parsing of RTE Flows and placing
124 * the RTE flow actions into the ulp structures.
/*
 * Walk the rte_flow action array until RTE_FLOW_ACTION_TYPE_END,
 * dispatching each action to its registered handler from ulp_act_info.
 * Mirrors bnxt_ulp_rte_parser_hdr_parse() for actions, then applies the
 * implicit action-port processing before returning success.
 */
127 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
128 struct ulp_rte_parser_params *params)
130 const struct rte_flow_action *action_item = actions;
131 struct bnxt_ulp_rte_act_info *hdr_info;
133 /* Parse all the items in the pattern */
134 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
135 /* get the header information from the flow_hdr_info table */
136 hdr_info = &ulp_act_info[action_item->type];
137 if (hdr_info->act_type ==
138 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
140 "Truflow parser does not support act %u\n",
142 return BNXT_TF_RC_ERROR;
143 } else if (hdr_info->act_type ==
144 BNXT_ULP_ACT_TYPE_SUPPORTED) {
145 /* call the registered callback handler */
146 if (hdr_info->proto_act_func) {
147 if (hdr_info->proto_act_func(action_item,
149 BNXT_TF_RC_SUCCESS) {
150 return BNXT_TF_RC_ERROR;
156 /* update the implied port details */
157 ulp_rte_parser_implicit_act_port_process(params);
158 return BNXT_TF_RC_SUCCESS;
162 * Function to handle the post processing of the parsing details
/*
 * After both item and action parsing, derive the remaining computed
 * fields: propagate the egress direction into the header/action bitmaps,
 * flag VF-to-VF flows, convert ACTION_BIT_DEC_TTL into the tunnel or
 * plain dec-TTL computed field, and fold hdr_fp_bit into hdr_bitmap.
 */
165 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
167 enum bnxt_ulp_direction_type dir;
168 enum bnxt_ulp_intf_type match_port_type, act_port_type;
169 uint32_t act_port_set;
171 /* Get the computed details */
172 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
173 match_port_type = ULP_COMP_FLD_IDX_RD(params,
174 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
175 act_port_type = ULP_COMP_FLD_IDX_RD(params,
176 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
177 act_port_set = ULP_COMP_FLD_IDX_RD(params,
178 BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
180 /* set the flow direction in the proto and action header */
181 if (dir == BNXT_ULP_DIR_EGRESS) {
182 ULP_BITMAP_SET(params->hdr_bitmap.bits,
183 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
184 ULP_BITMAP_SET(params->act_bitmap.bits,
185 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
188 /* calculate the VF to VF flag */
/* Only a VF-rep match with an explicitly set VF-rep action port counts. */
189 if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
190 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
191 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
193 /* Update the decrement ttl computational fields */
194 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
195 BNXT_ULP_ACTION_BIT_DEC_TTL)) {
197 * Check that vxlan proto is included and vxlan decap
198 * action is not set then decrement tunnel ttl.
199 * Similarly add GRE and NVGRE in future.
201 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
202 BNXT_ULP_HDR_BIT_T_VXLAN) &&
203 !ULP_BITMAP_ISSET(params->act_bitmap.bits,
204 BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
205 ULP_COMP_FLD_IDX_WR(params,
206 BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
208 ULP_COMP_FLD_IDX_WR(params,
209 BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
213 /* Merge the hdr_fp_bit into the proto header bit */
214 params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
216 /* TBD: Handle the flow rejection scenarios */
221 * Function to compute the flow direction based on the match port details
/*
 * Compute BNXT_ULP_CF_IDX_DIRECTION: an ingress-attributed flow whose
 * match port is a VF representor is treated as egress (traffic leaves
 * toward the VF); otherwise the flow attribute direction is used as-is.
 */
224 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
226 enum bnxt_ulp_intf_type match_port_type;
228 /* Get the match port type */
229 match_port_type = ULP_COMP_FLD_IDX_RD(params,
230 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
232 /* If ingress flow and matchport is vf rep then dir is egress*/
233 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
234 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
235 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
236 BNXT_ULP_DIR_EGRESS);
238 /* Assign the input direction */
239 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
240 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
241 BNXT_ULP_DIR_INGRESS);
243 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
244 BNXT_ULP_DIR_EGRESS);
248 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolve and record the source interface (SVIF) for the flow.
 * Rejects a second SVIF (only one source supported), looks up the port
 * type for 'ifindex', computes the flow direction, picks the SVIF kind
 * (phy port for ingress; VF-func or drv-func for egress), then stores the
 * big-endian SVIF spec/mask into the reserved hdr_field slot and latches
 * the value in BNXT_ULP_CF_IDX_SVIF_FLAG.
 * NOTE(review): '¶ms' below is mojibake for '&params' — restore before build.
 */
250 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
255 enum bnxt_ulp_direction_type dir;
256 struct ulp_rte_hdr_field *hdr_field;
257 enum bnxt_ulp_svif_type svif_type;
258 enum bnxt_ulp_intf_type port_type;
260 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
261 BNXT_ULP_INVALID_SVIF_VAL) {
263 "SVIF already set,multiple source not support'd\n");
264 return BNXT_TF_RC_ERROR;
267 /* Get port type details */
268 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
269 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
270 BNXT_TF_DBG(ERR, "Invalid port type\n");
271 return BNXT_TF_RC_ERROR;
274 /* Update the match port type */
275 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
277 /* compute the direction */
278 bnxt_ulp_rte_parser_direction_compute(params);
280 /* Get the computed direction */
281 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
282 if (dir == BNXT_ULP_DIR_INGRESS) {
283 svif_type = BNXT_ULP_PHY_PORT_SVIF;
285 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
286 svif_type = BNXT_ULP_VF_FUNC_SVIF;
288 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
290 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* Hardware expects the SVIF in network byte order. */
292 svif = rte_cpu_to_be_16(svif);
293 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
294 memcpy(hdr_field->spec, &svif, sizeof(svif));
295 memcpy(hdr_field->mask, &mask, sizeof(mask));
296 hdr_field->size = sizeof(svif);
297 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
298 rte_be_to_cpu_16(svif));
299 return BNXT_TF_RC_SUCCESS;
302 /* Function to handle the parsing of the RTE port id */
/*
 * If no pattern item supplied an SVIF, derive one implicitly from the
 * incoming dpdk port id: translate port id -> ulp ifindex and set the
 * SVIF with a full 0xFFFF (exact-match) mask. A pre-set SVIF short-
 * circuits to success.
 */
304 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
306 uint16_t port_id = 0;
307 uint16_t svif_mask = 0xFFFF;
309 int32_t rc = BNXT_TF_RC_ERROR;
311 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
312 BNXT_ULP_INVALID_SVIF_VAL)
313 return BNXT_TF_RC_SUCCESS;
315 /* SVIF not set. So get the port id */
316 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
318 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
321 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
325 /* Update the SVIF details */
326 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
330 /* Function to handle the implicit action port id */
/*
 * If no action set a destination port, synthesize a PORT_ID action from
 * the incoming interface and run it through the normal port-id action
 * handler, then clear ACT_PORT_IS_SET to record that the port was only
 * implied, not explicit.
 */
332 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
334 struct rte_flow_action action_item = {0};
335 struct rte_flow_action_port_id port_id = {0};
337 /* Read the action port set bit */
338 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
339 /* Already set, so just exit */
340 return BNXT_TF_RC_SUCCESS;
342 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
343 action_item.conf = &port_id;
345 /* Update the action port based on incoming port */
346 ulp_rte_port_id_act_handler(&action_item, params);
348 /* Reset the action port set bit */
349 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
350 return BNXT_TF_RC_SUCCESS;
353 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item handler: the PF item carries no spec, so use the incoming
 * dpdk port id, convert it to a ulp ifindex, and set the SVIF with an
 * exact-match mask.
 */
355 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
356 struct ulp_rte_parser_params *params)
358 uint16_t port_id = 0;
359 uint16_t svif_mask = 0xFFFF;
362 /* Get the implicit port id */
363 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
365 /* perform the conversion from dpdk port to bnxt ifindex */
366 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
369 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
370 return BNXT_TF_RC_ERROR;
373 /* Update the SVIF details */
374 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
377 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF item handler: validate the VF spec/mask (truncated checks below),
 * map the VF function id to a ulp ifindex via the port db, and set the
 * SVIF from it. Returns BNXT_TF_RC_PARSE_ERR on invalid spec/mask.
 */
379 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
380 struct ulp_rte_parser_params *params)
382 const struct rte_flow_item_vf *vf_spec = item->spec;
383 const struct rte_flow_item_vf *vf_mask = item->mask;
386 int32_t rc = BNXT_TF_RC_PARSE_ERR;
388 /* Get VF rte_flow_item for Port details */
390 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
394 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
399 /* perform the conversion from VF Func id to bnxt ifindex */
400 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
403 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
406 /* Update the SVIF details */
407 return ulp_rte_parser_svif_set(params, ifindex, mask);
410 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * PORT_ID item handler: validate spec/mask, take the mask from
 * port_mask->id, convert the dpdk port id to a ulp ifindex, and set the
 * SVIF accordingly.
 */
412 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
413 struct ulp_rte_parser_params *params)
415 const struct rte_flow_item_port_id *port_spec = item->spec;
416 const struct rte_flow_item_port_id *port_mask = item->mask;
418 int32_t rc = BNXT_TF_RC_PARSE_ERR;
422 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
426 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
429 mask = port_mask->id;
431 /* perform the conversion from dpdk port to bnxt ifindex */
432 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
435 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
438 /* Update the SVIF details */
439 return ulp_rte_parser_svif_set(params, ifindex, mask);
442 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * PHY_PORT item handler: physical-port matches are only valid for
 * ingress flows. Validates spec/mask, computes direction, fetches the
 * physical port's SVIF from the port db, and writes the big-endian
 * spec/mask into the SVIF hdr_field slot directly (bypassing
 * ulp_rte_parser_svif_set, which resolves SVIF by ifindex instead).
 * NOTE(review): '¶ms' below is mojibake for '&params' — restore before build.
 */
444 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
445 struct ulp_rte_parser_params *params)
447 const struct rte_flow_item_phy_port *port_spec = item->spec;
448 const struct rte_flow_item_phy_port *port_mask = item->mask;
450 int32_t rc = BNXT_TF_RC_ERROR;
452 enum bnxt_ulp_direction_type dir;
453 struct ulp_rte_hdr_field *hdr_field;
455 /* Copy the rte_flow_item for phy port into hdr_field */
457 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
461 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
464 mask = port_mask->index;
466 /* Update the match port type */
467 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
468 BNXT_ULP_INTF_TYPE_PHY_PORT);
470 /* Compute the Hw direction */
471 bnxt_ulp_rte_parser_direction_compute(params);
473 /* Direction validation */
474 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
475 if (dir == BNXT_ULP_DIR_EGRESS) {
477 "Parse Err:Phy ports are valid only for ingress\n");
478 return BNXT_TF_RC_PARSE_ERR;
481 /* Get the physical port details from port db */
482 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
485 BNXT_TF_DBG(ERR, "Failed to get port details\n");
486 return BNXT_TF_RC_PARSE_ERR;
489 /* Update the SVIF details */
490 svif = rte_cpu_to_be_16(svif);
491 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
492 memcpy(hdr_field->spec, &svif, sizeof(svif));
493 memcpy(hdr_field->mask, &mask, sizeof(mask));
494 hdr_field->size = sizeof(svif);
495 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
496 rte_be_to_cpu_16(svif));
497 return BNXT_TF_RC_SUCCESS;
500 /* Function to handle the update of proto header based on field values */
/*
 * From a big-endian ethertype, set the inner or outer IPv4/IPv6 fast-path
 * header bit and the matching L3 computed field. 'in_flag' selects the
 * inner (non-zero) vs outer (zero) variant; the branch structure is
 * partially truncated in this view.
 */
502 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
503 uint16_t type, uint32_t in_flag)
505 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
507 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
508 BNXT_ULP_HDR_BIT_I_IPV4);
509 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
511 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
512 BNXT_ULP_HDR_BIT_O_IPV4);
513 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
515 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
517 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
518 BNXT_ULP_HDR_BIT_I_IPV6);
519 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
521 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
522 BNXT_ULP_HDR_BIT_O_IPV6);
523 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
528 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * ETH item handler: copy dst MAC, src MAC and ethertype spec/mask into
 * consecutive hdr_field slots, reserve the VLAN field slots that may
 * follow, mark this header as inner or outer ethernet in hdr_bitmap,
 * and propagate the ethertype into the L3 fast-path bits.
 * NOTE(review): '¶ms' below is mojibake for '&params' — restore before build.
 */
530 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
531 struct ulp_rte_parser_params *params)
533 const struct rte_flow_item_eth *eth_spec = item->spec;
534 const struct rte_flow_item_eth *eth_mask = item->mask;
535 struct ulp_rte_hdr_field *field;
536 uint32_t idx = params->field_idx;
538 uint16_t eth_type = 0;
539 uint32_t inner_flag = 0;
542 * Copy the rte_flow_item for eth into hdr_field using ethernet
546 size = sizeof(eth_spec->dst.addr_bytes);
547 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
548 eth_spec->dst.addr_bytes,
550 size = sizeof(eth_spec->src.addr_bytes);
551 field = ulp_rte_parser_fld_copy(field,
552 eth_spec->src.addr_bytes,
554 field = ulp_rte_parser_fld_copy(field,
556 sizeof(eth_spec->type));
557 eth_type = eth_spec->type;
560 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
561 sizeof(eth_mask->dst.addr_bytes));
562 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
563 sizeof(eth_mask->src.addr_bytes));
564 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
565 sizeof(eth_mask->type));
567 /* Add number of vlan header elements */
/* Reserve slots for possible VLAN tags; vlan handler consumes vlan_idx. */
568 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
569 params->vlan_idx = params->field_idx;
570 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
572 /* Update the protocol hdr bitmap */
/* A second ETH item (outer already seen) is recorded as the inner one. */
573 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
574 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
577 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
579 /* Update the field protocol hdr bitmap */
580 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
582 return BNXT_TF_RC_SUCCESS;
585 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * VLAN item handler: split the 16-bit TCI into a 3-bit priority and a
 * 12-bit tag (both stored big-endian), copy priority/tag/inner ethertype
 * spec and mask into the reserved vlan hdr_field slots, then classify the
 * tag as outer-outer (OO), outer-inner (OI), inner-outer (IO) or
 * inner-inner (II) based on which ETH headers have been seen and how many
 * tags were already counted. Fails if no ethernet header preceded it.
 * NOTE(review): '¶ms' below is mojibake for '&params' — restore before build.
 */
587 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
588 struct ulp_rte_parser_params *params)
590 const struct rte_flow_item_vlan *vlan_spec = item->spec;
591 const struct rte_flow_item_vlan *vlan_mask = item->mask;
592 struct ulp_rte_hdr_field *field;
593 struct ulp_rte_hdr_bitmap *hdr_bit;
594 uint32_t idx = params->vlan_idx;
595 uint16_t vlan_tag, priority;
596 uint32_t outer_vtag_num;
597 uint32_t inner_vtag_num;
598 uint16_t eth_type = 0;
599 uint32_t inner_flag = 0;
602 * Copy the rte_flow_item for vlan into hdr_field using Vlan
606 vlan_tag = ntohs(vlan_spec->tci);
607 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
608 vlan_tag &= ULP_VLAN_TAG_MASK;
609 vlan_tag = htons(vlan_tag);
611 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
614 field = ulp_rte_parser_fld_copy(field,
617 field = ulp_rte_parser_fld_copy(field,
618 &vlan_spec->inner_type,
619 sizeof(vlan_spec->inner_type));
620 eth_type = vlan_spec->inner_type;
624 vlan_tag = ntohs(vlan_mask->tci);
625 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
629 * the storage for priority and vlan tag is 2 bytes
630 * The mask of priority which is 3 bits if it is all 1's
631 * then make the rest bits 13 bits as 1's
632 * so that it is matched as exact match.
634 if (priority == ULP_VLAN_PRIORITY_MASK)
635 priority |= ~ULP_VLAN_PRIORITY_MASK;
636 if (vlan_tag == ULP_VLAN_TAG_MASK)
637 vlan_tag |= ~ULP_VLAN_TAG_MASK;
638 vlan_tag = htons(vlan_tag);
640 ulp_rte_prsr_mask_copy(params, &idx, &priority,
642 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
644 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
645 sizeof(vlan_mask->inner_type));
647 /* Set the vlan index to new incremented value */
648 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
650 /* Get the outer tag and inner tag counts */
651 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
652 BNXT_ULP_CF_IDX_O_VTAG_NUM);
653 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
654 BNXT_ULP_CF_IDX_I_VTAG_NUM);
656 /* Update the hdr_bitmap of the vlans */
657 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first tag on the outer ethernet header -> OO_VLAN. */
658 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
659 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
661 /* Update the vlan tag num */
663 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
665 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
666 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
667 ULP_BITMAP_SET(params->hdr_bitmap.bits,
668 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Case 2: second tag on the outer ethernet header -> OI_VLAN. */
669 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
670 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
671 outer_vtag_num == 1) {
672 /* update the vlan tag num */
674 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
676 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
677 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
678 ULP_BITMAP_SET(params->hdr_bitmap.bits,
679 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Case 3: first tag on the inner ethernet header -> IO_VLAN. */
680 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
681 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
683 /* update the vlan tag num */
685 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
687 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
688 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
689 ULP_BITMAP_SET(params->hdr_bitmap.bits,
690 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Case 4: second tag on the inner ethernet header -> II_VLAN. */
692 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
693 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
694 inner_vtag_num == 1) {
695 /* update the vlan tag num */
697 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
699 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
700 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
701 ULP_BITMAP_SET(params->hdr_bitmap.bits,
702 BNXT_ULP_HDR_BIT_II_VLAN);
705 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
706 return BNXT_TF_RC_ERROR;
708 /* Update the field protocol hdr bitmap */
709 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
710 return BNXT_TF_RC_SUCCESS;
713 /* Function to handle the update of proto header based on field values */
/*
 * From an IP next-protocol value, set the inner or outer UDP/TCP
 * fast-path header bit and the matching L4 computed field. 'in_flag'
 * selects inner (non-zero) vs outer (zero); branch structure partially
 * truncated in this view.
 */
715 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
716 uint8_t proto, uint32_t in_flag)
718 if (proto == IPPROTO_UDP) {
720 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
721 BNXT_ULP_HDR_BIT_I_UDP);
722 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
724 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
725 BNXT_ULP_HDR_BIT_O_UDP);
726 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
728 } else if (proto == IPPROTO_TCP) {
730 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
731 BNXT_ULP_HDR_BIT_I_TCP);
732 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
734 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
735 BNXT_ULP_HDR_BIT_O_TCP);
736 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
741 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * IPV4 item handler: reject a third L3 header, copy each ipv4 header
 * field's spec and mask into consecutive hdr_field slots (TOS mask copy
 * is compiled out unless ULP_DONT_IGNORE_TOS — OVS workaround), advance
 * field_idx past the ipv4 slots, mark inner vs outer IPv4 in hdr_bitmap,
 * propagate next_proto_id into the L4 fast-path bits, and bump the L3
 * header count.
 * NOTE(review): '¶ms' below is mojibake for '&params' — restore before build.
 */
743 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
744 struct ulp_rte_parser_params *params)
746 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
747 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
748 struct ulp_rte_hdr_field *field;
749 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
750 uint32_t idx = params->field_idx;
753 uint32_t inner_flag = 0;
756 /* validate there are no 3rd L3 header */
757 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
759 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
760 return BNXT_TF_RC_ERROR;
764 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
768 size = sizeof(ipv4_spec->hdr.version_ihl);
769 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
770 &ipv4_spec->hdr.version_ihl,
772 size = sizeof(ipv4_spec->hdr.type_of_service);
773 field = ulp_rte_parser_fld_copy(field,
774 &ipv4_spec->hdr.type_of_service,
776 size = sizeof(ipv4_spec->hdr.total_length);
777 field = ulp_rte_parser_fld_copy(field,
778 &ipv4_spec->hdr.total_length,
780 size = sizeof(ipv4_spec->hdr.packet_id);
781 field = ulp_rte_parser_fld_copy(field,
782 &ipv4_spec->hdr.packet_id,
784 size = sizeof(ipv4_spec->hdr.fragment_offset);
785 field = ulp_rte_parser_fld_copy(field,
786 &ipv4_spec->hdr.fragment_offset,
788 size = sizeof(ipv4_spec->hdr.time_to_live);
789 field = ulp_rte_parser_fld_copy(field,
790 &ipv4_spec->hdr.time_to_live,
792 size = sizeof(ipv4_spec->hdr.next_proto_id);
793 field = ulp_rte_parser_fld_copy(field,
794 &ipv4_spec->hdr.next_proto_id,
/* Remember the L4 protocol so the fast-path bits can be set below. */
796 proto = ipv4_spec->hdr.next_proto_id;
797 size = sizeof(ipv4_spec->hdr.hdr_checksum);
798 field = ulp_rte_parser_fld_copy(field,
799 &ipv4_spec->hdr.hdr_checksum,
801 size = sizeof(ipv4_spec->hdr.src_addr);
802 field = ulp_rte_parser_fld_copy(field,
803 &ipv4_spec->hdr.src_addr,
805 size = sizeof(ipv4_spec->hdr.dst_addr);
806 field = ulp_rte_parser_fld_copy(field,
807 &ipv4_spec->hdr.dst_addr,
811 ulp_rte_prsr_mask_copy(params, &idx,
812 &ipv4_mask->hdr.version_ihl,
813 sizeof(ipv4_mask->hdr.version_ihl));
814 #ifdef ULP_DONT_IGNORE_TOS
815 ulp_rte_prsr_mask_copy(params, &idx,
816 &ipv4_mask->hdr.type_of_service,
817 sizeof(ipv4_mask->hdr.type_of_service));
820 * The tos field is ignored since OVS is setting it as wild card
821 * match and it is not supported. This is a work around and
822 * shall be addressed in the future.
827 ulp_rte_prsr_mask_copy(params, &idx,
828 &ipv4_mask->hdr.total_length,
829 sizeof(ipv4_mask->hdr.total_length));
830 ulp_rte_prsr_mask_copy(params, &idx,
831 &ipv4_mask->hdr.packet_id,
832 sizeof(ipv4_mask->hdr.packet_id));
833 ulp_rte_prsr_mask_copy(params, &idx,
834 &ipv4_mask->hdr.fragment_offset,
835 sizeof(ipv4_mask->hdr.fragment_offset));
836 ulp_rte_prsr_mask_copy(params, &idx,
837 &ipv4_mask->hdr.time_to_live,
838 sizeof(ipv4_mask->hdr.time_to_live));
839 ulp_rte_prsr_mask_copy(params, &idx,
840 &ipv4_mask->hdr.next_proto_id,
841 sizeof(ipv4_mask->hdr.next_proto_id));
842 ulp_rte_prsr_mask_copy(params, &idx,
843 &ipv4_mask->hdr.hdr_checksum,
844 sizeof(ipv4_mask->hdr.hdr_checksum));
845 ulp_rte_prsr_mask_copy(params, &idx,
846 &ipv4_mask->hdr.src_addr,
847 sizeof(ipv4_mask->hdr.src_addr));
848 ulp_rte_prsr_mask_copy(params, &idx,
849 &ipv4_mask->hdr.dst_addr,
850 sizeof(ipv4_mask->hdr.dst_addr));
852 /* Add the number of ipv4 header elements */
853 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
855 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
/* An outer L3 already present means this ipv4 is the inner header. */
856 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
857 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
858 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
859 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
862 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
863 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
866 /* Update the field protocol hdr bitmap */
867 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
868 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
869 return BNXT_TF_RC_SUCCESS;
872 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPV6 item handler: reject a third L3 header, split vtc_flow into
 * version / traffic-class / flow-label sub-fields (via the
 * BNXT_ULP_GET_IPV6_* extractors) for both spec and mask, copy the
 * remaining header fields, advance field_idx, mark inner vs outer IPv6,
 * propagate the next protocol into the L4 fast-path bits, and bump the
 * L3 header count.
 * NOTE(review): '¶ms' below is mojibake for '&params' — restore before build.
 */
874 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
875 struct ulp_rte_parser_params *params)
877 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
878 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
879 struct ulp_rte_hdr_field *field;
880 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
881 uint32_t idx = params->field_idx;
883 uint32_t vtcf, vtcf_mask;
885 uint32_t inner_flag = 0;
888 /* validate there are no 3rd L3 header */
889 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
891 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
892 return BNXT_TF_RC_ERROR;
896 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
900 size = sizeof(ipv6_spec->hdr.vtc_flow);
/* vtc_flow is stored as three separate fields: version, TC, flow label. */
902 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
903 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
907 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
908 field = ulp_rte_parser_fld_copy(field,
912 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
913 field = ulp_rte_parser_fld_copy(field,
917 size = sizeof(ipv6_spec->hdr.payload_len);
918 field = ulp_rte_parser_fld_copy(field,
919 &ipv6_spec->hdr.payload_len,
921 size = sizeof(ipv6_spec->hdr.proto);
922 field = ulp_rte_parser_fld_copy(field,
923 &ipv6_spec->hdr.proto,
/* Remember the L4 protocol so the fast-path bits can be set below. */
925 proto = ipv6_spec->hdr.proto;
926 size = sizeof(ipv6_spec->hdr.hop_limits);
927 field = ulp_rte_parser_fld_copy(field,
928 &ipv6_spec->hdr.hop_limits,
930 size = sizeof(ipv6_spec->hdr.src_addr);
931 field = ulp_rte_parser_fld_copy(field,
932 &ipv6_spec->hdr.src_addr,
934 size = sizeof(ipv6_spec->hdr.dst_addr);
935 field = ulp_rte_parser_fld_copy(field,
936 &ipv6_spec->hdr.dst_addr,
940 size = sizeof(ipv6_mask->hdr.vtc_flow);
942 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
943 ulp_rte_prsr_mask_copy(params, &idx,
947 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
948 ulp_rte_prsr_mask_copy(params, &idx,
953 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
954 ulp_rte_prsr_mask_copy(params, &idx,
958 ulp_rte_prsr_mask_copy(params, &idx,
959 &ipv6_mask->hdr.payload_len,
960 sizeof(ipv6_mask->hdr.payload_len));
961 ulp_rte_prsr_mask_copy(params, &idx,
962 &ipv6_mask->hdr.proto,
963 sizeof(ipv6_mask->hdr.proto));
964 ulp_rte_prsr_mask_copy(params, &idx,
965 &ipv6_mask->hdr.hop_limits,
966 sizeof(ipv6_mask->hdr.hop_limits));
967 ulp_rte_prsr_mask_copy(params, &idx,
968 &ipv6_mask->hdr.src_addr,
969 sizeof(ipv6_mask->hdr.src_addr));
970 ulp_rte_prsr_mask_copy(params, &idx,
971 &ipv6_mask->hdr.dst_addr,
972 sizeof(ipv6_mask->hdr.dst_addr));
974 /* add number of ipv6 header elements */
975 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
977 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
/* An outer L3 already present means this ipv6 is the inner header. */
978 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
979 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
980 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
981 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
984 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
985 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
988 /* Update the field protocol hdr bitmap */
989 ulp_rte_l3_proto_type_update(params, proto, inner_flag);
990 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
992 return BNXT_TF_RC_SUCCESS;
995 /* Function to handle the update of proto header based on field values */
/*
 * If the (big-endian) UDP destination port is the VXLAN port, set the
 * tunnel-VXLAN fast-path header bit so post-processing can treat the
 * flow as tunneled.
 */
997 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1000 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1001 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1002 BNXT_ULP_HDR_BIT_T_VXLAN);
1005 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * UDP item handler: reject a third L4 header, copy src/dst port, length
 * and checksum spec/mask into consecutive hdr_field slots, advance
 * field_idx, mark inner vs outer UDP in hdr_bitmap, and — for the outer
 * header only — check the destination port for VXLAN tunneling. Bumps
 * the L4 header count on success.
 * NOTE(review): '¶ms' below is mojibake for '&params' — restore before build.
 */
1007 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1008 struct ulp_rte_parser_params *params)
1010 const struct rte_flow_item_udp *udp_spec = item->spec;
1011 const struct rte_flow_item_udp *udp_mask = item->mask;
1012 struct ulp_rte_hdr_field *field;
1013 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1014 uint32_t idx = params->field_idx;
1016 uint16_t dst_port = 0;
1019 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1021 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1022 return BNXT_TF_RC_ERROR;
1026 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1030 size = sizeof(udp_spec->hdr.src_port);
1031 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1032 &udp_spec->hdr.src_port,
1034 size = sizeof(udp_spec->hdr.dst_port);
1035 field = ulp_rte_parser_fld_copy(field,
1036 &udp_spec->hdr.dst_port,
/* Remember the destination port for the VXLAN tunnel check below. */
1038 dst_port = udp_spec->hdr.dst_port;
1039 size = sizeof(udp_spec->hdr.dgram_len);
1040 field = ulp_rte_parser_fld_copy(field,
1041 &udp_spec->hdr.dgram_len,
1043 size = sizeof(udp_spec->hdr.dgram_cksum);
1044 field = ulp_rte_parser_fld_copy(field,
1045 &udp_spec->hdr.dgram_cksum,
1049 ulp_rte_prsr_mask_copy(params, &idx,
1050 &udp_mask->hdr.src_port,
1051 sizeof(udp_mask->hdr.src_port));
1052 ulp_rte_prsr_mask_copy(params, &idx,
1053 &udp_mask->hdr.dst_port,
1054 sizeof(udp_mask->hdr.dst_port));
1055 ulp_rte_prsr_mask_copy(params, &idx,
1056 &udp_mask->hdr.dgram_len,
1057 sizeof(udp_mask->hdr.dgram_len));
1058 ulp_rte_prsr_mask_copy(params, &idx,
1059 &udp_mask->hdr.dgram_cksum,
1060 sizeof(udp_mask->hdr.dgram_cksum));
1063 /* Add number of UDP header elements */
1064 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1066 /* Set the udp header bitmap and computed l4 header bitmaps */
/* An outer L4 already present means this udp is the inner header. */
1067 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1068 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1069 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1070 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1072 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1073 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1074 /* Update the field protocol hdr bitmap */
1075 ulp_rte_l4_proto_type_update(params, dst_port);
1077 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1078 return BNXT_TF_RC_SUCCESS;
1081 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies the TCP spec fields into params->hdr_field and the mask fields via
 * ulp_rte_prsr_mask_copy, advances params->field_idx by
 * BNXT_ULP_PROTO_HDR_TCP_NUM, and sets the inner/outer TCP bits in the
 * header bitmap.  Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
1083 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1084 struct ulp_rte_parser_params *params)
1086 const struct rte_flow_item_tcp *tcp_spec = item->spec;
1087 const struct rte_flow_item_tcp *tcp_mask = item->mask;
1088 struct ulp_rte_hdr_field *field;
1089 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1090 uint32_t idx = params->field_idx;
/* Only two L4 headers (outer + inner) are supported per flow. */
1094 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1096 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1097 return BNXT_TF_RC_ERROR;
1101 * Copy the rte_flow_item for tcp into hdr_field using tcp
1105 size = sizeof(tcp_spec->hdr.src_port);
1106 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1107 &tcp_spec->hdr.src_port,
1109 size = sizeof(tcp_spec->hdr.dst_port);
1110 field = ulp_rte_parser_fld_copy(field,
1111 &tcp_spec->hdr.dst_port,
1113 size = sizeof(tcp_spec->hdr.sent_seq);
1114 field = ulp_rte_parser_fld_copy(field,
1115 &tcp_spec->hdr.sent_seq,
1117 size = sizeof(tcp_spec->hdr.recv_ack);
1118 field = ulp_rte_parser_fld_copy(field,
1119 &tcp_spec->hdr.recv_ack,
1121 size = sizeof(tcp_spec->hdr.data_off);
1122 field = ulp_rte_parser_fld_copy(field,
1123 &tcp_spec->hdr.data_off,
1125 size = sizeof(tcp_spec->hdr.tcp_flags);
1126 field = ulp_rte_parser_fld_copy(field,
1127 &tcp_spec->hdr.tcp_flags,
1129 size = sizeof(tcp_spec->hdr.rx_win);
1130 field = ulp_rte_parser_fld_copy(field,
1131 &tcp_spec->hdr.rx_win,
1133 size = sizeof(tcp_spec->hdr.cksum);
1134 field = ulp_rte_parser_fld_copy(field,
1135 &tcp_spec->hdr.cksum,
1137 size = sizeof(tcp_spec->hdr.tcp_urp);
1138 field = ulp_rte_parser_fld_copy(field,
1139 &tcp_spec->hdr.tcp_urp,
/* Even when the spec is absent the index still advances past TCP fields. */
1142 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1146 ulp_rte_prsr_mask_copy(params, &idx,
1147 &tcp_mask->hdr.src_port,
1148 sizeof(tcp_mask->hdr.src_port));
1149 ulp_rte_prsr_mask_copy(params, &idx,
1150 &tcp_mask->hdr.dst_port,
1151 sizeof(tcp_mask->hdr.dst_port));
1152 ulp_rte_prsr_mask_copy(params, &idx,
1153 &tcp_mask->hdr.sent_seq,
1154 sizeof(tcp_mask->hdr.sent_seq));
1155 ulp_rte_prsr_mask_copy(params, &idx,
1156 &tcp_mask->hdr.recv_ack,
1157 sizeof(tcp_mask->hdr.recv_ack));
1158 ulp_rte_prsr_mask_copy(params, &idx,
1159 &tcp_mask->hdr.data_off,
1160 sizeof(tcp_mask->hdr.data_off));
1161 ulp_rte_prsr_mask_copy(params, &idx,
1162 &tcp_mask->hdr.tcp_flags,
1163 sizeof(tcp_mask->hdr.tcp_flags));
1164 ulp_rte_prsr_mask_copy(params, &idx,
1165 &tcp_mask->hdr.rx_win,
1166 sizeof(tcp_mask->hdr.rx_win));
1167 ulp_rte_prsr_mask_copy(params, &idx,
1168 &tcp_mask->hdr.cksum,
1169 sizeof(tcp_mask->hdr.cksum));
1170 ulp_rte_prsr_mask_copy(params, &idx,
1171 &tcp_mask->hdr.tcp_urp,
1172 sizeof(tcp_mask->hdr.tcp_urp));
1174 /* add number of TCP header elements */
1175 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1177 /* Set the tcp header bitmap and computed l4 header bitmaps */
/* If an outer L4 header was already seen, this TCP header is the inner one. */
1178 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1179 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1180 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1181 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1183 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1184 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1186 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1187 return BNXT_TF_RC_SUCCESS;
1190 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies the VXLAN spec (flags, rsvd0, vni, rsvd1) and mask into
 * params->hdr_field, advances field_idx by BNXT_ULP_PROTO_HDR_VXLAN_NUM
 * and sets the tunnel VXLAN bit in the header bitmap.
 */
1192 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1193 struct ulp_rte_parser_params *params)
1195 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1196 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1197 struct ulp_rte_hdr_field *field;
1198 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1199 uint32_t idx = params->field_idx;
1203 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1207 size = sizeof(vxlan_spec->flags);
1208 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1211 size = sizeof(vxlan_spec->rsvd0);
1212 field = ulp_rte_parser_fld_copy(field,
1215 size = sizeof(vxlan_spec->vni);
1216 field = ulp_rte_parser_fld_copy(field,
1219 size = sizeof(vxlan_spec->rsvd1);
1220 field = ulp_rte_parser_fld_copy(field,
1225 ulp_rte_prsr_mask_copy(params, &idx,
1227 sizeof(vxlan_mask->flags));
1228 ulp_rte_prsr_mask_copy(params, &idx,
1230 sizeof(vxlan_mask->rsvd0));
1231 ulp_rte_prsr_mask_copy(params, &idx,
1233 sizeof(vxlan_mask->vni));
1234 ulp_rte_prsr_mask_copy(params, &idx,
1236 sizeof(vxlan_mask->rsvd1));
1238 /* Add number of vxlan header elements */
1239 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1241 /* Update the hdr_bitmap with vxlan */
1242 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1243 return BNXT_TF_RC_SUCCESS;
1246 /* Function to handle the parsing of RTE Flow item void Header */
1248 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1249 struct ulp_rte_parser_params *params __rte_unused)
1251 return BNXT_TF_RC_SUCCESS;
1254 /* Function to handle the parsing of RTE Flow action void Header. */
1256 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1257 struct ulp_rte_parser_params *params __rte_unused)
1259 return BNXT_TF_RC_SUCCESS;
1262 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the 32-bit mark id (big-endian) in the action properties and
 * sets the MARK action bit.  Errors out if the action conf is missing.
 */
1264 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1265 struct ulp_rte_parser_params *param)
1267 const struct rte_flow_action_mark *mark;
1268 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1271 mark = action_item->conf;
1273 mark_id = tfp_cpu_to_be_32(mark->id);
1274 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1275 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1277 /* Update the act_bitmap with mark */
1278 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1279 return BNXT_TF_RC_SUCCESS;
1281 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1282 return BNXT_TF_RC_ERROR;
1285 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Only records that an RSS action is present (no RSS config is copied
 * here); errors out if the action conf is missing.
 */
1287 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1288 struct ulp_rte_parser_params *param)
1290 const struct rte_flow_action_rss *rss = action_item->conf;
1293 /* Update the act_bitmap with rss */
1294 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1295 return BNXT_TF_RC_SUCCESS;
1297 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1298 return BNXT_TF_RC_ERROR;
1301 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Walks the vxlan_encap item definition (ETH -> optional VLANs ->
 * IPv4/IPv6 -> UDP -> VXLAN) and serializes each header into the encap
 * action properties, recording vlan count/size, ip size/type and the
 * tunnel header size along the way.
 */
1303 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1304 struct ulp_rte_parser_params *params)
1306 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1307 const struct rte_flow_item *item;
1308 const struct rte_flow_item_eth *eth_spec;
1309 const struct rte_flow_item_ipv4 *ipv4_spec;
1310 const struct rte_flow_item_ipv6 *ipv6_spec;
1311 struct rte_flow_item_vxlan vxlan_spec;
1312 uint32_t vlan_num = 0, vlan_size = 0;
1313 uint32_t ip_size = 0, ip_type = 0;
1314 uint32_t vxlan_size = 0;
1316 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1317 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1319 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1320 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1322 vxlan_encap = action_item->conf;
1324 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1325 return BNXT_TF_RC_ERROR;
1328 item = vxlan_encap->definition;
1330 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1331 return BNXT_TF_RC_ERROR;
1334 if (!ulp_rte_item_skip_void(&item, 0))
1335 return BNXT_TF_RC_ERROR;
1337 /* must have ethernet header */
1338 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1339 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1340 return BNXT_TF_RC_ERROR;
1342 eth_spec = item->spec;
1343 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1344 ulp_encap_buffer_copy(buff,
1345 eth_spec->dst.addr_bytes,
1346 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1348 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1349 ulp_encap_buffer_copy(buff,
1350 eth_spec->src.addr_bytes,
1351 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1353 /* Goto the next item */
1354 if (!ulp_rte_item_skip_void(&item, 1))
1355 return BNXT_TF_RC_ERROR;
1357 /* May have vlan header */
1358 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1360 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1361 ulp_encap_buffer_copy(buff,
1363 sizeof(struct rte_flow_item_vlan));
1365 if (!ulp_rte_item_skip_void(&item, 1))
1366 return BNXT_TF_RC_ERROR;
1369 /* may have two vlan headers */
1370 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
/* Second tag is stored immediately after the first in the VTAG buffer. */
1372 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1373 sizeof(struct rte_flow_item_vlan)],
1375 sizeof(struct rte_flow_item_vlan));
1376 if (!ulp_rte_item_skip_void(&item, 1))
1377 return BNXT_TF_RC_ERROR;
1379 /* Update the vlan count and size of more than one */
1381 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1382 vlan_num = tfp_cpu_to_be_32(vlan_num);
1383 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1386 vlan_size = tfp_cpu_to_be_32(vlan_size);
1387 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1392 /* L3 must be IPv4, IPv6 */
1393 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1394 ipv4_spec = item->spec;
1395 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1397 /* copy the ipv4 details */
/* If ver/hlen/tos in the spec are all-zero, use the default IPv4 header. */
1398 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1399 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1400 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1401 ulp_encap_buffer_copy(buff,
1403 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1404 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1406 const uint8_t *tmp_buff;
1408 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1409 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1410 ulp_encap_buffer_copy(buff,
1412 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1413 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1414 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1415 ulp_encap_buffer_copy(buff,
1416 &ipv4_spec->hdr.version_ihl,
1417 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1419 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1420 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1421 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1422 ulp_encap_buffer_copy(buff,
1423 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1424 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1426 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1427 ulp_encap_buffer_copy(buff,
1428 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1429 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1431 /* Update the ip size details */
1432 ip_size = tfp_cpu_to_be_32(ip_size);
1433 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1434 &ip_size, sizeof(uint32_t));
1436 /* update the ip type */
1437 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1438 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1439 &ip_type, sizeof(uint32_t));
1441 /* update the computed field to notify it is ipv4 header */
1442 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1445 if (!ulp_rte_item_skip_void(&item, 1))
1446 return BNXT_TF_RC_ERROR;
1447 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1448 ipv6_spec = item->spec;
1449 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1451 /* copy the ipv6 details */
1452 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1453 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1455 /* Update the ip size details */
1456 ip_size = tfp_cpu_to_be_32(ip_size);
1457 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1458 &ip_size, sizeof(uint32_t));
1460 /* update the ip type */
1461 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1462 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1463 &ip_type, sizeof(uint32_t));
1465 /* update the computed field to notify it is ipv6 header */
1466 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1469 if (!ulp_rte_item_skip_void(&item, 1))
1470 return BNXT_TF_RC_ERROR;
1472 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1473 return BNXT_TF_RC_ERROR;
1477 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1478 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1479 return BNXT_TF_RC_ERROR;
1481 /* copy the udp details */
1482 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1483 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1485 if (!ulp_rte_item_skip_void(&item, 1))
1486 return BNXT_TF_RC_ERROR;
1489 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1490 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1491 return BNXT_TF_RC_ERROR;
1493 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1494 /* copy the vxlan details */
1495 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* Force the VXLAN I flag (0x08) so the VNI is marked valid (RFC 7348). */
1496 vxlan_spec.flags = 0x08;
1497 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1498 (const uint8_t *)&vxlan_spec,
1500 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1501 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1502 &vxlan_size, sizeof(uint32_t));
1504 /* update the act_bitmap with vxlan encap */
1505 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1506 return BNXT_TF_RC_SUCCESS;
1509 /* Function to handle the parsing of RTE Flow action vxlan_decap Header */
/* Decap needs no properties; only the action bit is recorded. */
1511 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1513 struct ulp_rte_parser_params *params)
1515 /* update the act_bitmap with vxlan decap */
1516 ULP_BITMAP_SET(params->act_bitmap.bits,
1517 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1518 return BNXT_TF_RC_SUCCESS;
1521 /* Function to handle the parsing of RTE Flow action drop Header. */
/* Drop takes no arguments; only the action bit is recorded. */
1523 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1524 struct ulp_rte_parser_params *params)
1526 /* Update the act_bitmap with drop */
1527 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1528 return BNXT_TF_RC_SUCCESS;
1531 /* Function to handle the parsing of RTE Flow action count. */
/*
 * Copies the count action config into the action properties and sets the
 * COUNT bit.  Shared counters are rejected with BNXT_TF_RC_PARSE_ERR.
 */
1533 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1534 struct ulp_rte_parser_params *params)
1537 const struct rte_flow_action_count *act_count;
1538 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1540 act_count = action_item->conf;
1542 if (act_count->shared) {
1544 "Parse Error:Shared count not supported\n");
1545 return BNXT_TF_RC_PARSE_ERR;
1547 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1549 BNXT_ULP_ACT_PROP_SZ_COUNT);
1552 /* Update the act_bitmap with count */
1553 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1554 return BNXT_TF_RC_SUCCESS;
1557 /* Function to handle the parsing of action ports. */
/*
 * Resolves the destination for a port action from the port database:
 * egress flows get the vport, ingress flows get the default vnic
 * (VF-rep ports use the VF function vnic, otherwise the driver function
 * vnic).  The chosen id is stored big-endian in the action properties
 * and the ACT_PORT_IS_SET computed field is raised.
 */
1559 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1562 enum bnxt_ulp_direction_type dir;
1565 struct ulp_rte_act_prop *act = ¶m->act_prop;
1566 enum bnxt_ulp_intf_type port_type;
1569 /* Get the direction */
1570 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1571 if (dir == BNXT_ULP_DIR_EGRESS) {
1572 /* For egress direction, fill vport */
1573 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1574 return BNXT_TF_RC_ERROR;
1577 pid = rte_cpu_to_be_32(pid);
1578 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1579 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1581 /* For ingress direction, fill vnic */
1582 port_type = ULP_COMP_FLD_IDX_RD(param,
1583 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1584 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1585 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1587 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1589 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1591 return BNXT_TF_RC_ERROR;
1594 pid = rte_cpu_to_be_32(pid);
1595 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1596 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1599 /* Update the action port set bit */
1600 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1601 return BNXT_TF_RC_SUCCESS;
1604 /* Function to handle the parsing of RTE Flow action PF. */
/*
 * Maps the incoming interface of the flow to its port-db ifindex,
 * verifies the port is a PF, then delegates to
 * ulp_rte_parser_act_port_set to fill in the vport/vnic action.
 */
1606 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1607 struct ulp_rte_parser_params *params)
1611 enum bnxt_ulp_intf_type intf_type;
1613 /* Get the port id of the current device */
1614 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1616 /* Get the port db ifindex */
1617 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1619 BNXT_TF_DBG(ERR, "Invalid port id\n");
1620 return BNXT_TF_RC_ERROR;
1623 /* Check the port is PF port */
1624 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1625 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1626 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1627 return BNXT_TF_RC_ERROR;
1629 /* Update the action properties */
1630 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1631 return ulp_rte_parser_act_port_set(params, ifindex);
1634 /* Function to handle the parsing of RTE Flow action VF. */
/*
 * Validates the VF action (conf present, "original" not supported),
 * resolves the VF function id to a port-db ifindex, checks the port is
 * a (trusted) VF, then delegates to ulp_rte_parser_act_port_set.
 */
1636 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1637 struct ulp_rte_parser_params *params)
1639 const struct rte_flow_action_vf *vf_action;
1641 enum bnxt_ulp_intf_type intf_type;
1643 vf_action = action_item->conf;
1645 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1646 return BNXT_TF_RC_PARSE_ERR;
1649 if (vf_action->original) {
1650 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1651 return BNXT_TF_RC_PARSE_ERR;
1654 /* Check the port is VF port */
1655 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1657 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1658 return BNXT_TF_RC_ERROR;
1660 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1661 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1662 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1663 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1664 return BNXT_TF_RC_ERROR;
1667 /* Update the action properties */
1668 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1669 return ulp_rte_parser_act_port_set(params, ifindex);
1672 /* Function to handle the parsing of RTE Flow action port_id. */
/*
 * Validates the port_id action, maps the DPDK port id to the port-db
 * ifindex and interface type, then delegates to
 * ulp_rte_parser_act_port_set to fill in the vport/vnic action.
 */
1674 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1675 struct ulp_rte_parser_params *param)
1677 const struct rte_flow_action_port_id *port_id = act_item->conf;
1679 enum bnxt_ulp_intf_type intf_type;
1683 "ParseErr: Invalid Argument\n");
1684 return BNXT_TF_RC_PARSE_ERR;
1686 if (port_id->original) {
1688 "ParseErr:Portid Original not supported\n");
1689 return BNXT_TF_RC_PARSE_ERR;
1692 /* Get the port db ifindex */
1693 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1695 BNXT_TF_DBG(ERR, "Invalid port id\n");
1696 return BNXT_TF_RC_ERROR;
1699 /* Get the intf type */
1700 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1702 BNXT_TF_DBG(ERR, "Invalid port type\n");
1703 return BNXT_TF_RC_ERROR;
1706 /* Set the action port */
1707 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1708 return ulp_rte_parser_act_port_set(param, ifindex);
1711 /* Function to handle the parsing of RTE Flow action phy_port. */
/*
 * Valid only for egress flows: looks up the vport for the given
 * physical port index and stores it big-endian in the VPORT action
 * property, marking the action port as set.
 */
1713 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1714 struct ulp_rte_parser_params *prm)
1716 const struct rte_flow_action_phy_port *phy_port;
1720 enum bnxt_ulp_direction_type dir;
1722 phy_port = action_item->conf;
1725 "ParseErr: Invalid Argument\n");
1726 return BNXT_TF_RC_PARSE_ERR;
1729 if (phy_port->original) {
1731 "Parse Err:Port Original not supported\n");
1732 return BNXT_TF_RC_PARSE_ERR;
1734 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1735 if (dir != BNXT_ULP_DIR_EGRESS) {
1737 "Parse Err:Phy ports are valid only for egress\n");
1738 return BNXT_TF_RC_PARSE_ERR;
1740 /* Get the physical port details from port db */
1741 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1744 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1749 pid = rte_cpu_to_be_32(pid);
1750 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1751 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1753 /* Update the action port set bit */
1754 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1755 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1756 BNXT_ULP_INTF_TYPE_PHY_PORT);
1757 return BNXT_TF_RC_SUCCESS;
1760 /* Function to handle the parsing of RTE Flow action pop vlan. */
/* Pop-vlan takes no arguments; only the action bit is recorded. */
1762 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1763 struct ulp_rte_parser_params *params)
1765 /* Update the act_bitmap with pop */
1766 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1767 return BNXT_TF_RC_SUCCESS;
1770 /* Function to handle the parsing of RTE Flow action push vlan. */
/*
 * Accepts only ethertype 0x8100 (802.1Q), stores it in the PUSH_VLAN
 * action property and sets the PUSH_VLAN action bit.
 * NOTE(review): push_vlan->ethertype is declared big-endian by rte_flow
 * while RTE_ETHER_TYPE_VLAN is host order; the tfp_cpu_to_be_16() in the
 * comparison looks byte-order sensitive — confirm on big-endian targets.
 */
1772 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1773 struct ulp_rte_parser_params *params)
1775 const struct rte_flow_action_of_push_vlan *push_vlan;
1777 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1779 push_vlan = action_item->conf;
1781 ethertype = push_vlan->ethertype;
1782 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1784 "Parse Err: Ethertype not supported\n");
1785 return BNXT_TF_RC_PARSE_ERR;
1787 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1788 &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1789 /* Update the act_bitmap with push vlan */
1790 ULP_BITMAP_SET(params->act_bitmap.bits,
1791 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1792 return BNXT_TF_RC_SUCCESS;
1794 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1795 return BNXT_TF_RC_ERROR;
1798 /* Function to handle the parsing of RTE Flow action set vlan id. */
/*
 * Stores the VLAN vid in the SET_VLAN_VID action property and sets the
 * action bit.
 * NOTE(review): the `vlan_vid->vlan_vid` truthiness check rejects a
 * legitimate VID of 0 — confirm whether VID 0 should be allowed.
 */
1800 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1801 struct ulp_rte_parser_params *params)
1803 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1805 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1807 vlan_vid = action_item->conf;
1808 if (vlan_vid && vlan_vid->vlan_vid) {
1809 vid = vlan_vid->vlan_vid;
1810 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1811 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1812 /* Update the act_bitmap with vlan vid */
1813 ULP_BITMAP_SET(params->act_bitmap.bits,
1814 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1815 return BNXT_TF_RC_SUCCESS;
1817 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1818 return BNXT_TF_RC_ERROR;
1821 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
/*
 * Stores the VLAN priority in the SET_VLAN_PCP action property and sets
 * the action bit; errors out if the action conf is missing.
 */
1823 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1824 struct ulp_rte_parser_params *params)
1826 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1828 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1830 vlan_pcp = action_item->conf;
1832 pcp = vlan_pcp->vlan_pcp;
1833 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1834 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1835 /* Update the act_bitmap with vlan pcp */
1836 ULP_BITMAP_SET(params->act_bitmap.bits,
1837 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1838 return BNXT_TF_RC_SUCCESS;
1840 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1841 return BNXT_TF_RC_ERROR;
1844 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
/* Stores the new source IPv4 address and sets the SET_IPV4_SRC bit. */
1846 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
1847 struct ulp_rte_parser_params *params)
1849 const struct rte_flow_action_set_ipv4 *set_ipv4;
1850 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1852 set_ipv4 = action_item->conf;
1854 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
1855 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
1856 /* Update the act_bitmap with set ipv4 src */
1857 ULP_BITMAP_SET(params->act_bitmap.bits,
1858 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
1859 return BNXT_TF_RC_SUCCESS;
1861 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
1862 return BNXT_TF_RC_ERROR;
1865 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
/* Stores the new destination IPv4 address and sets the SET_IPV4_DST bit. */
1867 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
1868 struct ulp_rte_parser_params *params)
1870 const struct rte_flow_action_set_ipv4 *set_ipv4;
1871 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1873 set_ipv4 = action_item->conf;
1875 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
1876 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
1877 /* Update the act_bitmap with set ipv4 dst */
1878 ULP_BITMAP_SET(params->act_bitmap.bits,
1879 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
1880 return BNXT_TF_RC_SUCCESS;
1882 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
1883 return BNXT_TF_RC_ERROR;
1886 /* Function to handle the parsing of RTE Flow action set tp src.*/
/* Stores the new source L4 port and sets the SET_TP_SRC action bit. */
1888 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
1889 struct ulp_rte_parser_params *params)
1891 const struct rte_flow_action_set_tp *set_tp;
1892 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1894 set_tp = action_item->conf;
1896 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
1897 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
1898 /* Update the act_bitmap with set tp src */
1899 ULP_BITMAP_SET(params->act_bitmap.bits,
1900 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
1901 return BNXT_TF_RC_SUCCESS;
1904 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1905 return BNXT_TF_RC_ERROR;
1908 /* Function to handle the parsing of RTE Flow action set tp dst.*/
1910 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
1911 struct ulp_rte_parser_params *params)
1913 const struct rte_flow_action_set_tp *set_tp;
1914 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1916 set_tp = action_item->conf;
1918 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
1919 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
1920 /* Update the hdr_bitmap with set tp dst */
1921 ULP_BITMAP_SET(params->act_bitmap.bits,
1922 BNXT_ULP_ACTION_BIT_SET_TP_DST);
1923 return BNXT_TF_RC_SUCCESS;
1926 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1927 return BNXT_TF_RC_ERROR;
1930 /* Function to handle the parsing of RTE Flow action dec ttl.*/
/* Dec-ttl takes no arguments; only the action bit is recorded. */
1932 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
1933 struct ulp_rte_parser_params *params)
1935 /* Update the act_bitmap with dec ttl */
1936 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
1937 return BNXT_TF_RC_SUCCESS;