1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Utility function to skip the void items. */
/*
 * Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries so the
 * caller always lands on a meaningful item (or NULL/END).
 * @item:      in/out cursor into the flow-item array.
 * @increment: step applied to the cursor — exact use not visible here
 *             (body lines are elided in this listing); TODO confirm whether
 *             it is applied before or during the skip loop.
 * NOTE(review): return value/type elided from this listing; callers treat a
 * zero result as failure (see ulp_rte_vxlan_encap_act_handler) — confirm.
 */
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
23 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] in params->fld_bitmap when its mask is non-zero.
 * A mask that is set but not all-ones additionally raises the wildcard
 * match-type bit (BNXT_ULP_MATCH_TYPE_BITMASK_WM); an all-zero mask clears
 * the index bit instead.
 */
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
35 struct ulp_rte_hdr_field *field;
/* NOTE(review): "¶ms" below is a mis-encoding of "&params" (HTML-entity
 * corruption in this listing) — restore "&params" when fixing encoding. */
37 field = ¶ms->hdr_field[idx];
38 if (ulp_bitmap_notzero(field->mask, field->size)) {
39 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial mask => wildcard match rather than exact match. */
41 if (!ulp_bitmap_is_ones(field->mask, field->size))
42 ULP_BITMAP_SET(params->fld_bitmap.bits,
43 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
45 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
49 /* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec (field->size bytes are copied per the
 * visible memcpy) and — per the return type — hands back a hdr_field
 * pointer, presumably the next field so copies can be chained; the
 * advancing/return lines are elided here, verify against the full source.
 */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
56 memcpy(field->spec, buffer, field->size);
61 /* Utility function to copy field masks items */
/*
 * Copies 'size' bytes of mask data into hdr_field[*idx].mask and refreshes
 * the field bitmap for that index. *idx is passed by pointer — presumably
 * advanced for chained calls (increment line elided; confirm).
 */
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
/* NOTE(review): "¶ms" is mis-encoded "&params" — listing artifact. */
68 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
70 memcpy(field->mask, buffer, size);
71 ulp_rte_parser_field_bitmap_update(params, *idx);
/*
 * Top-level pattern walker: iterates the rte_flow item array until END,
 * dispatching each supported item type to its registered handler from the
 * ulp_hdr_info table. Seeds field_idx past the implicit SVIF slots and
 * tags the header bitmap for egress flows. On exit, fills in the implied
 * SVIF if no explicit source item set it.
 * Returns BNXT_TF_RC_SUCCESS, or PARSE_ERR/ERROR on unsupported/failed items.
 */
76 * Function to handle the parsing of RTE Flows and placing
77 * the RTE flow items into the ulp structures.
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81 struct ulp_rte_parser_params *params)
83 const struct rte_flow_item *item = pattern;
84 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots for the SVIF entry. */
86 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87 if (params->dir == ULP_DIR_EGRESS)
88 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
91 /* Parse all the items in the pattern */
92 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
93 /* get the header information from the flow_hdr_info table */
94 hdr_info = &ulp_hdr_info[item->type];
95 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
97 "Truflow parser does not support type %d\n",
99 return BNXT_TF_RC_PARSE_ERR;
100 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
101 /* call the registered callback handler */
102 if (hdr_info->proto_hdr_func) {
103 if (hdr_info->proto_hdr_func(item, params) !=
104 BNXT_TF_RC_SUCCESS) {
105 return BNXT_TF_RC_ERROR;
/* Best-effort: result deliberately ignored (void cast). */
111 /* update the implied SVIF */
112 (void)ulp_rte_parser_svif_process(params);
113 return BNXT_TF_RC_SUCCESS;
/*
 * Top-level action walker: mirrors the header parser — iterates the
 * rte_flow action array until END and dispatches each supported action to
 * its handler from ulp_act_info. Tags the action bitmap for egress flows
 * and, after the walk, applies the implied VNIC action when no explicit
 * VNIC/VPORT action was given.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
117 * Function to handle the parsing of RTE Flows and placing
118 * the RTE flow actions into the ulp structures.
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122 struct ulp_rte_parser_params *params)
124 const struct rte_flow_action *action_item = actions;
/* NOTE(review): variable is named hdr_info but holds act_info — works,
 * yet renaming to act_info would aid readability. */
125 struct bnxt_ulp_rte_act_info *hdr_info;
127 if (params->dir == ULP_DIR_EGRESS)
128 ULP_BITMAP_SET(params->act_bitmap.bits,
129 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
131 /* Parse all the items in the pattern */
132 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
133 /* get the header information from the flow_hdr_info table */
134 hdr_info = &ulp_act_info[action_item->type];
135 if (hdr_info->act_type ==
136 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
138 "Truflow parser does not support act %u\n",
140 return BNXT_TF_RC_ERROR;
141 } else if (hdr_info->act_type ==
142 BNXT_ULP_ACT_TYPE_SUPPORTED) {
143 /* call the registered callback handler */
144 if (hdr_info->proto_act_func) {
145 if (hdr_info->proto_act_func(action_item,
147 BNXT_TF_RC_SUCCESS) {
148 return BNXT_TF_RC_ERROR;
154 /* update the implied VNIC */
155 ulp_rte_parser_vnic_process(params);
156 return BNXT_TF_RC_SUCCESS;
159 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Records the source interface (SVIF) spec/mask into the dedicated SVIF
 * hdr_field slot, exactly once per flow. For PORT_ID items the DPDK port id
 * is first translated to the bnxt SVIF via the port database, using the
 * flow direction from the computed fields. The SVIF is stored big-endian in
 * the field and its CPU-order value is cached in the SVIF_FLAG computed
 * field so later callers can detect it was set.
 * Returns BNXT_TF_RC_ERROR if an SVIF was already set or the port id is
 * invalid; BNXT_TF_RC_SUCCESS otherwise.
 */
161 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
162 enum rte_flow_item_type proto,
166 uint16_t port_id = svif;
168 struct ulp_rte_hdr_field *hdr_field;
/* Only one source-of-traffic item (PF/VF/port_id/phy_port) is allowed. */
172 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
173 BNXT_ULP_INVALID_SVIF_VAL) {
175 "SVIF already set,multiple source not support'd\n");
176 return BNXT_TF_RC_ERROR;
179 if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
180 dir = ULP_COMP_FLD_IDX_RD(params,
181 BNXT_ULP_CF_IDX_DIRECTION);
182 /* perform the conversion from dpdk port to bnxt svif */
183 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
187 "Invalid port id\n");
188 return BNXT_TF_RC_ERROR;
190 ulp_port_db_svif_get(params->ulp_ctx, ifindex, dir, &svif);
191 svif = rte_cpu_to_be_16(svif);
/* NOTE(review): "¶ms" is mis-encoded "&params" — listing artifact. */
193 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
194 memcpy(hdr_field->spec, &svif, sizeof(svif));
195 memcpy(hdr_field->mask, &mask, sizeof(mask));
196 hdr_field->size = sizeof(svif);
197 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
198 rte_be_to_cpu_16(svif));
199 return BNXT_TF_RC_SUCCESS;
202 /* Function to handle the parsing of the RTE port id */
/*
 * Implied-SVIF fallback, run after the pattern walk: if no explicit
 * source item set the SVIF, derive it from the incoming interface
 * (computed field) and record it via ulp_rte_parser_svif_set() as a
 * fully-masked PORT_ID match.
 */
204 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
206 uint16_t port_id = 0;
207 uint16_t svif_mask = 0xFFFF;
/* SVIF already set by an explicit item — nothing to do. */
209 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
210 BNXT_ULP_INVALID_SVIF_VAL)
211 return BNXT_TF_RC_SUCCESS;
213 /* SVIF not set. So get the port id */
214 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
216 /* Update the SVIF details */
217 return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
221 /* Function to handle the implicit VNIC RTE port id */
/*
 * Implied-VNIC fallback, run after the action walk: when neither a VNIC
 * nor a VPORT action was specified, apply the default PF action handler
 * (with a NULL action item) to steer to the PF VNIC.
 * Always returns BNXT_TF_RC_SUCCESS.
 */
223 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
/* NOTE(review): "¶ms" is mis-encoded "&params" — listing artifact. */
225 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
227 if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
228 ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
229 return BNXT_TF_RC_SUCCESS;
231 /* Update the vnic details */
232 ulp_rte_pf_act_handler(NULL, params);
233 return BNXT_TF_RC_SUCCESS;
236 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item handler: the PF item carries no spec, so the SVIF is taken from
 * the incoming interface computed field and recorded with a full mask.
 * 'item' is unused beyond dispatch (no spec/mask to read).
 */
238 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
239 struct ulp_rte_parser_params *params)
241 uint16_t port_id = 0;
242 uint16_t svif_mask = 0xFFFF;
244 /* Get the port id */
245 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
247 /* Update the SVIF details */
248 return ulp_rte_parser_svif_set(params,
253 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF item handler: extracts the VF id from spec/mask (truncated to 16 bits
 * — the rte_flow_item_vf 'id' field is 32-bit) and records it as the SVIF.
 * The NULL-checks guarding the spec/mask dereferences are elided in this
 * listing; confirm they exist in the full source before relying on them.
 */
255 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
256 struct ulp_rte_parser_params *params)
258 const struct rte_flow_item_vf *vf_spec = item->spec;
259 const struct rte_flow_item_vf *vf_mask = item->mask;
260 uint16_t svif = 0, mask = 0;
262 /* Get VF rte_flow_item for Port details */
264 svif = (uint16_t)vf_spec->id;
266 mask = (uint16_t)vf_mask->id;
268 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
271 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * PORT_ID item handler: reads the DPDK port id from spec/mask (truncated
 * to 16 bits) and hands it to ulp_rte_parser_svif_set(), which converts it
 * to a bnxt SVIF. Spec/mask NULL-guards are elided in this listing.
 */
273 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
274 struct ulp_rte_parser_params *params)
276 const struct rte_flow_item_port_id *port_spec = item->spec;
277 const struct rte_flow_item_port_id *port_mask = item->mask;
278 uint16_t svif = 0, mask = 0;
281 * Copy the rte_flow_item for Port into hdr_field using port id
285 svif = (uint16_t)port_spec->id;
287 mask = (uint16_t)port_mask->id;
289 /* Update the SVIF details */
290 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
293 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * PHY_PORT item handler: takes the physical port index from spec/mask and
 * records it as the SVIF. Note svif/mask are uint32_t here (port index is
 * 32-bit) while ulp_rte_parser_svif_set() takes 16-bit values — the upper
 * bits are implicitly truncated at the call.
 */
295 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
296 struct ulp_rte_parser_params *params)
298 const struct rte_flow_item_phy_port *port_spec = item->spec;
299 const struct rte_flow_item_phy_port *port_mask = item->mask;
300 uint32_t svif = 0, mask = 0;
302 /* Copy the rte_flow_item for phy port into hdr_field */
304 svif = port_spec->index;
306 mask = port_mask->index;
308 /* Update the SVIF details */
309 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
312 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * ETH item handler: copies dst MAC, src MAC and ether-type spec/mask into
 * consecutive hdr_field slots, reserves the (possibly unused) VLAN slots
 * right after them, and sets the outer- or inner-ETH header bit depending
 * on whether an outer ETH was already seen.
 */
314 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
315 struct ulp_rte_parser_params *params)
317 const struct rte_flow_item_eth *eth_spec = item->spec;
318 const struct rte_flow_item_eth *eth_mask = item->mask;
319 struct ulp_rte_hdr_field *field;
320 uint32_t idx = params->field_idx;
321 uint64_t set_flag = 0;
325 * Copy the rte_flow_item for eth into hdr_field using ethernet
329 size = sizeof(eth_spec->dst.addr_bytes);
/* NOTE(review): "¶ms" is mis-encoded "&params" — listing artifact. */
330 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
331 eth_spec->dst.addr_bytes,
333 size = sizeof(eth_spec->src.addr_bytes);
334 field = ulp_rte_parser_fld_copy(field,
335 eth_spec->src.addr_bytes,
337 field = ulp_rte_parser_fld_copy(field,
339 sizeof(eth_spec->type));
342 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
343 sizeof(eth_mask->dst.addr_bytes));
344 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
345 sizeof(eth_mask->src.addr_bytes));
/* NOTE(review): "ð_mask" is mis-encoded "&eth_mask" — listing artifact. */
346 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
347 sizeof(eth_mask->type));
349 /* Add number of vlan header elements */
350 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
/* Pre-reserve slots so a later VLAN item knows where to write. */
351 params->vlan_idx = params->field_idx;
352 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
354 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
355 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
356 BNXT_ULP_HDR_BIT_O_ETH);
/* Second ETH in the pattern => this one is the inner (tunneled) header. */
358 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
360 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
361 BNXT_ULP_HDR_BIT_I_ETH);
363 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
364 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
366 return BNXT_TF_RC_SUCCESS;
369 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * VLAN item handler: splits the TCI into priority (top 3 PCP bits) and
 * VLAN id, stores spec and mask into the VLAN slots reserved by the ETH
 * handler, then classifies this tag as outer tag 1/2 or inner tag 1/2
 * based on which ETH headers and previous tags were seen, updating the
 * corresponding VTAG computed fields. Errors out on a VLAN with no
 * preceding ETH header.
 */
371 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
372 struct ulp_rte_parser_params *params)
374 const struct rte_flow_item_vlan *vlan_spec = item->spec;
375 const struct rte_flow_item_vlan *vlan_mask = item->mask;
376 struct ulp_rte_hdr_field *field;
377 struct ulp_rte_hdr_bitmap *hdr_bit;
378 uint32_t idx = params->vlan_idx;
379 uint16_t vlan_tag, priority;
380 uint32_t outer_vtag_num;
381 uint32_t inner_vtag_num;
384 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* TCI is wire (big-endian): priority = top 3 bits, re-swapped to BE. */
388 vlan_tag = ntohs(vlan_spec->tci);
389 priority = htons(vlan_tag >> 13);
391 vlan_tag = htons(vlan_tag);
/* NOTE(review): "¶ms" is mis-encoded "&params" — listing artifact. */
393 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
396 field = ulp_rte_parser_fld_copy(field,
399 field = ulp_rte_parser_fld_copy(field,
400 &vlan_spec->inner_type,
401 sizeof(vlan_spec->inner_type));
/* Same split for the mask. */
405 vlan_tag = ntohs(vlan_mask->tci);
406 priority = htons(vlan_tag >> 13);
408 vlan_tag = htons(vlan_tag);
410 field = ¶ms->hdr_field[idx];
411 memcpy(field->mask, &priority, field->size);
413 memcpy(field->mask, &vlan_tag, field->size);
415 memcpy(field->mask, &vlan_mask->inner_type, field->size);
417 /* Set the vlan index to new incremented value */
418 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
420 /* Get the outer tag and inner tag counts */
421 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
422 BNXT_ULP_CF_IDX_O_VTAG_NUM);
423 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
424 BNXT_ULP_CF_IDX_I_VTAG_NUM);
426 /* Update the hdr_bitmap of the vlans */
427 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first outer tag (outer ETH seen, no tag yet). */
428 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
430 /* Update the vlan tag num */
432 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
434 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_PRESENT, 1);
/* Case 2: second outer tag (QinQ). */
435 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
436 ULP_COMP_FLD_IDX_RD(params,
437 BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
438 outer_vtag_num == 1) {
439 /* update the vlan tag num */
441 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
443 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
/* Case 3: first inner tag (inner ETH seen). */
444 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
445 ULP_COMP_FLD_IDX_RD(params,
446 BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
447 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
449 /* update the vlan tag num */
451 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
453 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_PRESENT, 1);
/* Case 4: second inner tag. */
454 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
455 ULP_COMP_FLD_IDX_RD(params,
456 BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
457 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
/* NOTE(review): BUG suspect — this branch re-checks O_VTAG_PRESENT a
 * second time; by symmetry with case 2 the inner path should check
 * BNXT_ULP_CF_IDX_I_VTAG_PRESENT here. Verify and fix in full source. */
458 ULP_COMP_FLD_IDX_RD(params,
459 BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
460 inner_vtag_num == 1) {
461 /* update the vlan tag num */
463 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
465 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
/* NOTE(review): runtime log string has typo "withtout" — fix upstream. */
467 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
468 return BNXT_TF_RC_ERROR;
470 return BNXT_TF_RC_SUCCESS;
473 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * IPV4 item handler: copies the ten rte_ipv4_hdr spec fields and their
 * masks into consecutive hdr_field slots, advances field_idx by
 * BNXT_ULP_PROTO_HDR_IPV4_NUM, and sets the outer- or inner-IPv4 header
 * bit plus the matching L3 computed field. Rejects a third L3 header
 * (inner L3 already present).
 */
475 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
476 struct ulp_rte_parser_params *params)
478 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
479 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
480 struct ulp_rte_hdr_field *field;
/* NOTE(review): "¶ms" is mis-encoded "&params" — listing artifact. */
481 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
482 uint32_t idx = params->field_idx;
484 uint32_t inner_l3, outer_l3;
486 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
488 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
489 return BNXT_TF_RC_ERROR;
493 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
/* Field-by-field copy keeps each header field separately maskable. */
497 size = sizeof(ipv4_spec->hdr.version_ihl);
498 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
499 &ipv4_spec->hdr.version_ihl,
501 size = sizeof(ipv4_spec->hdr.type_of_service);
502 field = ulp_rte_parser_fld_copy(field,
503 &ipv4_spec->hdr.type_of_service,
505 size = sizeof(ipv4_spec->hdr.total_length);
506 field = ulp_rte_parser_fld_copy(field,
507 &ipv4_spec->hdr.total_length,
509 size = sizeof(ipv4_spec->hdr.packet_id);
510 field = ulp_rte_parser_fld_copy(field,
511 &ipv4_spec->hdr.packet_id,
513 size = sizeof(ipv4_spec->hdr.fragment_offset);
514 field = ulp_rte_parser_fld_copy(field,
515 &ipv4_spec->hdr.fragment_offset,
517 size = sizeof(ipv4_spec->hdr.time_to_live);
518 field = ulp_rte_parser_fld_copy(field,
519 &ipv4_spec->hdr.time_to_live,
521 size = sizeof(ipv4_spec->hdr.next_proto_id);
522 field = ulp_rte_parser_fld_copy(field,
523 &ipv4_spec->hdr.next_proto_id,
525 size = sizeof(ipv4_spec->hdr.hdr_checksum);
526 field = ulp_rte_parser_fld_copy(field,
527 &ipv4_spec->hdr.hdr_checksum,
529 size = sizeof(ipv4_spec->hdr.src_addr);
530 field = ulp_rte_parser_fld_copy(field,
531 &ipv4_spec->hdr.src_addr,
533 size = sizeof(ipv4_spec->hdr.dst_addr);
534 field = ulp_rte_parser_fld_copy(field,
535 &ipv4_spec->hdr.dst_addr,
/* Mask copies advance idx through the same ten slots. */
539 ulp_rte_prsr_mask_copy(params, &idx,
540 &ipv4_mask->hdr.version_ihl,
541 sizeof(ipv4_mask->hdr.version_ihl));
542 ulp_rte_prsr_mask_copy(params, &idx,
543 &ipv4_mask->hdr.type_of_service,
544 sizeof(ipv4_mask->hdr.type_of_service));
545 ulp_rte_prsr_mask_copy(params, &idx,
546 &ipv4_mask->hdr.total_length,
547 sizeof(ipv4_mask->hdr.total_length));
548 ulp_rte_prsr_mask_copy(params, &idx,
549 &ipv4_mask->hdr.packet_id,
550 sizeof(ipv4_mask->hdr.packet_id));
551 ulp_rte_prsr_mask_copy(params, &idx,
552 &ipv4_mask->hdr.fragment_offset,
553 sizeof(ipv4_mask->hdr.fragment_offset));
554 ulp_rte_prsr_mask_copy(params, &idx,
555 &ipv4_mask->hdr.time_to_live,
556 sizeof(ipv4_mask->hdr.time_to_live));
557 ulp_rte_prsr_mask_copy(params, &idx,
558 &ipv4_mask->hdr.next_proto_id,
559 sizeof(ipv4_mask->hdr.next_proto_id));
560 ulp_rte_prsr_mask_copy(params, &idx,
561 &ipv4_mask->hdr.hdr_checksum,
562 sizeof(ipv4_mask->hdr.hdr_checksum));
563 ulp_rte_prsr_mask_copy(params, &idx,
564 &ipv4_mask->hdr.src_addr,
565 sizeof(ipv4_mask->hdr.src_addr));
566 ulp_rte_prsr_mask_copy(params, &idx,
567 &ipv4_mask->hdr.dst_addr,
568 sizeof(ipv4_mask->hdr.dst_addr));
570 /* Add the number of ipv4 header elements */
571 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
573 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
574 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
/* An existing outer L3 (v4 or v6) makes this the inner IPv4 header. */
576 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
577 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
578 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
580 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
582 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
584 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
586 return BNXT_TF_RC_SUCCESS;
589 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPV6 item handler: decomposes vtc_flow into version / traffic-class /
 * flow-label sub-fields (via the BNXT_ULP_GET_IPV6_* extractors), copies
 * them plus payload_len, proto, hop_limits, src/dst addresses and their
 * masks into hdr_field slots, advances field_idx, and sets outer/inner
 * IPv6 header bits and L3 computed fields. Rejects a third L3 header.
 */
591 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
592 struct ulp_rte_parser_params *params)
594 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
595 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
596 struct ulp_rte_hdr_field *field;
/* NOTE(review): "¶ms" is mis-encoded "&params" — listing artifact. */
597 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
598 uint32_t idx = params->field_idx;
600 uint32_t inner_l3, outer_l3;
601 uint32_t vtcf, vtcf_mask;
603 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
605 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
606 return BNXT_TF_RC_ERROR;
610 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* vtc_flow is stored as three separate match fields: ver, tc, flowlabel. */
614 size = sizeof(ipv6_spec->hdr.vtc_flow);
616 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
617 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
621 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
622 field = ulp_rte_parser_fld_copy(field,
626 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
627 field = ulp_rte_parser_fld_copy(field,
631 size = sizeof(ipv6_spec->hdr.payload_len);
632 field = ulp_rte_parser_fld_copy(field,
633 &ipv6_spec->hdr.payload_len,
635 size = sizeof(ipv6_spec->hdr.proto);
636 field = ulp_rte_parser_fld_copy(field,
637 &ipv6_spec->hdr.proto,
639 size = sizeof(ipv6_spec->hdr.hop_limits);
640 field = ulp_rte_parser_fld_copy(field,
641 &ipv6_spec->hdr.hop_limits,
643 size = sizeof(ipv6_spec->hdr.src_addr);
644 field = ulp_rte_parser_fld_copy(field,
645 &ipv6_spec->hdr.src_addr,
647 size = sizeof(ipv6_spec->hdr.dst_addr);
648 field = ulp_rte_parser_fld_copy(field,
649 &ipv6_spec->hdr.dst_addr,
/* Matching decomposition for the mask. */
653 size = sizeof(ipv6_mask->hdr.vtc_flow);
655 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
656 ulp_rte_prsr_mask_copy(params, &idx,
660 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
661 ulp_rte_prsr_mask_copy(params, &idx,
666 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
667 ulp_rte_prsr_mask_copy(params, &idx,
671 ulp_rte_prsr_mask_copy(params, &idx,
672 &ipv6_mask->hdr.payload_len,
673 sizeof(ipv6_mask->hdr.payload_len));
674 ulp_rte_prsr_mask_copy(params, &idx,
675 &ipv6_mask->hdr.proto,
676 sizeof(ipv6_mask->hdr.proto));
677 ulp_rte_prsr_mask_copy(params, &idx,
678 &ipv6_mask->hdr.hop_limits,
679 sizeof(ipv6_mask->hdr.hop_limits));
680 ulp_rte_prsr_mask_copy(params, &idx,
681 &ipv6_mask->hdr.src_addr,
682 sizeof(ipv6_mask->hdr.src_addr));
683 ulp_rte_prsr_mask_copy(params, &idx,
684 &ipv6_mask->hdr.dst_addr,
685 sizeof(ipv6_mask->hdr.dst_addr));
687 /* add number of ipv6 header elements */
688 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
690 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
691 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
693 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
694 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
695 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
696 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
698 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
699 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
701 return BNXT_TF_RC_SUCCESS;
704 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * UDP item handler: copies src/dst port, datagram length and checksum
 * spec/mask into hdr_field slots, advances field_idx, and sets outer- or
 * inner-UDP header bits and the L4 computed fields. Rejects a third L4
 * header.
 */
706 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
707 struct ulp_rte_parser_params *params)
709 const struct rte_flow_item_udp *udp_spec = item->spec;
710 const struct rte_flow_item_udp *udp_mask = item->mask;
711 struct ulp_rte_hdr_field *field;
/* NOTE(review): "¶ms" is mis-encoded "&params" — listing artifact. */
712 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
713 uint32_t idx = params->field_idx;
715 uint32_t inner_l4, outer_l4;
717 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
719 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
720 return BNXT_TF_RC_ERROR;
/* NOTE(review): comment says "ipv4" — copy-paste; this copies UDP. */
724 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
728 size = sizeof(udp_spec->hdr.src_port);
729 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
730 &udp_spec->hdr.src_port,
732 size = sizeof(udp_spec->hdr.dst_port);
733 field = ulp_rte_parser_fld_copy(field,
734 &udp_spec->hdr.dst_port,
736 size = sizeof(udp_spec->hdr.dgram_len);
737 field = ulp_rte_parser_fld_copy(field,
738 &udp_spec->hdr.dgram_len,
740 size = sizeof(udp_spec->hdr.dgram_cksum);
741 field = ulp_rte_parser_fld_copy(field,
742 &udp_spec->hdr.dgram_cksum,
746 ulp_rte_prsr_mask_copy(params, &idx,
747 &udp_mask->hdr.src_port,
748 sizeof(udp_mask->hdr.src_port));
749 ulp_rte_prsr_mask_copy(params, &idx,
750 &udp_mask->hdr.dst_port,
751 sizeof(udp_mask->hdr.dst_port));
752 ulp_rte_prsr_mask_copy(params, &idx,
753 &udp_mask->hdr.dgram_len,
754 sizeof(udp_mask->hdr.dgram_len));
755 ulp_rte_prsr_mask_copy(params, &idx,
756 &udp_mask->hdr.dgram_cksum,
757 sizeof(udp_mask->hdr.dgram_cksum));
760 /* Add number of UDP header elements */
761 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
763 /* Set the udp header bitmap and computed l4 header bitmaps */
764 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
/* An existing outer L4 (UDP or TCP) makes this the inner UDP header. */
766 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
767 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
768 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
769 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
771 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
772 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
774 return BNXT_TF_RC_SUCCESS;
777 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * TCP item handler: copies the nine rte_tcp_hdr spec fields and their
 * masks into hdr_field slots, advances field_idx by
 * BNXT_ULP_PROTO_HDR_TCP_NUM, and sets outer- or inner-TCP header bits
 * and the L4 computed fields. Rejects a third L4 header.
 */
779 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
780 struct ulp_rte_parser_params *params)
782 const struct rte_flow_item_tcp *tcp_spec = item->spec;
783 const struct rte_flow_item_tcp *tcp_mask = item->mask;
784 struct ulp_rte_hdr_field *field;
/* NOTE(review): "¶ms" is mis-encoded "&params" — listing artifact. */
785 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
786 uint32_t idx = params->field_idx;
788 uint32_t inner_l4, outer_l4;
790 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
792 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
793 return BNXT_TF_RC_ERROR;
/* NOTE(review): comment says "ipv4" — copy-paste; this copies TCP. */
797 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
801 size = sizeof(tcp_spec->hdr.src_port);
802 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
803 &tcp_spec->hdr.src_port,
805 size = sizeof(tcp_spec->hdr.dst_port);
806 field = ulp_rte_parser_fld_copy(field,
807 &tcp_spec->hdr.dst_port,
809 size = sizeof(tcp_spec->hdr.sent_seq);
810 field = ulp_rte_parser_fld_copy(field,
811 &tcp_spec->hdr.sent_seq,
813 size = sizeof(tcp_spec->hdr.recv_ack);
814 field = ulp_rte_parser_fld_copy(field,
815 &tcp_spec->hdr.recv_ack,
817 size = sizeof(tcp_spec->hdr.data_off);
818 field = ulp_rte_parser_fld_copy(field,
819 &tcp_spec->hdr.data_off,
821 size = sizeof(tcp_spec->hdr.tcp_flags);
822 field = ulp_rte_parser_fld_copy(field,
823 &tcp_spec->hdr.tcp_flags,
825 size = sizeof(tcp_spec->hdr.rx_win);
826 field = ulp_rte_parser_fld_copy(field,
827 &tcp_spec->hdr.rx_win,
829 size = sizeof(tcp_spec->hdr.cksum);
830 field = ulp_rte_parser_fld_copy(field,
831 &tcp_spec->hdr.cksum,
833 size = sizeof(tcp_spec->hdr.tcp_urp);
834 field = ulp_rte_parser_fld_copy(field,
835 &tcp_spec->hdr.tcp_urp,
/* NOTE(review): this bare idx advance likely sits in an elided else-branch
 * (skip the slots when no spec is given) — confirm against full source. */
838 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
842 ulp_rte_prsr_mask_copy(params, &idx,
843 &tcp_mask->hdr.src_port,
844 sizeof(tcp_mask->hdr.src_port));
845 ulp_rte_prsr_mask_copy(params, &idx,
846 &tcp_mask->hdr.dst_port,
847 sizeof(tcp_mask->hdr.dst_port));
848 ulp_rte_prsr_mask_copy(params, &idx,
849 &tcp_mask->hdr.sent_seq,
850 sizeof(tcp_mask->hdr.sent_seq));
851 ulp_rte_prsr_mask_copy(params, &idx,
852 &tcp_mask->hdr.recv_ack,
853 sizeof(tcp_mask->hdr.recv_ack));
854 ulp_rte_prsr_mask_copy(params, &idx,
855 &tcp_mask->hdr.data_off,
856 sizeof(tcp_mask->hdr.data_off));
857 ulp_rte_prsr_mask_copy(params, &idx,
858 &tcp_mask->hdr.tcp_flags,
859 sizeof(tcp_mask->hdr.tcp_flags));
860 ulp_rte_prsr_mask_copy(params, &idx,
861 &tcp_mask->hdr.rx_win,
862 sizeof(tcp_mask->hdr.rx_win));
863 ulp_rte_prsr_mask_copy(params, &idx,
864 &tcp_mask->hdr.cksum,
865 sizeof(tcp_mask->hdr.cksum));
866 ulp_rte_prsr_mask_copy(params, &idx,
867 &tcp_mask->hdr.tcp_urp,
868 sizeof(tcp_mask->hdr.tcp_urp));
870 /* add number of TCP header elements */
871 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
/* NOTE(review): comment says "udp" — copy-paste; these are TCP bits. */
873 /* Set the udp header bitmap and computed l4 header bitmaps */
874 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
876 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
877 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
878 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
879 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
881 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
882 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
884 return BNXT_TF_RC_SUCCESS;
887 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * VXLAN item handler: copies flags, rsvd0, vni and rsvd1 spec/mask into
 * hdr_field slots, advances field_idx, and sets the tunnel-VXLAN header
 * bit (BNXT_ULP_HDR_BIT_T_VXLAN).
 */
889 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
890 struct ulp_rte_parser_params *params)
892 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
893 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
894 struct ulp_rte_hdr_field *field;
/* NOTE(review): "¶ms" is mis-encoded "&params" — listing artifact. */
895 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
896 uint32_t idx = params->field_idx;
900 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
904 size = sizeof(vxlan_spec->flags);
905 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
908 size = sizeof(vxlan_spec->rsvd0);
909 field = ulp_rte_parser_fld_copy(field,
912 size = sizeof(vxlan_spec->vni);
913 field = ulp_rte_parser_fld_copy(field,
916 size = sizeof(vxlan_spec->rsvd1);
917 field = ulp_rte_parser_fld_copy(field,
922 ulp_rte_prsr_mask_copy(params, &idx,
924 sizeof(vxlan_mask->flags));
925 ulp_rte_prsr_mask_copy(params, &idx,
927 sizeof(vxlan_mask->rsvd0));
928 ulp_rte_prsr_mask_copy(params, &idx,
930 sizeof(vxlan_mask->vni));
931 ulp_rte_prsr_mask_copy(params, &idx,
933 sizeof(vxlan_mask->rsvd1));
935 /* Add number of vxlan header elements */
936 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
938 /* Update the hdr_bitmap with vxlan */
939 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
940 return BNXT_TF_RC_SUCCESS;
943 /* Function to handle the parsing of RTE Flow item void Header */
/* No-op handler for VOID items: nothing to record, always succeeds. */
945 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
946 struct ulp_rte_parser_params *params __rte_unused)
948 return BNXT_TF_RC_SUCCESS;
951 /* Function to handle the parsing of RTE Flow action void Header. */
/* No-op handler for VOID actions: nothing to record, always succeeds. */
953 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
954 struct ulp_rte_parser_params *params __rte_unused)
956 return BNXT_TF_RC_SUCCESS;
959 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * MARK action handler: stores the 32-bit mark id (big-endian) into the
 * action-properties blob and sets the MARK action bit. Fails with an
 * error log when the action configuration is missing/invalid (guard line
 * elided in this listing).
 */
961 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
962 struct ulp_rte_parser_params *param)
964 const struct rte_flow_action_mark *mark;
/* NOTE(review): "¶m" is mis-encoded "&param" — listing artifact. */
965 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
968 mark = action_item->conf;
970 mark_id = tfp_cpu_to_be_32(mark->id);
971 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
972 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
974 /* Set the MARK bit in the action bitmap */
975 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
976 return BNXT_TF_RC_SUCCESS;
978 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
979 return BNXT_TF_RC_ERROR;
982 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * RSS action handler: only records that RSS was requested by setting the
 * RSS action bit; the rss configuration itself is not copied here.
 * Fails with an error log when action conf is missing (guard line elided
 * in this listing).
 */
984 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
985 struct ulp_rte_parser_params *param)
987 const struct rte_flow_action_rss *rss = action_item->conf;
990 /* Set the RSS bit in the action bitmap */
991 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
992 return BNXT_TF_RC_SUCCESS;
994 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
995 return BNXT_TF_RC_ERROR;
998 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1000 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1001 struct ulp_rte_parser_params *params)
1003 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1004 const struct rte_flow_item *item;
1005 const struct rte_flow_item_eth *eth_spec;
1006 const struct rte_flow_item_ipv4 *ipv4_spec;
1007 const struct rte_flow_item_ipv6 *ipv6_spec;
1008 struct rte_flow_item_vxlan vxlan_spec;
1009 uint32_t vlan_num = 0, vlan_size = 0;
1010 uint32_t ip_size = 0, ip_type = 0;
1011 uint32_t vxlan_size = 0;
1013 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1014 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1016 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1017 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1019 vxlan_encap = action_item->conf;
1021 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1022 return BNXT_TF_RC_ERROR;
1025 item = vxlan_encap->definition;
1027 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1028 return BNXT_TF_RC_ERROR;
1031 if (!ulp_rte_item_skip_void(&item, 0))
1032 return BNXT_TF_RC_ERROR;
1034 /* must have ethernet header */
1035 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1036 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1037 return BNXT_TF_RC_ERROR;
1039 eth_spec = item->spec;
1040 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1041 ulp_encap_buffer_copy(buff,
1042 eth_spec->dst.addr_bytes,
1043 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1045 /* Goto the next item */
1046 if (!ulp_rte_item_skip_void(&item, 1))
1047 return BNXT_TF_RC_ERROR;
1049 /* May have vlan header */
1050 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1052 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1053 ulp_encap_buffer_copy(buff,
1055 sizeof(struct rte_flow_item_vlan));
1057 if (!ulp_rte_item_skip_void(&item, 1))
1058 return BNXT_TF_RC_ERROR;
1061 /* may have two vlan headers */
1062 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1064 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1065 sizeof(struct rte_flow_item_vlan)],
1067 sizeof(struct rte_flow_item_vlan));
1068 if (!ulp_rte_item_skip_void(&item, 1))
1069 return BNXT_TF_RC_ERROR;
1071 /* Update the vlan count and size of more than one */
1073 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1074 vlan_num = tfp_cpu_to_be_32(vlan_num);
1075 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1078 vlan_size = tfp_cpu_to_be_32(vlan_size);
1079 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1084 /* L3 must be IPv4, IPv6 */
1085 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1086 ipv4_spec = item->spec;
1087 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1089 /* copy the ipv4 details */
1090 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1091 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1092 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1093 ulp_encap_buffer_copy(buff,
1095 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1096 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1098 const uint8_t *tmp_buff;
1100 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1101 ulp_encap_buffer_copy(buff,
1102 &ipv4_spec->hdr.version_ihl,
1103 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1104 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1105 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1106 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1107 ulp_encap_buffer_copy(buff,
1109 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1111 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1112 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1113 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1114 ulp_encap_buffer_copy(buff,
1115 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1116 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1118 /* Update the ip size details */
1119 ip_size = tfp_cpu_to_be_32(ip_size);
1120 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1121 &ip_size, sizeof(uint32_t));
1123 /* update the ip type */
1124 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1125 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1126 &ip_type, sizeof(uint32_t));
1128 if (!ulp_rte_item_skip_void(&item, 1))
1129 return BNXT_TF_RC_ERROR;
1130 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1131 ipv6_spec = item->spec;
1132 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1134 /* copy the ipv4 details */
1135 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1136 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1138 /* Update the ip size details */
1139 ip_size = tfp_cpu_to_be_32(ip_size);
1140 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1141 &ip_size, sizeof(uint32_t));
1143 /* update the ip type */
1144 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1145 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1146 &ip_type, sizeof(uint32_t));
1148 if (!ulp_rte_item_skip_void(&item, 1))
1149 return BNXT_TF_RC_ERROR;
1151 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1152 return BNXT_TF_RC_ERROR;
1156 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1157 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1158 return BNXT_TF_RC_ERROR;
1160 /* copy the udp details */
1161 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1162 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1164 if (!ulp_rte_item_skip_void(&item, 1))
1165 return BNXT_TF_RC_ERROR;
1168 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1169 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1170 return BNXT_TF_RC_ERROR;
1172 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1173 /* copy the vxlan details */
1174 memcpy(&vxlan_spec, item->spec, vxlan_size);
1175 vxlan_spec.flags = 0x08;
1176 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1177 (const uint8_t *)&vxlan_spec,
1179 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1180 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1181 &vxlan_size, sizeof(uint32_t));
1183 /*update the hdr_bitmap with vxlan */
1184 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1185 return BNXT_TF_RC_SUCCESS;
1188 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1190 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1192 struct ulp_rte_parser_params *params)
1194 /* update the hdr_bitmap with vxlan */
1195 ULP_BITMAP_SET(params->act_bitmap.bits,
1196 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1197 return BNXT_TF_RC_SUCCESS;
1200 /* Function to handle the parsing of RTE Flow action drop Header. */
1202 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1203 struct ulp_rte_parser_params *params)
1205 /* Update the hdr_bitmap with drop */
1206 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1207 return BNXT_TF_RC_SUCCESS;
1210 /* Function to handle the parsing of RTE Flow action count. */
1212 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1213 struct ulp_rte_parser_params *params)
1216 const struct rte_flow_action_count *act_count;
1217 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1219 act_count = action_item->conf;
1221 if (act_count->shared) {
1223 "Parse Error:Shared count not supported\n");
1224 return BNXT_TF_RC_PARSE_ERR;
1226 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1228 BNXT_ULP_ACT_PROP_SZ_COUNT);
1231 /* Update the hdr_bitmap with count */
1232 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1233 return BNXT_TF_RC_SUCCESS;
1236 /* Function to handle the parsing of RTE Flow action PF. */
1238 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1239 struct ulp_rte_parser_params *params)
1243 /* Update the hdr_bitmap with vnic bit */
1244 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1246 /* copy the PF of the current device into VNIC Property */
1247 svif = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1248 svif = bnxt_get_vnic_id(svif);
1249 svif = rte_cpu_to_be_32(svif);
1250 memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1251 &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1253 return BNXT_TF_RC_SUCCESS;
1256 /* Function to handle the parsing of RTE Flow action VF. */
1258 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1259 struct ulp_rte_parser_params *param)
1261 const struct rte_flow_action_vf *vf_action;
1264 vf_action = action_item->conf;
1266 if (vf_action->original) {
1268 "Parse Error:VF Original not supported\n");
1269 return BNXT_TF_RC_PARSE_ERR;
1271 /* TBD: Update the computed VNIC using VF conversion */
1272 pid = bnxt_get_vnic_id(vf_action->id);
1273 pid = rte_cpu_to_be_32(pid);
1274 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1275 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1278 /* Update the hdr_bitmap with count */
1279 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1280 return BNXT_TF_RC_SUCCESS;
1283 /* Function to handle the parsing of RTE Flow action port_id. */
1285 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1286 struct ulp_rte_parser_params *param)
1288 const struct rte_flow_action_port_id *port_id;
1291 port_id = act_item->conf;
1293 if (port_id->original) {
1295 "ParseErr:Portid Original not supported\n");
1296 return BNXT_TF_RC_PARSE_ERR;
1298 /* TBD: Update the computed VNIC using port conversion */
1299 pid = bnxt_get_vnic_id(port_id->id);
1300 pid = rte_cpu_to_be_32(pid);
1301 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1302 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1305 /* Update the hdr_bitmap with count */
1306 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1307 return BNXT_TF_RC_SUCCESS;
1310 /* Function to handle the parsing of RTE Flow action phy_port. */
1312 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1313 struct ulp_rte_parser_params *prm)
1315 const struct rte_flow_action_phy_port *phy_port;
1318 phy_port = action_item->conf;
1320 if (phy_port->original) {
1322 "Parse Err:Port Original not supported\n");
1323 return BNXT_TF_RC_PARSE_ERR;
1325 /* Get the vport of the physical port */
1326 /* TBD: shall be changed later to portdb call */
1327 vport = 1 << phy_port->index;
1328 vport = rte_cpu_to_be_32(vport);
1329 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1330 &vport, BNXT_ULP_ACT_PROP_SZ_VPORT);
1333 /* Update the hdr_bitmap with count */
1334 ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1335 return BNXT_TF_RC_SUCCESS;