1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Utility function to skip the void items. */
/*
 * NOTE(review): this listing is an elided excerpt — the return type,
 * braces and part of the body are not shown.  'increment' presumably
 * advances past the current item before skipping VOIDs — confirm
 * against the full source.
 */
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
/* Walk past any run of consecutive VOID items. */
23 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 /* Utility function to update the field_bitmap */
/*
 * Sets bit 'idx' in params->fld_bitmap when the hdr_field at that index
 * has a non-zero mask; a partial (not all-ones) mask additionally marks
 * the flow as a wildcard match.  A zero mask clears the index bit.
 * NOTE(review): "¶ms" below is HTML-entity corruption of "&params";
 * restore it when fixing the file encoding.
 */
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
35 struct ulp_rte_hdr_field *field;
37 field = ¶ms->hdr_field[idx];
38 if (ulp_bitmap_notzero(field->mask, field->size)) {
39 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial mask => wildcard-match type */
41 if (!ulp_bitmap_is_ones(field->mask, field->size))
42 ULP_BITMAP_SET(params->fld_bitmap.bits,
43 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
/* else arm (elided in excerpt): zero mask, clear the index bit */
45 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
49 /* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec (field->size bytes).  The elided tail
 * presumably records the field size and returns the next hdr_field slot
 * so callers can chain copies — confirm against the full source.
 */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
56 memcpy(field->spec, buffer, field->size);
61 /* Utility function to copy field masks items */
/*
 * Copies 'size' bytes of 'buffer' into the mask of hdr_field[*idx] and
 * refreshes the field bitmap for that index.  The elided tail presumably
 * advances *idx — confirm.  "¶ms" is corruption of "&params".
 */
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
68 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
70 memcpy(field->mask, buffer, size);
71 ulp_rte_parser_field_bitmap_update(params, *idx);
76 * Function to handle the parsing of RTE Flows and placing
77 * the RTE flow items into the ulp structures.
/*
 * Walks the pattern array until RTE_FLOW_ITEM_TYPE_END, dispatching each
 * item to its registered proto_hdr_func from ulp_hdr_info[].  An
 * unsupported item type aborts the parse.  Afterwards the implied SVIF
 * match is applied via ulp_rte_parser_svif_process().
 */
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81 struct ulp_rte_parser_params *params)
83 const struct rte_flow_item *item = pattern;
84 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Field index starts past the reserved SVIF slots */
86 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87 if (params->dir == ULP_DIR_EGRESS)
88 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
91 /* Parse all the items in the pattern */
92 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
93 /* get the header information from the flow_hdr_info table */
94 hdr_info = &ulp_hdr_info[item->type];
95 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
97 "Truflow parser does not support type %d\n",
99 return BNXT_TF_RC_PARSE_ERR;
100 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
101 /* call the registered callback handler */
102 if (hdr_info->proto_hdr_func) {
103 if (hdr_info->proto_hdr_func(item, params) !=
104 BNXT_TF_RC_SUCCESS) {
105 return BNXT_TF_RC_ERROR;
/* (loop-advance and closing braces elided in this excerpt) */
111 /* update the implied SVIF */
112 (void)ulp_rte_parser_svif_process(params);
113 return BNXT_TF_RC_SUCCESS;
117 * Function to handle the parsing of RTE Flows and placing
118 * the RTE flow actions into the ulp structures.
/*
 * Walks the action array until RTE_FLOW_ACTION_TYPE_END, dispatching each
 * action to its registered proto_act_func from ulp_act_info[]; an
 * unsupported action aborts the parse.  Finishes by applying the implied
 * VNIC action via ulp_rte_parser_vnic_process().
 */
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122 struct ulp_rte_parser_params *params)
124 const struct rte_flow_action *action_item = actions;
125 struct bnxt_ulp_rte_act_info *hdr_info;
127 /* Parse all the items in the pattern */
128 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
129 /* get the header information from the flow_hdr_info table */
130 hdr_info = &ulp_act_info[action_item->type];
131 if (hdr_info->act_type ==
132 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
134 "Truflow parser does not support act %u\n",
136 return BNXT_TF_RC_ERROR;
137 } else if (hdr_info->act_type ==
138 BNXT_ULP_ACT_TYPE_SUPPORTED) {
139 /* call the registered callback handler */
140 if (hdr_info->proto_act_func) {
141 if (hdr_info->proto_act_func(action_item,
143 BNXT_TF_RC_SUCCESS) {
144 return BNXT_TF_RC_ERROR;
/* (loop-advance and closing braces elided in this excerpt) */
150 /* update the implied VNIC */
151 ulp_rte_parser_vnic_process(params);
152 return BNXT_TF_RC_SUCCESS;
155 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Records the source-interface (SVIF) match in the SVIF hdr_field slot.
 * Only one SVIF source is allowed per flow; a second attempt is an error.
 * For PORT_ID items the DPDK port id is first translated to the bnxt SVIF
 * via the port database.  "¶ms" is corruption of "&params".
 */
157 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
158 enum rte_flow_item_type proto,
162 uint16_t port_id = svif;
164 struct ulp_rte_hdr_field *hdr_field;
/* Reject flows that already specified a source interface */
168 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF)) {
170 "SVIF already set,multiple source not support'd\n");
171 return BNXT_TF_RC_ERROR;
174 /*update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF */
175 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF);
177 if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
178 dir = ULP_UTIL_CHF_IDX_RD(params,
179 BNXT_ULP_CHF_IDX_DIRECTION);
180 /* perform the conversion from dpdk port to bnxt svif */
181 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
185 "Invalid port id\n");
186 return BNXT_TF_RC_ERROR;
188 ulp_port_db_svif_get(params->ulp_ctx, ifindex, dir, &svif);
/* SVIF is carried big-endian in the match field */
189 svif = rte_cpu_to_be_16(svif);
191 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
192 memcpy(hdr_field->spec, &svif, sizeof(svif));
193 memcpy(hdr_field->mask, &mask, sizeof(mask));
194 hdr_field->size = sizeof(svif);
195 return BNXT_TF_RC_SUCCESS;
198 /* Function to handle the parsing of the RTE port id */
/*
 * Applies the implied SVIF match when the pattern itself named no source
 * interface: the incoming interface recorded in the computed-header
 * fields is used as a fully-masked PORT_ID match.  No-op if an SVIF was
 * already set by an explicit item.
 */
200 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
202 uint16_t port_id = 0;
203 uint16_t svif_mask = 0xFFFF;
205 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF))
206 return BNXT_TF_RC_SUCCESS;
208 /* SVIF not set. So get the port id */
209 port_id = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF);
211 /* Update the SVIF details */
212 return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
216 /* Function to handle the implicit VNIC RTE port id */
/*
 * Applies the implied destination when the action list named neither a
 * VNIC nor a VPORT: falls back to the PF action handler (NULL action
 * item) to fill in the default VNIC.  "¶ms" is corruption of "&params".
 */
218 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
220 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
222 if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
223 ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
224 return BNXT_TF_RC_SUCCESS;
226 /* Update the vnic details */
227 ulp_rte_pf_act_handler(NULL, params);
228 return BNXT_TF_RC_SUCCESS;
231 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item: match on the incoming interface recorded in the computed
 * header fields, with a full SVIF mask.
 */
233 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
234 struct ulp_rte_parser_params *params)
236 uint16_t port_id = 0;
237 uint16_t svif_mask = 0xFFFF;
239 /* Get the port id */
240 port_id = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF);
242 /* Update the SVIF details */
243 return ulp_rte_parser_svif_set(params,
248 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF item: the VF id from spec/mask becomes the SVIF match.  The elided
 * lines presumably NULL-check vf_spec/vf_mask before dereferencing —
 * confirm against the full source.
 */
250 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
251 struct ulp_rte_parser_params *params)
253 const struct rte_flow_item_vf *vf_spec = item->spec;
254 const struct rte_flow_item_vf *vf_mask = item->mask;
255 uint16_t svif = 0, mask = 0;
257 /* Get VF rte_flow_item for Port details */
259 svif = (uint16_t)vf_spec->id;
261 mask = (uint16_t)vf_mask->id;
263 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
266 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * PORT_ID item: the DPDK port id from spec/mask becomes the SVIF match
 * (translated to a bnxt SVIF inside ulp_rte_parser_svif_set).  The
 * elided lines presumably NULL-check spec/mask — confirm.
 */
268 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
269 struct ulp_rte_parser_params *params)
271 const struct rte_flow_item_port_id *port_spec = item->spec;
272 const struct rte_flow_item_port_id *port_mask = item->mask;
273 uint16_t svif = 0, mask = 0;
276 * Copy the rte_flow_item for Port into hdr_field using port id
280 svif = (uint16_t)port_spec->id;
282 mask = (uint16_t)port_mask->id;
284 /* Update the SVIF details */
285 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
288 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * PHY_PORT item: the physical port index from spec/mask becomes the SVIF
 * match.  The elided lines presumably NULL-check spec/mask — confirm.
 */
290 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
291 struct ulp_rte_parser_params *params)
293 const struct rte_flow_item_phy_port *port_spec = item->spec;
294 const struct rte_flow_item_phy_port *port_mask = item->mask;
295 uint32_t svif = 0, mask = 0;
297 /* Copy the rte_flow_item for phy port into hdr_field */
299 svif = port_spec->index;
301 mask = port_mask->index;
303 /* Update the SVIF details */
304 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
307 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Copies dst MAC, src MAC and ether type spec/mask into consecutive
 * hdr_field slots, reserves the VLAN field slots that may follow, and
 * sets the outer- or inner-ethernet bit depending on whether an outer
 * ethernet header was already seen.
 * NOTE(review): "¶ms"/"ð_mask" below are HTML-entity corruption of
 * "&params"/"&eth_mask"; restore when fixing the file encoding.
 */
309 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
310 struct ulp_rte_parser_params *params)
312 const struct rte_flow_item_eth *eth_spec = item->spec;
313 const struct rte_flow_item_eth *eth_mask = item->mask;
314 struct ulp_rte_hdr_field *field;
315 uint32_t idx = params->field_idx;
316 uint64_t set_flag = 0;
320 * Copy the rte_flow_item for eth into hdr_field using ethernet
324 size = sizeof(eth_spec->dst.addr_bytes);
325 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
326 eth_spec->dst.addr_bytes,
328 size = sizeof(eth_spec->src.addr_bytes);
329 field = ulp_rte_parser_fld_copy(field,
330 eth_spec->src.addr_bytes,
332 field = ulp_rte_parser_fld_copy(field,
334 sizeof(eth_spec->type));
/* Mask copies advance idx through the same three slots */
337 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
338 sizeof(eth_mask->dst.addr_bytes));
339 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
340 sizeof(eth_mask->src.addr_bytes));
341 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
342 sizeof(eth_mask->type));
344 /* Add number of vlan header elements */
345 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
346 params->vlan_idx = params->field_idx;
347 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
349 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
350 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
351 BNXT_ULP_HDR_BIT_O_ETH);
/* Outer eth already present => this one is the inner header */
353 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
355 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
356 BNXT_ULP_HDR_BIT_I_ETH);
358 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
359 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
361 return BNXT_TF_RC_SUCCESS;
364 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Splits the TCI into priority (top 3 bits) and VLAN tag, stores
 * priority/tag/inner-type spec and mask into the reserved vlan hdr_field
 * slots, then classifies the tag as outer-outer, outer-inner,
 * inner-outer or inner-inner based on which eth/vlan bits are already
 * set, updating the vtag counters.  A vlan with no preceding ethernet
 * header is an error.  "¶ms" below is corruption of "&params";
 * "withtout" in the error string is a pre-existing typo (runtime string,
 * left untouched here).
 */
366 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
367 struct ulp_rte_parser_params *params)
369 const struct rte_flow_item_vlan *vlan_spec = item->spec;
370 const struct rte_flow_item_vlan *vlan_mask = item->mask;
371 struct ulp_rte_hdr_field *field;
372 struct ulp_rte_hdr_bitmap *hdr_bit;
373 uint32_t idx = params->vlan_idx;
374 uint16_t vlan_tag, priority;
375 uint32_t outer_vtag_num;
376 uint32_t inner_vtag_num;
379 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* Split spec TCI: PCP in the top 3 bits, tag in the rest */
383 vlan_tag = ntohs(vlan_spec->tci);
384 priority = htons(vlan_tag >> 13);
386 vlan_tag = htons(vlan_tag);
388 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
391 field = ulp_rte_parser_fld_copy(field,
394 field = ulp_rte_parser_fld_copy(field,
395 &vlan_spec->inner_type,
396 sizeof(vlan_spec->inner_type));
/* Same split for the mask TCI */
400 vlan_tag = ntohs(vlan_mask->tci);
401 priority = htons(vlan_tag >> 13);
403 vlan_tag = htons(vlan_tag);
405 field = ¶ms->hdr_field[idx];
406 memcpy(field->mask, &priority, field->size);
408 memcpy(field->mask, &vlan_tag, field->size);
410 memcpy(field->mask, &vlan_mask->inner_type, field->size);
412 /* Set the vlan index to new incremented value */
413 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
415 /* Get the outer tag and inner tag counts */
416 outer_vtag_num = ULP_UTIL_CHF_IDX_RD(params,
417 BNXT_ULP_CHF_IDX_O_VTAG_NUM);
418 inner_vtag_num = ULP_UTIL_CHF_IDX_RD(params,
419 BNXT_ULP_CHF_IDX_I_VTAG_NUM);
421 /* Update the hdr_bitmap of the vlans */
422 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first tag after the outer ethernet header */
423 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
424 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) {
425 /* Set the outer vlan bit and update the vlan tag num */
426 ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN);
428 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_NUM,
430 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_PRESENT, 1);
/* Case 2: second tag on the outer header */
431 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
432 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) &&
433 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN)) {
434 /* Set the outer vlan bit and update the vlan tag num */
435 ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN);
437 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_NUM,
439 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_TWO_VTAGS, 1);
/* Case 3: first tag after the inner ethernet header */
440 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
441 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) &&
442 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN) &&
443 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
444 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN)) {
445 /* Set the inner vlan bit and update the vlan tag num */
446 ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN);
448 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_NUM,
450 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_PRESENT, 1);
/* Case 4: second tag on the inner header */
451 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
452 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) &&
453 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN) &&
454 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
455 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN) &&
456 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_II_VLAN)) {
457 /* Set the inner vlan bit and update the vlan tag num */
458 ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_II_VLAN);
460 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_NUM,
462 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_TWO_VTAGS, 1);
/* else arm (elided): vlan without a preceding ethernet header */
464 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
465 return BNXT_TF_RC_ERROR;
467 return BNXT_TF_RC_SUCCESS;
470 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * Copies each IPv4 header field's spec and mask into consecutive
 * hdr_field slots, then marks the header as outer or inner IPv4: if an
 * outer L3 header (v4 or v6) was already seen this one is inner; a third
 * L3 header is rejected.  "¶ms" below is corruption of "&params".
 */
472 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
473 struct ulp_rte_parser_params *params)
475 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
476 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
477 struct ulp_rte_hdr_field *field;
478 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
479 uint32_t idx = params->field_idx;
481 uint32_t inner_l3, outer_l3;
/* An inner L3 already recorded means this would be the third L3 */
483 inner_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L3);
485 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
486 return BNXT_TF_RC_ERROR;
490 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
/* Spec copies: one hdr_field slot per IPv4 header field */
494 size = sizeof(ipv4_spec->hdr.version_ihl);
495 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
496 &ipv4_spec->hdr.version_ihl,
498 size = sizeof(ipv4_spec->hdr.type_of_service);
499 field = ulp_rte_parser_fld_copy(field,
500 &ipv4_spec->hdr.type_of_service,
502 size = sizeof(ipv4_spec->hdr.total_length);
503 field = ulp_rte_parser_fld_copy(field,
504 &ipv4_spec->hdr.total_length,
506 size = sizeof(ipv4_spec->hdr.packet_id);
507 field = ulp_rte_parser_fld_copy(field,
508 &ipv4_spec->hdr.packet_id,
510 size = sizeof(ipv4_spec->hdr.fragment_offset);
511 field = ulp_rte_parser_fld_copy(field,
512 &ipv4_spec->hdr.fragment_offset,
514 size = sizeof(ipv4_spec->hdr.time_to_live);
515 field = ulp_rte_parser_fld_copy(field,
516 &ipv4_spec->hdr.time_to_live,
518 size = sizeof(ipv4_spec->hdr.next_proto_id);
519 field = ulp_rte_parser_fld_copy(field,
520 &ipv4_spec->hdr.next_proto_id,
522 size = sizeof(ipv4_spec->hdr.hdr_checksum);
523 field = ulp_rte_parser_fld_copy(field,
524 &ipv4_spec->hdr.hdr_checksum,
526 size = sizeof(ipv4_spec->hdr.src_addr);
527 field = ulp_rte_parser_fld_copy(field,
528 &ipv4_spec->hdr.src_addr,
530 size = sizeof(ipv4_spec->hdr.dst_addr);
531 field = ulp_rte_parser_fld_copy(field,
532 &ipv4_spec->hdr.dst_addr,
/* Mask copies walk the same slots via idx */
536 ulp_rte_prsr_mask_copy(params, &idx,
537 &ipv4_mask->hdr.version_ihl,
538 sizeof(ipv4_mask->hdr.version_ihl));
539 ulp_rte_prsr_mask_copy(params, &idx,
540 &ipv4_mask->hdr.type_of_service,
541 sizeof(ipv4_mask->hdr.type_of_service));
542 ulp_rte_prsr_mask_copy(params, &idx,
543 &ipv4_mask->hdr.total_length,
544 sizeof(ipv4_mask->hdr.total_length));
545 ulp_rte_prsr_mask_copy(params, &idx,
546 &ipv4_mask->hdr.packet_id,
547 sizeof(ipv4_mask->hdr.packet_id));
548 ulp_rte_prsr_mask_copy(params, &idx,
549 &ipv4_mask->hdr.fragment_offset,
550 sizeof(ipv4_mask->hdr.fragment_offset));
551 ulp_rte_prsr_mask_copy(params, &idx,
552 &ipv4_mask->hdr.time_to_live,
553 sizeof(ipv4_mask->hdr.time_to_live));
554 ulp_rte_prsr_mask_copy(params, &idx,
555 &ipv4_mask->hdr.next_proto_id,
556 sizeof(ipv4_mask->hdr.next_proto_id));
557 ulp_rte_prsr_mask_copy(params, &idx,
558 &ipv4_mask->hdr.hdr_checksum,
559 sizeof(ipv4_mask->hdr.hdr_checksum));
560 ulp_rte_prsr_mask_copy(params, &idx,
561 &ipv4_mask->hdr.src_addr,
562 sizeof(ipv4_mask->hdr.src_addr));
563 ulp_rte_prsr_mask_copy(params, &idx,
564 &ipv4_mask->hdr.dst_addr,
565 sizeof(ipv4_mask->hdr.dst_addr));
567 /* Add the number of ipv4 header elements */
568 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
570 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
571 outer_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L3);
573 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
574 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
/* Outer L3 already present => this is the inner IPv4 */
575 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
577 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L3, inner_l3);
579 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
581 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L3, outer_l3);
583 return BNXT_TF_RC_SUCCESS;
586 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * Copies each IPv6 header field's spec and mask into consecutive
 * hdr_field slots, then marks the header as outer or inner IPv6 exactly
 * like the IPv4 handler; a third L3 header is rejected.  "¶ms" below
 * is corruption of "&params".
 */
588 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
589 struct ulp_rte_parser_params *params)
591 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
592 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
593 struct ulp_rte_hdr_field *field;
594 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
595 uint32_t idx = params->field_idx;
597 uint32_t inner_l3, outer_l3;
599 inner_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L3);
601 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
602 return BNXT_TF_RC_ERROR;
/* (comment says "ipv4" — copy/paste wording; this is the IPv6 path) */
606 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
610 size = sizeof(ipv6_spec->hdr.vtc_flow);
611 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
612 &ipv6_spec->hdr.vtc_flow,
614 size = sizeof(ipv6_spec->hdr.payload_len);
615 field = ulp_rte_parser_fld_copy(field,
616 &ipv6_spec->hdr.payload_len,
618 size = sizeof(ipv6_spec->hdr.proto);
619 field = ulp_rte_parser_fld_copy(field,
620 &ipv6_spec->hdr.proto,
622 size = sizeof(ipv6_spec->hdr.hop_limits);
623 field = ulp_rte_parser_fld_copy(field,
624 &ipv6_spec->hdr.hop_limits,
626 size = sizeof(ipv6_spec->hdr.src_addr);
627 field = ulp_rte_parser_fld_copy(field,
628 &ipv6_spec->hdr.src_addr,
630 size = sizeof(ipv6_spec->hdr.dst_addr);
631 field = ulp_rte_parser_fld_copy(field,
632 &ipv6_spec->hdr.dst_addr,
/* Mask copies walk the same slots via idx */
636 ulp_rte_prsr_mask_copy(params, &idx,
637 &ipv6_mask->hdr.vtc_flow,
638 sizeof(ipv6_mask->hdr.vtc_flow));
639 ulp_rte_prsr_mask_copy(params, &idx,
640 &ipv6_mask->hdr.payload_len,
641 sizeof(ipv6_mask->hdr.payload_len));
642 ulp_rte_prsr_mask_copy(params, &idx,
643 &ipv6_mask->hdr.proto,
644 sizeof(ipv6_mask->hdr.proto));
645 ulp_rte_prsr_mask_copy(params, &idx,
646 &ipv6_mask->hdr.hop_limits,
647 sizeof(ipv6_mask->hdr.hop_limits));
648 ulp_rte_prsr_mask_copy(params, &idx,
649 &ipv6_mask->hdr.src_addr,
650 sizeof(ipv6_mask->hdr.src_addr));
651 ulp_rte_prsr_mask_copy(params, &idx,
652 &ipv6_mask->hdr.dst_addr,
653 sizeof(ipv6_mask->hdr.dst_addr));
655 /* add number of ipv6 header elements */
656 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
658 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
659 outer_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L3);
661 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
662 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
663 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
664 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L3, 1);
666 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
667 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L3, 1);
669 return BNXT_TF_RC_SUCCESS;
672 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies UDP src/dst port, length and checksum spec/mask into
 * consecutive hdr_field slots, then marks the header as outer or inner
 * UDP: an existing outer L4 (UDP or TCP) makes this the inner L4; a
 * third L4 header is rejected.  "¶ms" below is corruption of "&params".
 */
674 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
675 struct ulp_rte_parser_params *params)
677 const struct rte_flow_item_udp *udp_spec = item->spec;
678 const struct rte_flow_item_udp *udp_mask = item->mask;
679 struct ulp_rte_hdr_field *field;
680 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
681 uint32_t idx = params->field_idx;
683 uint32_t inner_l4, outer_l4;
685 inner_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L4);
687 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
688 return BNXT_TF_RC_ERROR;
/* (comment says "ipv4" — copy/paste wording; this is the UDP path) */
692 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
696 size = sizeof(udp_spec->hdr.src_port);
697 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
698 &udp_spec->hdr.src_port,
700 size = sizeof(udp_spec->hdr.dst_port);
701 field = ulp_rte_parser_fld_copy(field,
702 &udp_spec->hdr.dst_port,
704 size = sizeof(udp_spec->hdr.dgram_len);
705 field = ulp_rte_parser_fld_copy(field,
706 &udp_spec->hdr.dgram_len,
708 size = sizeof(udp_spec->hdr.dgram_cksum);
709 field = ulp_rte_parser_fld_copy(field,
710 &udp_spec->hdr.dgram_cksum,
/* Mask copies walk the same slots via idx */
714 ulp_rte_prsr_mask_copy(params, &idx,
715 &udp_mask->hdr.src_port,
716 sizeof(udp_mask->hdr.src_port));
717 ulp_rte_prsr_mask_copy(params, &idx,
718 &udp_mask->hdr.dst_port,
719 sizeof(udp_mask->hdr.dst_port));
720 ulp_rte_prsr_mask_copy(params, &idx,
721 &udp_mask->hdr.dgram_len,
722 sizeof(udp_mask->hdr.dgram_len));
723 ulp_rte_prsr_mask_copy(params, &idx,
724 &udp_mask->hdr.dgram_cksum,
725 sizeof(udp_mask->hdr.dgram_cksum));
728 /* Add number of UDP header elements */
729 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
731 /* Set the udp header bitmap and computed l4 header bitmaps */
732 outer_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L4);
734 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
735 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
736 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
737 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L4, 1);
739 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
740 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L4, 1);
742 return BNXT_TF_RC_SUCCESS;
745 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies each TCP header field's spec and mask into consecutive
 * hdr_field slots, then marks the header as outer or inner TCP; a third
 * L4 header is rejected.  "¶ms" below is corruption of "&params".
 */
747 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
748 struct ulp_rte_parser_params *params)
750 const struct rte_flow_item_tcp *tcp_spec = item->spec;
751 const struct rte_flow_item_tcp *tcp_mask = item->mask;
752 struct ulp_rte_hdr_field *field;
753 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
754 uint32_t idx = params->field_idx;
756 uint32_t inner_l4, outer_l4;
758 inner_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L4);
760 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
761 return BNXT_TF_RC_ERROR;
/* (comment says "ipv4" — copy/paste wording; this is the TCP path) */
765 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
769 size = sizeof(tcp_spec->hdr.src_port);
770 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
771 &tcp_spec->hdr.src_port,
773 size = sizeof(tcp_spec->hdr.dst_port);
774 field = ulp_rte_parser_fld_copy(field,
775 &tcp_spec->hdr.dst_port,
777 size = sizeof(tcp_spec->hdr.sent_seq);
778 field = ulp_rte_parser_fld_copy(field,
779 &tcp_spec->hdr.sent_seq,
781 size = sizeof(tcp_spec->hdr.recv_ack);
782 field = ulp_rte_parser_fld_copy(field,
783 &tcp_spec->hdr.recv_ack,
785 size = sizeof(tcp_spec->hdr.data_off);
786 field = ulp_rte_parser_fld_copy(field,
787 &tcp_spec->hdr.data_off,
789 size = sizeof(tcp_spec->hdr.tcp_flags);
790 field = ulp_rte_parser_fld_copy(field,
791 &tcp_spec->hdr.tcp_flags,
793 size = sizeof(tcp_spec->hdr.rx_win);
794 field = ulp_rte_parser_fld_copy(field,
795 &tcp_spec->hdr.rx_win,
797 size = sizeof(tcp_spec->hdr.cksum);
798 field = ulp_rte_parser_fld_copy(field,
799 &tcp_spec->hdr.cksum,
801 size = sizeof(tcp_spec->hdr.tcp_urp);
802 field = ulp_rte_parser_fld_copy(field,
803 &tcp_spec->hdr.tcp_urp,
/*
 * NOTE(review): in the full source this idx bump sits on the
 * null-spec branch (skip all TCP slots when no spec is given) —
 * the surrounding else/braces are elided in this excerpt; confirm.
 */
806 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
/* Mask copies walk the same slots via idx */
810 ulp_rte_prsr_mask_copy(params, &idx,
811 &tcp_mask->hdr.src_port,
812 sizeof(tcp_mask->hdr.src_port));
813 ulp_rte_prsr_mask_copy(params, &idx,
814 &tcp_mask->hdr.dst_port,
815 sizeof(tcp_mask->hdr.dst_port));
816 ulp_rte_prsr_mask_copy(params, &idx,
817 &tcp_mask->hdr.sent_seq,
818 sizeof(tcp_mask->hdr.sent_seq));
819 ulp_rte_prsr_mask_copy(params, &idx,
820 &tcp_mask->hdr.recv_ack,
821 sizeof(tcp_mask->hdr.recv_ack));
822 ulp_rte_prsr_mask_copy(params, &idx,
823 &tcp_mask->hdr.data_off,
824 sizeof(tcp_mask->hdr.data_off));
825 ulp_rte_prsr_mask_copy(params, &idx,
826 &tcp_mask->hdr.tcp_flags,
827 sizeof(tcp_mask->hdr.tcp_flags));
828 ulp_rte_prsr_mask_copy(params, &idx,
829 &tcp_mask->hdr.rx_win,
830 sizeof(tcp_mask->hdr.rx_win));
831 ulp_rte_prsr_mask_copy(params, &idx,
832 &tcp_mask->hdr.cksum,
833 sizeof(tcp_mask->hdr.cksum));
834 ulp_rte_prsr_mask_copy(params, &idx,
835 &tcp_mask->hdr.tcp_urp,
836 sizeof(tcp_mask->hdr.tcp_urp));
838 /* add number of TCP header elements */
839 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
/* (comment says "udp" — copy/paste wording; these are the TCP bits) */
841 /* Set the udp header bitmap and computed l4 header bitmaps */
842 outer_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L4);
844 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
845 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
846 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
847 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L4, 1);
849 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
850 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L4, 1);
852 return BNXT_TF_RC_SUCCESS;
855 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies VXLAN flags, rsvd0, vni and rsvd1 spec/mask into consecutive
 * hdr_field slots and sets the tunnel-VXLAN bit in the header bitmap.
 * "¶ms" below is corruption of "&params".
 */
857 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
858 struct ulp_rte_parser_params *params)
860 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
861 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
862 struct ulp_rte_hdr_field *field;
863 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
864 uint32_t idx = params->field_idx;
868 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
872 size = sizeof(vxlan_spec->flags);
873 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
876 size = sizeof(vxlan_spec->rsvd0);
877 field = ulp_rte_parser_fld_copy(field,
880 size = sizeof(vxlan_spec->vni);
881 field = ulp_rte_parser_fld_copy(field,
884 size = sizeof(vxlan_spec->rsvd1);
885 field = ulp_rte_parser_fld_copy(field,
/* Mask copies walk the same slots via idx */
890 ulp_rte_prsr_mask_copy(params, &idx,
892 sizeof(vxlan_mask->flags));
893 ulp_rte_prsr_mask_copy(params, &idx,
895 sizeof(vxlan_mask->rsvd0));
896 ulp_rte_prsr_mask_copy(params, &idx,
898 sizeof(vxlan_mask->vni));
899 ulp_rte_prsr_mask_copy(params, &idx,
901 sizeof(vxlan_mask->rsvd1));
903 /* Add number of vxlan header elements */
904 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
906 /* Update the hdr_bitmap with vxlan */
907 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
908 return BNXT_TF_RC_SUCCESS;
911 /* Function to handle the parsing of RTE Flow item void Header */
/* VOID items carry no match criteria: accept and do nothing. */
913 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
914 struct ulp_rte_parser_params *params __rte_unused)
916 return BNXT_TF_RC_SUCCESS;
919 /* Function to handle the parsing of RTE Flow action void Header. */
/* VOID actions carry no semantics: accept and do nothing. */
921 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
922 struct ulp_rte_parser_params *params __rte_unused)
924 return BNXT_TF_RC_SUCCESS;
927 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the mark id (big-endian) into the MARK action property and sets
 * the MARK action bit; a missing/invalid conf (check elided in this
 * excerpt) is an error.  "¶m" below is corruption of "&param".
 * (The "vxlan" wording in the bitmap comment is a pre-existing
 * copy/paste artifact.)
 */
929 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
930 struct ulp_rte_parser_params *param)
932 const struct rte_flow_action_mark *mark;
933 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
936 mark = action_item->conf;
938 mark_id = tfp_cpu_to_be_32(mark->id);
939 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
940 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
942 /* Update the hdr_bitmap with vxlan */
943 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
944 return BNXT_TF_RC_SUCCESS;
946 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
947 return BNXT_TF_RC_ERROR;
950 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Sets the RSS action bit when a valid conf is supplied (the NULL check
 * is elided in this excerpt); otherwise errors out.  (The "vxlan"
 * wording in the bitmap comment is a pre-existing copy/paste artifact.)
 */
952 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
953 struct ulp_rte_parser_params *param)
955 const struct rte_flow_action_rss *rss = action_item->conf;
958 /* Update the hdr_bitmap with vxlan */
959 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
960 return BNXT_TF_RC_SUCCESS;
962 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
963 return BNXT_TF_RC_ERROR;
966 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
968 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
969 struct ulp_rte_parser_params *params)
971 const struct rte_flow_action_vxlan_encap *vxlan_encap;
972 const struct rte_flow_item *item;
973 const struct rte_flow_item_eth *eth_spec;
974 const struct rte_flow_item_ipv4 *ipv4_spec;
975 const struct rte_flow_item_ipv6 *ipv6_spec;
976 struct rte_flow_item_vxlan vxlan_spec;
977 uint32_t vlan_num = 0, vlan_size = 0;
978 uint32_t ip_size = 0, ip_type = 0;
979 uint32_t vxlan_size = 0;
981 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
982 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
984 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
985 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
987 vxlan_encap = action_item->conf;
989 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
990 return BNXT_TF_RC_ERROR;
993 item = vxlan_encap->definition;
995 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
996 return BNXT_TF_RC_ERROR;
999 if (!ulp_rte_item_skip_void(&item, 0))
1000 return BNXT_TF_RC_ERROR;
1002 /* must have ethernet header */
1003 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1004 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1005 return BNXT_TF_RC_ERROR;
1007 eth_spec = item->spec;
1008 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1009 ulp_encap_buffer_copy(buff,
1010 eth_spec->dst.addr_bytes,
1011 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1013 /* Goto the next item */
1014 if (!ulp_rte_item_skip_void(&item, 1))
1015 return BNXT_TF_RC_ERROR;
1017 /* May have vlan header */
1018 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1020 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1021 ulp_encap_buffer_copy(buff,
1023 sizeof(struct rte_flow_item_vlan));
1025 if (!ulp_rte_item_skip_void(&item, 1))
1026 return BNXT_TF_RC_ERROR;
1029 /* may have two vlan headers */
1030 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1032 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1033 sizeof(struct rte_flow_item_vlan)],
1035 sizeof(struct rte_flow_item_vlan));
1036 if (!ulp_rte_item_skip_void(&item, 1))
1037 return BNXT_TF_RC_ERROR;
1039 /* Update the vlan count and size of more than one */
1041 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1042 vlan_num = tfp_cpu_to_be_32(vlan_num);
1043 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1046 vlan_size = tfp_cpu_to_be_32(vlan_size);
1047 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1052 /* L3 must be IPv4, IPv6 */
1053 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1054 ipv4_spec = item->spec;
1055 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1057 /* copy the ipv4 details */
1058 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1059 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1060 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1061 ulp_encap_buffer_copy(buff,
1063 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1064 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1066 const uint8_t *tmp_buff;
1068 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1069 ulp_encap_buffer_copy(buff,
1070 &ipv4_spec->hdr.version_ihl,
1071 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1072 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1073 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1074 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1075 ulp_encap_buffer_copy(buff,
1077 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1079 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1080 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1081 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1082 ulp_encap_buffer_copy(buff,
1083 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1084 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1086 /* Update the ip size details */
1087 ip_size = tfp_cpu_to_be_32(ip_size);
1088 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1089 &ip_size, sizeof(uint32_t));
1091 /* update the ip type */
1092 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1093 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1094 &ip_type, sizeof(uint32_t));
1096 if (!ulp_rte_item_skip_void(&item, 1))
1097 return BNXT_TF_RC_ERROR;
1098 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1099 ipv6_spec = item->spec;
1100 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1102 /* copy the ipv4 details */
1103 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1104 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1106 /* Update the ip size details */
1107 ip_size = tfp_cpu_to_be_32(ip_size);
1108 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1109 &ip_size, sizeof(uint32_t));
1111 /* update the ip type */
1112 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1113 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1114 &ip_type, sizeof(uint32_t));
1116 if (!ulp_rte_item_skip_void(&item, 1))
1117 return BNXT_TF_RC_ERROR;
1119 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1120 return BNXT_TF_RC_ERROR;
1124 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1125 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1126 return BNXT_TF_RC_ERROR;
1128 /* copy the udp details */
1129 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1130 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1132 if (!ulp_rte_item_skip_void(&item, 1))
1133 return BNXT_TF_RC_ERROR;
1136 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1137 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1138 return BNXT_TF_RC_ERROR;
1140 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1141 /* copy the vxlan details */
1142 memcpy(&vxlan_spec, item->spec, vxlan_size);
1143 vxlan_spec.flags = 0x08;
1144 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1145 (const uint8_t *)&vxlan_spec,
1147 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1148 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1149 &vxlan_size, sizeof(uint32_t));
1151 /*update the hdr_bitmap with vxlan */
1152 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1153 return BNXT_TF_RC_SUCCESS;
1156 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1158 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1160 struct ulp_rte_parser_params *params)
1162 /* update the hdr_bitmap with vxlan */
1163 ULP_BITMAP_SET(params->act_bitmap.bits,
1164 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1165 return BNXT_TF_RC_SUCCESS;
1168 /* Function to handle the parsing of RTE Flow action drop Header. */
1170 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1171 struct ulp_rte_parser_params *params)
1173 /* Update the hdr_bitmap with drop */
1174 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1175 return BNXT_TF_RC_SUCCESS;
1178 /* Function to handle the parsing of RTE Flow action count. */
1180 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1181 struct ulp_rte_parser_params *params)
1184 const struct rte_flow_action_count *act_count;
1185 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1187 act_count = action_item->conf;
1189 if (act_count->shared) {
1191 "Parse Error:Shared count not supported\n");
1192 return BNXT_TF_RC_PARSE_ERR;
1194 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1196 BNXT_ULP_ACT_PROP_SZ_COUNT);
1199 /* Update the hdr_bitmap with count */
1200 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1201 return BNXT_TF_RC_SUCCESS;
1204 /* Function to handle the parsing of RTE Flow action PF. */
1206 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1207 struct ulp_rte_parser_params *params)
1211 /* Update the hdr_bitmap with vnic bit */
1212 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1214 /* copy the PF of the current device into VNIC Property */
1215 svif = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF);
1216 svif = bnxt_get_vnic_id(svif);
1217 svif = rte_cpu_to_be_32(svif);
1218 memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1219 &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1221 return BNXT_TF_RC_SUCCESS;
1224 /* Function to handle the parsing of RTE Flow action VF. */
1226 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1227 struct ulp_rte_parser_params *param)
1229 const struct rte_flow_action_vf *vf_action;
1232 vf_action = action_item->conf;
1234 if (vf_action->original) {
1236 "Parse Error:VF Original not supported\n");
1237 return BNXT_TF_RC_PARSE_ERR;
1239 /* TBD: Update the computed VNIC using VF conversion */
1240 pid = bnxt_get_vnic_id(vf_action->id);
1241 pid = rte_cpu_to_be_32(pid);
1242 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1243 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1246 /* Update the hdr_bitmap with count */
1247 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1248 return BNXT_TF_RC_SUCCESS;
1251 /* Function to handle the parsing of RTE Flow action port_id. */
1253 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1254 struct ulp_rte_parser_params *param)
1256 const struct rte_flow_action_port_id *port_id;
1259 port_id = act_item->conf;
1261 if (port_id->original) {
1263 "ParseErr:Portid Original not supported\n");
1264 return BNXT_TF_RC_PARSE_ERR;
1266 /* TBD: Update the computed VNIC using port conversion */
1267 pid = bnxt_get_vnic_id(port_id->id);
1268 pid = rte_cpu_to_be_32(pid);
1269 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1270 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1273 /* Update the hdr_bitmap with count */
1274 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1275 return BNXT_TF_RC_SUCCESS;
1278 /* Function to handle the parsing of RTE Flow action phy_port. */
1280 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1281 struct ulp_rte_parser_params *prm)
1283 const struct rte_flow_action_phy_port *phy_port;
1286 phy_port = action_item->conf;
1288 if (phy_port->original) {
1290 "Parse Err:Port Original not supported\n");
1291 return BNXT_TF_RC_PARSE_ERR;
1293 pid = bnxt_get_vnic_id(phy_port->index);
1294 pid = rte_cpu_to_be_32(pid);
1295 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1296 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1299 /* Update the hdr_bitmap with count */
1300 ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1301 return BNXT_TF_RC_SUCCESS;