1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Utility function to skip the void items. */
/* NOTE(review): the return type line and part of the loop body are not
 * visible in this extract; 'increment' presumably pre-advances *item
 * before the skip loop — confirm against the full source. */
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
/* Advance past consecutive RTE_FLOW_ITEM_TYPE_VOID placeholders. */
23 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 /* Utility function to update the field_bitmap */
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
35 struct ulp_rte_hdr_field *field;
/* Fixed mojibake: '&params' had been mangled to a pilcrow sequence. */
37 field = &params->hdr_field[idx];
/* A non-zero mask means this field participates in the match. */
38 if (ulp_bitmap_notzero(field->mask, field->size)) {
39 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* A partial (non-all-ones) mask forces wildcard-match handling. */
41 if (!ulp_bitmap_is_ones(field->mask, field->size))
42 ULP_BITMAP_SET(params->fld_bitmap.bits,
43 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
45 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
49 /* Utility function to copy field spec items */
50 static struct ulp_rte_hdr_field *
/* Copies the caller's spec bytes into 'field'. NOTE(review): the size
 * assignment and the returned-pointer advance are not visible in this
 * extract; callers appear to rely on the next field being returned. */
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
56 memcpy(field->spec, buffer, field->size);
61 /* Utility function to copy field masks items */
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
/* Fixed mojibake: '&params' had been mangled to a pilcrow sequence. */
68 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
70 memcpy(field->mask, buffer, size);
/* Refresh the field bitmap now that this field's mask has changed. */
71 ulp_rte_parser_field_bitmap_update(params, *idx);
76 * Function to handle the parsing of RTE Flows and placing
77 * the RTE flow items into the ulp structures.
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81 struct ulp_rte_parser_params *params)
83 const struct rte_flow_item *item = pattern;
84 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Field slots below SVIF_NUM are reserved for the implicit SVIF match. */
86 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87 if (params->dir == ULP_DIR_EGRESS)
88 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
91 /* Parse all the items in the pattern */
92 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
93 /* get the header information from the flow_hdr_info table */
94 hdr_info = &ulp_hdr_info[item->type];
95 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
97 "Truflow parser does not support type %d\n",
99 return BNXT_TF_RC_PARSE_ERR;
100 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
101 /* call the registered callback handler */
102 if (hdr_info->proto_hdr_func) {
103 if (hdr_info->proto_hdr_func(item, params) !=
104 BNXT_TF_RC_SUCCESS) {
105 return BNXT_TF_RC_ERROR;
/* NOTE(review): loop-advance and brace-closing lines are not visible
 * in this extract of the file. */
111 /* update the implied SVIF */
112 (void)ulp_rte_parser_svif_process(params);
113 return BNXT_TF_RC_SUCCESS;
117 * Function to handle the parsing of RTE Flows and placing
118 * the RTE flow actions into the ulp structures.
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122 struct ulp_rte_parser_params *params)
124 const struct rte_flow_action *action_item = actions;
125 struct bnxt_ulp_rte_act_info *hdr_info;
127 if (params->dir == ULP_DIR_EGRESS)
128 ULP_BITMAP_SET(params->act_bitmap.bits,
129 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
131 /* Parse all the items in the pattern */
132 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
133 /* get the header information from the flow_hdr_info table */
134 hdr_info = &ulp_act_info[action_item->type];
135 if (hdr_info->act_type ==
136 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
138 "Truflow parser does not support act %u\n",
140 return BNXT_TF_RC_ERROR;
141 } else if (hdr_info->act_type ==
142 BNXT_ULP_ACT_TYPE_SUPPORTED) {
143 /* call the registered callback handler */
144 if (hdr_info->proto_act_func) {
145 if (hdr_info->proto_act_func(action_item,
147 BNXT_TF_RC_SUCCESS) {
148 return BNXT_TF_RC_ERROR;
/* NOTE(review): loop-advance and brace-closing lines are not visible
 * in this extract of the file. */
154 /* update the implied VNIC */
155 ulp_rte_parser_vnic_process(params);
156 return BNXT_TF_RC_SUCCESS;
159 /* Function to handle the parsing of RTE Flow item PF Header. */
161 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
162 enum rte_flow_item_type proto,
166 uint16_t port_id = svif;
168 struct ulp_rte_hdr_field *hdr_field;
/* Only one source (SVIF) may be specified per flow. */
172 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
173 BNXT_ULP_INVALID_SVIF_VAL) {
175 "SVIF already set,multiple source not support'd\n");
176 return BNXT_TF_RC_ERROR;
179 if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
180 dir = ULP_COMP_FLD_IDX_RD(params,
181 BNXT_ULP_CF_IDX_DIRECTION);
182 /* perform the conversion from dpdk port to bnxt svif */
183 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
187 "Invalid port id\n");
188 return BNXT_TF_RC_ERROR;
190 ulp_port_db_svif_get(params->ulp_ctx, ifindex, dir, &svif);
191 svif = rte_cpu_to_be_16(svif);
/* Fixed mojibake: '&params' had been mangled to a pilcrow sequence. */
193 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
194 memcpy(hdr_field->spec, &svif, sizeof(svif));
195 memcpy(hdr_field->mask, &mask, sizeof(mask));
196 hdr_field->size = sizeof(svif);
/* Record the SVIF (host order) so later items cannot set it again. */
197 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
198 rte_be_to_cpu_16(svif));
199 return BNXT_TF_RC_SUCCESS;
202 /* Function to handle the parsing of the RTE port id */
/* If no explicit source item set the SVIF, derive it from the incoming
 * interface recorded in the computed fields. */
204 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
206 uint16_t port_id = 0;
207 uint16_t svif_mask = 0xFFFF;
/* Nothing to do when a pattern item already provided the SVIF. */
209 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
210 BNXT_ULP_INVALID_SVIF_VAL)
211 return BNXT_TF_RC_SUCCESS;
213 /* SVIF not set. So get the port id */
214 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
216 /* Update the SVIF details */
217 return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
221 /* Function to handle the implicit VNIC RTE port id */
223 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
/* Fixed mojibake: '&params' had been mangled to a pilcrow sequence. */
225 struct ulp_rte_act_bitmap *act = &params->act_bitmap;
/* An explicit VNIC or VPORT action already fixes the destination. */
227 if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
228 ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
229 return BNXT_TF_RC_SUCCESS;
231 /* Update the vnic details */
232 ulp_rte_pf_act_handler(NULL, params);
233 /* Reset the hdr_bitmap with vnic bit */
234 ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
236 return BNXT_TF_RC_SUCCESS;
239 /* Function to handle the parsing of RTE Flow item PF Header. */
241 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
242 struct ulp_rte_parser_params *params)
244 uint16_t port_id = 0;
245 uint16_t svif_mask = 0xFFFF;
247 /* Get the port id */
/* PF matches the flow's own incoming interface, fully masked. */
248 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
250 /* Update the SVIF details */
251 return ulp_rte_parser_svif_set(params,
256 /* Function to handle the parsing of RTE Flow item VF Header. */
258 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
259 struct ulp_rte_parser_params *params)
261 const struct rte_flow_item_vf *vf_spec = item->spec;
262 const struct rte_flow_item_vf *vf_mask = item->mask;
263 uint16_t svif = 0, mask = 0;
265 /* Get VF rte_flow_item for Port details */
/* NOTE(review): the NULL guards for vf_spec/vf_mask are not visible in
 * this extract (the intervening lines are missing). */
267 svif = (uint16_t)vf_spec->id;
269 mask = (uint16_t)vf_mask->id;
271 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
274 /* Function to handle the parsing of RTE Flow item port id Header. */
276 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
277 struct ulp_rte_parser_params *params)
279 const struct rte_flow_item_port_id *port_spec = item->spec;
280 const struct rte_flow_item_port_id *port_mask = item->mask;
281 uint16_t svif = 0, mask = 0;
284 * Copy the rte_flow_item for Port into hdr_field using port id
/* NOTE(review): the NULL guards for port_spec/port_mask are not
 * visible in this extract (the intervening lines are missing). */
288 svif = (uint16_t)port_spec->id;
290 mask = (uint16_t)port_mask->id;
292 /* Update the SVIF details */
293 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
296 /* Function to handle the parsing of RTE Flow item phy port Header. */
298 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
299 struct ulp_rte_parser_params *params)
301 const struct rte_flow_item_phy_port *port_spec = item->spec;
302 const struct rte_flow_item_phy_port *port_mask = item->mask;
303 uint32_t svif = 0, mask = 0;
305 /* Copy the rte_flow_item for phy port into hdr_field */
/* NOTE(review): the NULL guards for port_spec/port_mask are not
 * visible in this extract (the intervening lines are missing). */
307 svif = port_spec->index;
309 mask = port_mask->index;
311 /* Update the SVIF details */
312 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
315 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
317 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
318 struct ulp_rte_parser_params *params)
320 const struct rte_flow_item_eth *eth_spec = item->spec;
321 const struct rte_flow_item_eth *eth_mask = item->mask;
322 struct ulp_rte_hdr_field *field;
323 uint32_t idx = params->field_idx;
324 uint64_t set_flag = 0;
328 * Copy the rte_flow_item for eth into hdr_field using ethernet
/* Fixed mojibake: '&params' had been mangled to a pilcrow sequence. */
332 size = sizeof(eth_spec->dst.addr_bytes);
333 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
334 eth_spec->dst.addr_bytes,
336 size = sizeof(eth_spec->src.addr_bytes);
337 field = ulp_rte_parser_fld_copy(field,
338 eth_spec->src.addr_bytes,
340 field = ulp_rte_parser_fld_copy(field,
342 sizeof(eth_spec->type));
/* Fixed mojibake: '&eth_mask' had been mangled to an eth-glyph. */
345 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
346 sizeof(eth_mask->dst.addr_bytes));
347 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
348 sizeof(eth_mask->src.addr_bytes));
349 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
350 sizeof(eth_mask->type));
352 /* Add number of vlan header elements */
/* Reserve the VLAN field slots right after the eth fields so a later
 * VLAN item lands at a fixed offset (params->vlan_idx). */
353 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
354 params->vlan_idx = params->field_idx;
355 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
357 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
/* A second ETH item (outer already seen) is the inner header. */
358 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
359 BNXT_ULP_HDR_BIT_O_ETH);
361 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
363 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
364 BNXT_ULP_HDR_BIT_I_ETH);
366 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
367 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
369 return BNXT_TF_RC_SUCCESS;
372 /* Function to handle the parsing of RTE Flow item Vlan Header. */
374 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
375 struct ulp_rte_parser_params *params)
377 const struct rte_flow_item_vlan *vlan_spec = item->spec;
378 const struct rte_flow_item_vlan *vlan_mask = item->mask;
379 struct ulp_rte_hdr_field *field;
380 struct ulp_rte_hdr_bitmap *hdr_bit;
381 uint32_t idx = params->vlan_idx;
382 uint16_t vlan_tag, priority;
383 uint32_t outer_vtag_num;
384 uint32_t inner_vtag_num;
387 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* Split the TCI into 3-bit priority and 12-bit tag. */
391 vlan_tag = ntohs(vlan_spec->tci);
392 priority = htons(vlan_tag >> 13);
394 vlan_tag = htons(vlan_tag);
/* Fixed mojibake: '&params' had been mangled to a pilcrow sequence. */
396 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
399 field = ulp_rte_parser_fld_copy(field,
402 field = ulp_rte_parser_fld_copy(field,
403 &vlan_spec->inner_type,
404 sizeof(vlan_spec->inner_type));
408 vlan_tag = ntohs(vlan_mask->tci);
409 priority = htons(vlan_tag >> 13);
411 vlan_tag = htons(vlan_tag);
413 field = &params->hdr_field[idx];
414 memcpy(field->mask, &priority, field->size);
416 memcpy(field->mask, &vlan_tag, field->size);
418 memcpy(field->mask, &vlan_mask->inner_type, field->size);
420 /* Set the vlan index to new incremented value */
421 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
423 /* Get the outer tag and inner tag counts */
424 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
425 BNXT_ULP_CF_IDX_O_VTAG_NUM);
426 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
427 BNXT_ULP_CF_IDX_I_VTAG_NUM);
429 /* Update the hdr_bitmap of the vlans */
430 hdr_bit = &params->hdr_bitmap;
/* Case 1: first vlan after the outer eth header. */
431 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
433 /* Update the vlan tag num */
435 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
437 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_PRESENT, 1);
/* Case 2: second (QinQ) vlan on the outer eth header. */
438 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
439 ULP_COMP_FLD_IDX_RD(params,
440 BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
441 outer_vtag_num == 1) {
442 /* update the vlan tag num */
444 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
446 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
/* Case 3: first vlan after the inner eth header. */
447 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
448 ULP_COMP_FLD_IDX_RD(params,
449 BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
450 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
452 /* update the vlan tag num */
454 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
456 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_PRESENT, 1);
/* Case 4: second (QinQ) vlan on the inner eth header.
 * FIX(review): the second presence check below read O_VTAG_PRESENT
 * again (copy-paste); the inner-header branch must test the inner
 * tag's presence flag. */
457 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
458 ULP_COMP_FLD_IDX_RD(params,
459 BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
460 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
461 ULP_COMP_FLD_IDX_RD(params,
462 BNXT_ULP_CF_IDX_I_VTAG_PRESENT) &&
463 inner_vtag_num == 1) {
464 /* update the vlan tag num */
466 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
468 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
470 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
471 return BNXT_TF_RC_ERROR;
473 return BNXT_TF_RC_SUCCESS;
476 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
478 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
479 struct ulp_rte_parser_params *params)
481 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
482 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
483 struct ulp_rte_hdr_field *field;
/* Fixed mojibake: '&params' had been mangled to a pilcrow sequence. */
484 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
485 uint32_t idx = params->field_idx;
487 uint32_t inner_l3, outer_l3;
/* Only two stacked L3 headers (outer + inner) are supported. */
489 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
491 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
492 return BNXT_TF_RC_ERROR;
496 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
500 size = sizeof(ipv4_spec->hdr.version_ihl);
501 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
502 &ipv4_spec->hdr.version_ihl,
504 size = sizeof(ipv4_spec->hdr.type_of_service);
505 field = ulp_rte_parser_fld_copy(field,
506 &ipv4_spec->hdr.type_of_service,
508 size = sizeof(ipv4_spec->hdr.total_length);
509 field = ulp_rte_parser_fld_copy(field,
510 &ipv4_spec->hdr.total_length,
512 size = sizeof(ipv4_spec->hdr.packet_id);
513 field = ulp_rte_parser_fld_copy(field,
514 &ipv4_spec->hdr.packet_id,
516 size = sizeof(ipv4_spec->hdr.fragment_offset);
517 field = ulp_rte_parser_fld_copy(field,
518 &ipv4_spec->hdr.fragment_offset,
520 size = sizeof(ipv4_spec->hdr.time_to_live);
521 field = ulp_rte_parser_fld_copy(field,
522 &ipv4_spec->hdr.time_to_live,
524 size = sizeof(ipv4_spec->hdr.next_proto_id);
525 field = ulp_rte_parser_fld_copy(field,
526 &ipv4_spec->hdr.next_proto_id,
528 size = sizeof(ipv4_spec->hdr.hdr_checksum);
529 field = ulp_rte_parser_fld_copy(field,
530 &ipv4_spec->hdr.hdr_checksum,
532 size = sizeof(ipv4_spec->hdr.src_addr);
533 field = ulp_rte_parser_fld_copy(field,
534 &ipv4_spec->hdr.src_addr,
536 size = sizeof(ipv4_spec->hdr.dst_addr);
537 field = ulp_rte_parser_fld_copy(field,
538 &ipv4_spec->hdr.dst_addr,
542 ulp_rte_prsr_mask_copy(params, &idx,
543 &ipv4_mask->hdr.version_ihl,
544 sizeof(ipv4_mask->hdr.version_ihl));
545 ulp_rte_prsr_mask_copy(params, &idx,
546 &ipv4_mask->hdr.type_of_service,
547 sizeof(ipv4_mask->hdr.type_of_service));
548 ulp_rte_prsr_mask_copy(params, &idx,
549 &ipv4_mask->hdr.total_length,
550 sizeof(ipv4_mask->hdr.total_length));
551 ulp_rte_prsr_mask_copy(params, &idx,
552 &ipv4_mask->hdr.packet_id,
553 sizeof(ipv4_mask->hdr.packet_id));
554 ulp_rte_prsr_mask_copy(params, &idx,
555 &ipv4_mask->hdr.fragment_offset,
556 sizeof(ipv4_mask->hdr.fragment_offset));
557 ulp_rte_prsr_mask_copy(params, &idx,
558 &ipv4_mask->hdr.time_to_live,
559 sizeof(ipv4_mask->hdr.time_to_live));
560 ulp_rte_prsr_mask_copy(params, &idx,
561 &ipv4_mask->hdr.next_proto_id,
562 sizeof(ipv4_mask->hdr.next_proto_id));
563 ulp_rte_prsr_mask_copy(params, &idx,
564 &ipv4_mask->hdr.hdr_checksum,
565 sizeof(ipv4_mask->hdr.hdr_checksum));
566 ulp_rte_prsr_mask_copy(params, &idx,
567 &ipv4_mask->hdr.src_addr,
568 sizeof(ipv4_mask->hdr.src_addr));
569 ulp_rte_prsr_mask_copy(params, &idx,
570 &ipv4_mask->hdr.dst_addr,
571 sizeof(ipv4_mask->hdr.dst_addr));
573 /* Add the number of ipv4 header elements */
574 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
576 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
577 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
579 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
580 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
581 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
583 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
585 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
587 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
589 return BNXT_TF_RC_SUCCESS;
592 /* Function to handle the parsing of RTE Flow item IPV6 Header */
594 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
595 struct ulp_rte_parser_params *params)
597 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
598 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
599 struct ulp_rte_hdr_field *field;
/* Fixed mojibake: '&params' had been mangled to a pilcrow sequence. */
600 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
601 uint32_t idx = params->field_idx;
603 uint32_t inner_l3, outer_l3;
604 uint32_t vtcf, vtcf_mask;
/* Only two stacked L3 headers (outer + inner) are supported. */
606 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
608 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
609 return BNXT_TF_RC_ERROR;
613 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* vtc_flow is decomposed into version, traffic class and flow label. */
617 size = sizeof(ipv6_spec->hdr.vtc_flow);
619 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
620 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
624 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
625 field = ulp_rte_parser_fld_copy(field,
629 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
630 field = ulp_rte_parser_fld_copy(field,
634 size = sizeof(ipv6_spec->hdr.payload_len);
635 field = ulp_rte_parser_fld_copy(field,
636 &ipv6_spec->hdr.payload_len,
638 size = sizeof(ipv6_spec->hdr.proto);
639 field = ulp_rte_parser_fld_copy(field,
640 &ipv6_spec->hdr.proto,
642 size = sizeof(ipv6_spec->hdr.hop_limits);
643 field = ulp_rte_parser_fld_copy(field,
644 &ipv6_spec->hdr.hop_limits,
646 size = sizeof(ipv6_spec->hdr.src_addr);
647 field = ulp_rte_parser_fld_copy(field,
648 &ipv6_spec->hdr.src_addr,
650 size = sizeof(ipv6_spec->hdr.dst_addr);
651 field = ulp_rte_parser_fld_copy(field,
652 &ipv6_spec->hdr.dst_addr,
656 size = sizeof(ipv6_mask->hdr.vtc_flow);
658 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
659 ulp_rte_prsr_mask_copy(params, &idx,
663 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
664 ulp_rte_prsr_mask_copy(params, &idx,
669 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
670 ulp_rte_prsr_mask_copy(params, &idx,
674 ulp_rte_prsr_mask_copy(params, &idx,
675 &ipv6_mask->hdr.payload_len,
676 sizeof(ipv6_mask->hdr.payload_len));
677 ulp_rte_prsr_mask_copy(params, &idx,
678 &ipv6_mask->hdr.proto,
679 sizeof(ipv6_mask->hdr.proto));
680 ulp_rte_prsr_mask_copy(params, &idx,
681 &ipv6_mask->hdr.hop_limits,
682 sizeof(ipv6_mask->hdr.hop_limits));
683 ulp_rte_prsr_mask_copy(params, &idx,
684 &ipv6_mask->hdr.src_addr,
685 sizeof(ipv6_mask->hdr.src_addr));
686 ulp_rte_prsr_mask_copy(params, &idx,
687 &ipv6_mask->hdr.dst_addr,
688 sizeof(ipv6_mask->hdr.dst_addr));
690 /* add number of ipv6 header elements */
691 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
693 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
694 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
696 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
697 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
698 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
699 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
701 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
702 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
704 return BNXT_TF_RC_SUCCESS;
707 /* Function to handle the parsing of RTE Flow item UDP Header. */
709 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
710 struct ulp_rte_parser_params *params)
712 const struct rte_flow_item_udp *udp_spec = item->spec;
713 const struct rte_flow_item_udp *udp_mask = item->mask;
714 struct ulp_rte_hdr_field *field;
/* Fixed mojibake: '&params' had been mangled to a pilcrow sequence. */
715 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
716 uint32_t idx = params->field_idx;
718 uint32_t inner_l4, outer_l4;
/* Only two stacked L4 headers (outer + inner) are supported. */
720 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
722 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
723 return BNXT_TF_RC_ERROR;
727 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
731 size = sizeof(udp_spec->hdr.src_port);
732 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
733 &udp_spec->hdr.src_port,
735 size = sizeof(udp_spec->hdr.dst_port);
736 field = ulp_rte_parser_fld_copy(field,
737 &udp_spec->hdr.dst_port,
739 size = sizeof(udp_spec->hdr.dgram_len);
740 field = ulp_rte_parser_fld_copy(field,
741 &udp_spec->hdr.dgram_len,
743 size = sizeof(udp_spec->hdr.dgram_cksum);
744 field = ulp_rte_parser_fld_copy(field,
745 &udp_spec->hdr.dgram_cksum,
749 ulp_rte_prsr_mask_copy(params, &idx,
750 &udp_mask->hdr.src_port,
751 sizeof(udp_mask->hdr.src_port));
752 ulp_rte_prsr_mask_copy(params, &idx,
753 &udp_mask->hdr.dst_port,
754 sizeof(udp_mask->hdr.dst_port));
755 ulp_rte_prsr_mask_copy(params, &idx,
756 &udp_mask->hdr.dgram_len,
757 sizeof(udp_mask->hdr.dgram_len));
758 ulp_rte_prsr_mask_copy(params, &idx,
759 &udp_mask->hdr.dgram_cksum,
760 sizeof(udp_mask->hdr.dgram_cksum));
763 /* Add number of UDP header elements */
764 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
766 /* Set the udp header bitmap and computed l4 header bitmaps */
767 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
769 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
770 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
771 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
772 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
774 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
775 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
777 return BNXT_TF_RC_SUCCESS;
780 /* Function to handle the parsing of RTE Flow item TCP Header. */
782 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
783 struct ulp_rte_parser_params *params)
785 const struct rte_flow_item_tcp *tcp_spec = item->spec;
786 const struct rte_flow_item_tcp *tcp_mask = item->mask;
787 struct ulp_rte_hdr_field *field;
/* Fixed mojibake: '&params' had been mangled to a pilcrow sequence. */
788 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
789 uint32_t idx = params->field_idx;
791 uint32_t inner_l4, outer_l4;
/* Only two stacked L4 headers (outer + inner) are supported. */
793 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
795 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
796 return BNXT_TF_RC_ERROR;
800 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
804 size = sizeof(tcp_spec->hdr.src_port);
805 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
806 &tcp_spec->hdr.src_port,
808 size = sizeof(tcp_spec->hdr.dst_port);
809 field = ulp_rte_parser_fld_copy(field,
810 &tcp_spec->hdr.dst_port,
812 size = sizeof(tcp_spec->hdr.sent_seq);
813 field = ulp_rte_parser_fld_copy(field,
814 &tcp_spec->hdr.sent_seq,
816 size = sizeof(tcp_spec->hdr.recv_ack);
817 field = ulp_rte_parser_fld_copy(field,
818 &tcp_spec->hdr.recv_ack,
820 size = sizeof(tcp_spec->hdr.data_off);
821 field = ulp_rte_parser_fld_copy(field,
822 &tcp_spec->hdr.data_off,
824 size = sizeof(tcp_spec->hdr.tcp_flags);
825 field = ulp_rte_parser_fld_copy(field,
826 &tcp_spec->hdr.tcp_flags,
828 size = sizeof(tcp_spec->hdr.rx_win);
829 field = ulp_rte_parser_fld_copy(field,
830 &tcp_spec->hdr.rx_win,
832 size = sizeof(tcp_spec->hdr.cksum);
833 field = ulp_rte_parser_fld_copy(field,
834 &tcp_spec->hdr.cksum,
836 size = sizeof(tcp_spec->hdr.tcp_urp);
837 field = ulp_rte_parser_fld_copy(field,
838 &tcp_spec->hdr.tcp_urp,
/* When no spec was given, still advance idx past the TCP field slots
 * so the mask copies below land at the right offsets. */
841 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
845 ulp_rte_prsr_mask_copy(params, &idx,
846 &tcp_mask->hdr.src_port,
847 sizeof(tcp_mask->hdr.src_port));
848 ulp_rte_prsr_mask_copy(params, &idx,
849 &tcp_mask->hdr.dst_port,
850 sizeof(tcp_mask->hdr.dst_port));
851 ulp_rte_prsr_mask_copy(params, &idx,
852 &tcp_mask->hdr.sent_seq,
853 sizeof(tcp_mask->hdr.sent_seq));
854 ulp_rte_prsr_mask_copy(params, &idx,
855 &tcp_mask->hdr.recv_ack,
856 sizeof(tcp_mask->hdr.recv_ack));
857 ulp_rte_prsr_mask_copy(params, &idx,
858 &tcp_mask->hdr.data_off,
859 sizeof(tcp_mask->hdr.data_off));
860 ulp_rte_prsr_mask_copy(params, &idx,
861 &tcp_mask->hdr.tcp_flags,
862 sizeof(tcp_mask->hdr.tcp_flags));
863 ulp_rte_prsr_mask_copy(params, &idx,
864 &tcp_mask->hdr.rx_win,
865 sizeof(tcp_mask->hdr.rx_win));
866 ulp_rte_prsr_mask_copy(params, &idx,
867 &tcp_mask->hdr.cksum,
868 sizeof(tcp_mask->hdr.cksum));
869 ulp_rte_prsr_mask_copy(params, &idx,
870 &tcp_mask->hdr.tcp_urp,
871 sizeof(tcp_mask->hdr.tcp_urp));
873 /* add number of TCP header elements */
874 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
876 /* Set the udp header bitmap and computed l4 header bitmaps */
877 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
879 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
880 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
881 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
882 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
884 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
885 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
887 return BNXT_TF_RC_SUCCESS;
890 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
892 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
893 struct ulp_rte_parser_params *params)
895 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
896 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
897 struct ulp_rte_hdr_field *field;
/* Fixed mojibake: '&params' had been mangled to a pilcrow sequence. */
898 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
899 uint32_t idx = params->field_idx;
903 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
907 size = sizeof(vxlan_spec->flags);
908 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
911 size = sizeof(vxlan_spec->rsvd0);
912 field = ulp_rte_parser_fld_copy(field,
915 size = sizeof(vxlan_spec->vni);
916 field = ulp_rte_parser_fld_copy(field,
919 size = sizeof(vxlan_spec->rsvd1);
920 field = ulp_rte_parser_fld_copy(field,
925 ulp_rte_prsr_mask_copy(params, &idx,
927 sizeof(vxlan_mask->flags));
928 ulp_rte_prsr_mask_copy(params, &idx,
930 sizeof(vxlan_mask->rsvd0));
931 ulp_rte_prsr_mask_copy(params, &idx,
933 sizeof(vxlan_mask->vni));
934 ulp_rte_prsr_mask_copy(params, &idx,
936 sizeof(vxlan_mask->rsvd1));
938 /* Add number of vxlan header elements */
939 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
941 /* Update the hdr_bitmap with vxlan */
942 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
943 return BNXT_TF_RC_SUCCESS;
946 /* Function to handle the parsing of RTE Flow item void Header */
948 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
949 struct ulp_rte_parser_params *params __rte_unused)
/* VOID items carry no match data; accept them unconditionally. */
951 return BNXT_TF_RC_SUCCESS;
954 /* Function to handle the parsing of RTE Flow action void Header. */
956 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
957 struct ulp_rte_parser_params *params __rte_unused)
/* VOID actions carry no configuration; accept them unconditionally. */
959 return BNXT_TF_RC_SUCCESS;
962 /* Function to handle the parsing of RTE Flow action Mark Header. */
964 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
965 struct ulp_rte_parser_params *param)
967 const struct rte_flow_action_mark *mark;
/* Fixed mojibake: '&param' had been mangled to a pilcrow sequence. */
968 struct ulp_rte_act_bitmap *act = &param->act_bitmap;
971 mark = action_item->conf;
/* Store the mark id big-endian in the action property table. */
973 mark_id = tfp_cpu_to_be_32(mark->id);
974 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
975 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
977 /* Update the act_bitmap with the MARK action bit */
978 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
979 return BNXT_TF_RC_SUCCESS;
981 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
982 return BNXT_TF_RC_ERROR;
985 /* Function to handle the parsing of RTE Flow action RSS Header. */
987 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
988 struct ulp_rte_parser_params *param)
990 const struct rte_flow_action_rss *rss = action_item->conf;
993 /* Update the act_bitmap with the RSS action bit */
994 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
995 return BNXT_TF_RC_SUCCESS;
997 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
998 return BNXT_TF_RC_ERROR;
1001 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1003 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1004 struct ulp_rte_parser_params *params)
1006 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1007 const struct rte_flow_item *item;
1008 const struct rte_flow_item_eth *eth_spec;
1009 const struct rte_flow_item_ipv4 *ipv4_spec;
1010 const struct rte_flow_item_ipv6 *ipv6_spec;
1011 struct rte_flow_item_vxlan vxlan_spec;
1012 uint32_t vlan_num = 0, vlan_size = 0;
1013 uint32_t ip_size = 0, ip_type = 0;
1014 uint32_t vxlan_size = 0;
1016 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1017 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1019 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1020 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1022 vxlan_encap = action_item->conf;
1024 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1025 return BNXT_TF_RC_ERROR;
1028 item = vxlan_encap->definition;
1030 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1031 return BNXT_TF_RC_ERROR;
1034 if (!ulp_rte_item_skip_void(&item, 0))
1035 return BNXT_TF_RC_ERROR;
1037 /* must have ethernet header */
1038 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1039 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1040 return BNXT_TF_RC_ERROR;
1042 eth_spec = item->spec;
1043 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1044 ulp_encap_buffer_copy(buff,
1045 eth_spec->dst.addr_bytes,
1046 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1048 /* Goto the next item */
1049 if (!ulp_rte_item_skip_void(&item, 1))
1050 return BNXT_TF_RC_ERROR;
1052 /* May have vlan header */
1053 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1055 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1056 ulp_encap_buffer_copy(buff,
1058 sizeof(struct rte_flow_item_vlan));
1060 if (!ulp_rte_item_skip_void(&item, 1))
1061 return BNXT_TF_RC_ERROR;
1064 /* may have two vlan headers */
1065 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1067 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1068 sizeof(struct rte_flow_item_vlan)],
1070 sizeof(struct rte_flow_item_vlan));
1071 if (!ulp_rte_item_skip_void(&item, 1))
1072 return BNXT_TF_RC_ERROR;
1074 /* Update the vlan count and size of more than one */
1076 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1077 vlan_num = tfp_cpu_to_be_32(vlan_num);
1078 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1081 vlan_size = tfp_cpu_to_be_32(vlan_size);
1082 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1087 /* L3 must be IPv4, IPv6 */
1088 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1089 ipv4_spec = item->spec;
1090 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1092 /* copy the ipv4 details */
1093 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1094 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1095 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1096 ulp_encap_buffer_copy(buff,
1098 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1099 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1101 const uint8_t *tmp_buff;
1103 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1104 ulp_encap_buffer_copy(buff,
1105 &ipv4_spec->hdr.version_ihl,
1106 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1107 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1108 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1109 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1110 ulp_encap_buffer_copy(buff,
1112 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1114 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1115 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1116 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1117 ulp_encap_buffer_copy(buff,
1118 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1119 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1121 /* Update the ip size details */
1122 ip_size = tfp_cpu_to_be_32(ip_size);
1123 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1124 &ip_size, sizeof(uint32_t));
1126 /* update the ip type */
1127 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1128 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1129 &ip_type, sizeof(uint32_t));
1131 if (!ulp_rte_item_skip_void(&item, 1))
1132 return BNXT_TF_RC_ERROR;
1133 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1134 ipv6_spec = item->spec;
1135 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1137 /* copy the ipv4 details */
1138 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1139 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1141 /* Update the ip size details */
1142 ip_size = tfp_cpu_to_be_32(ip_size);
1143 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1144 &ip_size, sizeof(uint32_t));
1146 /* update the ip type */
1147 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1148 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1149 &ip_type, sizeof(uint32_t));
1151 if (!ulp_rte_item_skip_void(&item, 1))
1152 return BNXT_TF_RC_ERROR;
1154 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1155 return BNXT_TF_RC_ERROR;
1159 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1160 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1161 return BNXT_TF_RC_ERROR;
1163 /* copy the udp details */
1164 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1165 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1167 if (!ulp_rte_item_skip_void(&item, 1))
1168 return BNXT_TF_RC_ERROR;
1171 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1172 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1173 return BNXT_TF_RC_ERROR;
1175 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1176 /* copy the vxlan details */
1177 memcpy(&vxlan_spec, item->spec, vxlan_size);
1178 vxlan_spec.flags = 0x08;
1179 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1180 (const uint8_t *)&vxlan_spec,
1182 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1183 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1184 &vxlan_size, sizeof(uint32_t));
1186 /*update the hdr_bitmap with vxlan */
1187 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1188 return BNXT_TF_RC_SUCCESS;
1191 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1193 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1195 struct ulp_rte_parser_params *params)
1197 /* update the hdr_bitmap with vxlan */
1198 ULP_BITMAP_SET(params->act_bitmap.bits,
1199 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1200 return BNXT_TF_RC_SUCCESS;
1203 /* Function to handle the parsing of RTE Flow action drop Header. */
1205 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1206 struct ulp_rte_parser_params *params)
1208 /* Update the hdr_bitmap with drop */
1209 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1210 return BNXT_TF_RC_SUCCESS;
1213 /* Function to handle the parsing of RTE Flow action count. */
1215 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1216 struct ulp_rte_parser_params *params)
1219 const struct rte_flow_action_count *act_count;
1220 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1222 act_count = action_item->conf;
1224 if (act_count->shared) {
1226 "Parse Error:Shared count not supported\n");
1227 return BNXT_TF_RC_PARSE_ERR;
1229 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1231 BNXT_ULP_ACT_PROP_SZ_COUNT);
1234 /* Update the hdr_bitmap with count */
1235 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1236 return BNXT_TF_RC_SUCCESS;
1239 /* Function to handle the parsing of RTE Flow action PF. */
1241 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1242 struct ulp_rte_parser_params *params)
1246 /* Update the hdr_bitmap with vnic bit */
1247 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1249 /* copy the PF of the current device into VNIC Property */
1250 svif = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1251 svif = bnxt_get_vnic_id(svif);
1252 svif = rte_cpu_to_be_32(svif);
1253 memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1254 &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1256 return BNXT_TF_RC_SUCCESS;
1259 /* Function to handle the parsing of RTE Flow action VF. */
1261 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1262 struct ulp_rte_parser_params *param)
1264 const struct rte_flow_action_vf *vf_action;
1267 vf_action = action_item->conf;
1269 if (vf_action->original) {
1271 "Parse Error:VF Original not supported\n");
1272 return BNXT_TF_RC_PARSE_ERR;
1274 /* TBD: Update the computed VNIC using VF conversion */
1275 pid = bnxt_get_vnic_id(vf_action->id);
1276 pid = rte_cpu_to_be_32(pid);
1277 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1278 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1281 /* Update the hdr_bitmap with count */
1282 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1283 return BNXT_TF_RC_SUCCESS;
1286 /* Function to handle the parsing of RTE Flow action port_id. */
1288 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1289 struct ulp_rte_parser_params *param)
1291 const struct rte_flow_action_port_id *port_id;
1294 port_id = act_item->conf;
1296 if (port_id->original) {
1298 "ParseErr:Portid Original not supported\n");
1299 return BNXT_TF_RC_PARSE_ERR;
1301 /* TBD: Update the computed VNIC using port conversion */
1302 pid = bnxt_get_vnic_id(port_id->id);
1303 pid = rte_cpu_to_be_32(pid);
1304 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1305 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1308 /* Update the hdr_bitmap with count */
1309 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1310 return BNXT_TF_RC_SUCCESS;
1313 /* Function to handle the parsing of RTE Flow action phy_port. */
1315 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1316 struct ulp_rte_parser_params *prm)
1318 const struct rte_flow_action_phy_port *phy_port;
1321 phy_port = action_item->conf;
1323 if (phy_port->original) {
1325 "Parse Err:Port Original not supported\n");
1326 return BNXT_TF_RC_PARSE_ERR;
1328 /* Get the vport of the physical port */
1329 /* TBD: shall be changed later to portdb call */
1330 vport = 1 << phy_port->index;
1331 vport = rte_cpu_to_be_32(vport);
1332 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1333 &vport, BNXT_ULP_ACT_PROP_SZ_VPORT);
1336 /* Update the hdr_bitmap with count */
1337 ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1338 return BNXT_TF_RC_SUCCESS;