1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Utility function to skip the void items.
 * Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries so the
 * caller lands on the next meaningful item (or the end of the list).
 * NOTE(review): interior lines (return type, braces, how 'increment' is
 * applied) are not visible in this excerpt.  Callers treat a zero return
 * as failure — presumably returns non-zero on success; confirm against
 * the full source.
 */
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
/* Walk forward while the current item exists and is a VOID placeholder. */
23 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 /* Utility function to update the field_bitmap.
 * Sets the per-field bit at 'idx' when the field's mask has any non-zero
 * byte; additionally marks the flow as a wildcard match when the mask is
 * not all-ones.  Resets the bit when the mask is entirely zero.
 * NOTE(review): "¶ms" below looks like an extraction artifact of
 * "&params" — verify against the original file before reuse.
 */
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
35 struct ulp_rte_hdr_field *field;
37 field = ¶ms->hdr_field[idx];
38 if (ulp_bitmap_notzero(field->mask, field->size)) {
39 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* A partial (not all-ones) mask means wildcard-match semantics. */
41 if (!ulp_bitmap_is_ones(field->mask, field->size))
42 ULP_BITMAP_SET(params->fld_bitmap.bits,
43 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
/* Zero mask: this field does not participate in the match. */
45 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
49 /* Utility function to copy field spec items.
 * Copies 'buffer' into field->spec for field->size bytes.
 * Returns a struct ulp_rte_hdr_field pointer which callers chain into
 * the next copy — presumably the following hdr_field slot, with
 * field->size set from the caller-supplied size; confirm in full source
 * (interior lines are not visible in this excerpt).
 */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
56 memcpy(field->spec, buffer, field->size);
61 /* Utility function to copy field masks items.
 * Copies 'size' bytes from 'buffer' into the mask of hdr_field[*idx] and
 * refreshes the field bitmap for that index.  Callers repeatedly pass
 * &idx — presumably *idx is incremented here so consecutive calls fill
 * consecutive fields; confirm in full source.
 */
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
68 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
70 memcpy(field->mask, buffer, size);
/* Recompute match/wildcard bits now that the mask changed. */
71 ulp_rte_parser_field_bitmap_update(params, *idx);
76 * Function to handle the parsing of RTE Flows and placing
77 * the RTE flow items into the ulp structures.
 *
 * Walks the pattern[] array until RTE_FLOW_ITEM_TYPE_END, dispatching
 * each item to its registered handler from the ulp_hdr_info table.
 * Returns BNXT_TF_RC_SUCCESS, BNXT_TF_RC_PARSE_ERR for an unsupported
 * item type, or BNXT_TF_RC_ERROR when a handler fails.
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81 struct ulp_rte_parser_params *params)
83 const struct rte_flow_item *item = pattern;
84 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots for the SVIF entry. */
86 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87 if (params->dir == ULP_DIR_EGRESS)
88 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
91 /* Parse all the items in the pattern */
92 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
93 /* get the header information from the flow_hdr_info table */
94 hdr_info = &ulp_hdr_info[item->type];
95 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
97 "Truflow parser does not support type %d\n",
99 return BNXT_TF_RC_PARSE_ERR;
100 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
101 /* call the registered callback handler */
102 if (hdr_info->proto_hdr_func) {
103 if (hdr_info->proto_hdr_func(item, params) !=
104 BNXT_TF_RC_SUCCESS) {
105 return BNXT_TF_RC_ERROR;
111 /* update the implied SVIF (no-op if an item already set it) */
112 (void)ulp_rte_parser_svif_process(params);
113 return BNXT_TF_RC_SUCCESS;
117 * Function to handle the parsing of RTE Flows and placing
118 * the RTE flow actions into the ulp structures.
 *
 * Mirrors bnxt_ulp_rte_parser_hdr_parse for the actions[] array: each
 * action is dispatched through the ulp_act_info table until
 * RTE_FLOW_ACTION_TYPE_END.  Returns BNXT_TF_RC_SUCCESS or
 * BNXT_TF_RC_ERROR (unsupported action or handler failure).
 * NOTE(review): the local is named 'hdr_info' although it holds act
 * info — harmless, but 'act_info' would read better.
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122 struct ulp_rte_parser_params *params)
124 const struct rte_flow_action *action_item = actions;
125 struct bnxt_ulp_rte_act_info *hdr_info;
127 /* Parse all the items in the pattern */
128 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
129 /* get the header information from the flow_hdr_info table */
130 hdr_info = &ulp_act_info[action_item->type];
131 if (hdr_info->act_type ==
132 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
134 "Truflow parser does not support act %u\n",
136 return BNXT_TF_RC_ERROR;
137 } else if (hdr_info->act_type ==
138 BNXT_ULP_ACT_TYPE_SUPPORTED) {
139 /* call the registered callback handler */
140 if (hdr_info->proto_act_func) {
141 if (hdr_info->proto_act_func(action_item,
143 BNXT_TF_RC_SUCCESS) {
144 return BNXT_TF_RC_ERROR;
150 /* update the implied VNIC when no fate action set one */
151 ulp_rte_parser_vnic_process(params);
152 return BNXT_TF_RC_SUCCESS;
155 /* Common helper to record the source interface (SVIF) match.
 * Stores svif/mask (big-endian) into the dedicated SVIF hdr_field slot
 * and caches the CPU-order value in the computed-field array.  For
 * PORT_ID items the DPDK port id is first translated to the bnxt SVIF
 * via the port database.  Rejects a second source specification.
 */
157 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
158 enum rte_flow_item_type proto,
162 uint16_t port_id = svif;
164 struct ulp_rte_hdr_field *hdr_field;
/* Only one source (PF/VF/port) may be specified per flow. */
168 if (ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_SVIF) !=
169 BNXT_ULP_INVALID_SVIF_VAL) {
171 "SVIF already set,multiple source not support'd\n");
172 return BNXT_TF_RC_ERROR;
175 if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
176 dir = ULP_UTIL_CHF_IDX_RD(params,
177 BNXT_ULP_CHF_IDX_DIRECTION);
178 /* perform the conversion from dpdk port to bnxt svif */
179 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
183 "Invalid port id\n");
184 return BNXT_TF_RC_ERROR;
186 ulp_port_db_svif_get(params->ulp_ctx, ifindex, dir, &svif);
/* hdr_field spec/mask are kept in network byte order. */
187 svif = rte_cpu_to_be_16(svif);
189 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
190 memcpy(hdr_field->spec, &svif, sizeof(svif));
191 memcpy(hdr_field->mask, &mask, sizeof(mask));
192 hdr_field->size = sizeof(svif);
/* Cache CPU-order SVIF so later items can detect it is already set. */
193 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_SVIF,
194 rte_be_to_cpu_16(svif));
195 return BNXT_TF_RC_SUCCESS;
198 /* Apply the implicit SVIF after pattern parsing.
 * If no pattern item set the SVIF, derive it from the incoming
 * interface recorded in the computed fields and store it via
 * ulp_rte_parser_svif_set (as a PORT_ID-style conversion).
 */
200 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
202 uint16_t port_id = 0;
203 uint16_t svif_mask = 0xFFFF;
/* Nothing to do when an explicit source item already set the SVIF. */
205 if (ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_SVIF) !=
206 BNXT_ULP_INVALID_SVIF_VAL)
207 return BNXT_TF_RC_SUCCESS;
209 /* SVIF not set. So get the port id */
210 port_id = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF);
212 /* Update the SVIF details */
213 return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
217 /* Apply the implicit VNIC after action parsing.
 * If neither a VNIC nor a VPORT fate action was specified, default the
 * destination by invoking the PF action handler (NULL action item).
 * Always returns BNXT_TF_RC_SUCCESS.
 */
219 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
221 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
/* An explicit fate action already chose the destination. */
223 if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
224 ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
225 return BNXT_TF_RC_SUCCESS;
227 /* Update the vnic details */
228 ulp_rte_pf_act_handler(NULL, params);
229 return BNXT_TF_RC_SUCCESS;
232 /* Function to handle the parsing of RTE Flow item PF Header.
 * Matches traffic from the PF: uses the incoming interface from the
 * computed fields as the SVIF with a full mask.
 */
234 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
235 struct ulp_rte_parser_params *params)
237 uint16_t port_id = 0;
238 uint16_t svif_mask = 0xFFFF;
240 /* Get the port id */
241 port_id = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF);
243 /* Update the SVIF details */
244 return ulp_rte_parser_svif_set(params,
249 /* Function to handle the parsing of RTE Flow item VF Header.
 * Uses the VF id from spec/mask (truncated to 16 bits) as the SVIF.
 * NOTE(review): spec/mask dereferences are presumably NULL-guarded on
 * the elided lines (258/261 are missing here) — confirm in full source.
 */
251 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
252 struct ulp_rte_parser_params *params)
254 const struct rte_flow_item_vf *vf_spec = item->spec;
255 const struct rte_flow_item_vf *vf_mask = item->mask;
256 uint16_t svif = 0, mask = 0;
258 /* Get VF rte_flow_item for Port details */
260 svif = (uint16_t)vf_spec->id;
262 mask = (uint16_t)vf_mask->id;
264 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
267 /* Function to handle the parsing of RTE Flow item port id Header.
 * Takes the DPDK port id from spec/mask; ulp_rte_parser_svif_set
 * converts it to the bnxt SVIF (PORT_ID path).
 * NOTE(review): spec/mask NULL guards appear to be on elided lines —
 * confirm in full source.
 */
269 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
270 struct ulp_rte_parser_params *params)
272 const struct rte_flow_item_port_id *port_spec = item->spec;
273 const struct rte_flow_item_port_id *port_mask = item->mask;
274 uint16_t svif = 0, mask = 0;
277 * Copy the rte_flow_item for Port into hdr_field using port id
281 svif = (uint16_t)port_spec->id;
283 mask = (uint16_t)port_mask->id;
285 /* Update the SVIF details */
286 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
289 /* Function to handle the parsing of RTE Flow item phy port Header.
 * Uses the physical port index from spec/mask directly as the SVIF
 * value (no port-db conversion for phy ports).
 */
291 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
292 struct ulp_rte_parser_params *params)
294 const struct rte_flow_item_phy_port *port_spec = item->spec;
295 const struct rte_flow_item_phy_port *port_mask = item->mask;
296 uint32_t svif = 0, mask = 0;
298 /* Copy the rte_flow_item for phy port into hdr_field */
300 svif = port_spec->index;
302 mask = port_mask->index;
304 /* Update the SVIF details */
305 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
308 /* Function to handle the parsing of RTE Flow item Ethernet Header.
 * Copies dst MAC, src MAC and ether type specs/masks into consecutive
 * hdr_field slots, reserves slots for possible VLAN tags, and marks the
 * outer (or inner, if an outer eth was already seen) eth bit in the
 * header bitmap.
 */
310 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
311 struct ulp_rte_parser_params *params)
313 const struct rte_flow_item_eth *eth_spec = item->spec;
314 const struct rte_flow_item_eth *eth_mask = item->mask;
315 struct ulp_rte_hdr_field *field;
316 uint32_t idx = params->field_idx;
317 uint64_t set_flag = 0;
321 * Copy the rte_flow_item for eth into hdr_field using ethernet
325 size = sizeof(eth_spec->dst.addr_bytes);
326 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
327 eth_spec->dst.addr_bytes,
329 size = sizeof(eth_spec->src.addr_bytes);
330 field = ulp_rte_parser_fld_copy(field,
331 eth_spec->src.addr_bytes,
333 field = ulp_rte_parser_fld_copy(field,
335 sizeof(eth_spec->type));
338 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
339 sizeof(eth_mask->dst.addr_bytes));
340 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
341 sizeof(eth_mask->src.addr_bytes));
342 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
343 sizeof(eth_mask->type));
345 /* Add number of vlan header elements */
346 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
/* Remember where VLAN fields start; reserve their slots up front. */
347 params->vlan_idx = params->field_idx;
348 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
350 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
/* Second eth header in the pattern means this one is the inner eth. */
351 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
352 BNXT_ULP_HDR_BIT_O_ETH);
354 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
356 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
357 BNXT_ULP_HDR_BIT_I_ETH);
359 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
360 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
362 return BNXT_TF_RC_SUCCESS;
365 /* Function to handle the parsing of RTE Flow item Vlan Header.
 * Splits the TCI into priority (top 3 bits) and vlan tag for both spec
 * and mask, stores them plus inner_type into the reserved vlan
 * hdr_field slots, then classifies the tag as outer/inner first/second
 * vlan based on which eth headers and vlan tags were already seen.
 * Errors out if a vlan appears without a preceding eth header.
 */
367 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
368 struct ulp_rte_parser_params *params)
370 const struct rte_flow_item_vlan *vlan_spec = item->spec;
371 const struct rte_flow_item_vlan *vlan_mask = item->mask;
372 struct ulp_rte_hdr_field *field;
373 struct ulp_rte_hdr_bitmap *hdr_bit;
374 uint32_t idx = params->vlan_idx;
375 uint16_t vlan_tag, priority;
376 uint32_t outer_vtag_num;
377 uint32_t inner_vtag_num;
380 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* TCI layout: PCP(3) | DEI(1) | VID(12); shift by 13 extracts PCP. */
384 vlan_tag = ntohs(vlan_spec->tci);
385 priority = htons(vlan_tag >> 13);
387 vlan_tag = htons(vlan_tag);
389 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
392 field = ulp_rte_parser_fld_copy(field,
395 field = ulp_rte_parser_fld_copy(field,
396 &vlan_spec->inner_type,
397 sizeof(vlan_spec->inner_type));
/* Same PCP/VID split for the mask. */
401 vlan_tag = ntohs(vlan_mask->tci);
402 priority = htons(vlan_tag >> 13);
404 vlan_tag = htons(vlan_tag);
406 field = ¶ms->hdr_field[idx];
407 memcpy(field->mask, &priority, field->size);
409 memcpy(field->mask, &vlan_tag, field->size);
411 memcpy(field->mask, &vlan_mask->inner_type, field->size);
413 /* Set the vlan index to new incremented value */
414 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
416 /* Get the outer tag and inner tag counts */
417 outer_vtag_num = ULP_UTIL_CHF_IDX_RD(params,
418 BNXT_ULP_CHF_IDX_O_VTAG_NUM);
419 inner_vtag_num = ULP_UTIL_CHF_IDX_RD(params,
420 BNXT_ULP_CHF_IDX_I_VTAG_NUM);
422 /* Update the hdr_bitmap of the vlans */
423 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first vlan after the outer eth. */
424 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
426 /* Update the vlan tag num */
428 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_NUM,
430 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_PRESENT, 1);
/* Case 2: second (QinQ) vlan on the outer eth. */
431 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
432 ULP_UTIL_CHF_IDX_RD(params,
433 BNXT_ULP_CHF_IDX_O_VTAG_PRESENT) &&
434 outer_vtag_num == 1) {
435 /* update the vlan tag num */
437 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_NUM,
439 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_TWO_VTAGS, 1);
/* Case 3: first vlan after the inner eth (tunnelled packet). */
440 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
441 ULP_UTIL_CHF_IDX_RD(params,
442 BNXT_ULP_CHF_IDX_O_VTAG_PRESENT) &&
443 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
445 /* update the vlan tag num */
447 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_NUM,
449 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_PRESENT, 1);
/* Case 4: second (QinQ) vlan on the inner eth.
 * NOTE(review): the condition below reads O_VTAG_PRESENT twice; by
 * symmetry with case 3 the second read should presumably be
 * I_VTAG_PRESENT — confirm against the full source / upstream fix.
 */
450 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
451 ULP_UTIL_CHF_IDX_RD(params,
452 BNXT_ULP_CHF_IDX_O_VTAG_PRESENT) &&
453 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
454 ULP_UTIL_CHF_IDX_RD(params,
455 BNXT_ULP_CHF_IDX_O_VTAG_PRESENT) &&
456 inner_vtag_num == 1) {
457 /* update the vlan tag num */
459 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_NUM,
461 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_TWO_VTAGS, 1);
/* NOTE(review): "withtout" typo in the log message below (runtime
 * string, intentionally left unchanged here). */
463 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
464 return BNXT_TF_RC_ERROR;
466 return BNXT_TF_RC_SUCCESS;
469 /* Function to handle the parsing of RTE Flow item IPV4 Header.
 * Copies every rte_ipv4_hdr field (spec and mask) into consecutive
 * hdr_field slots, advances field_idx, and sets the outer or inner IPv4
 * header bit.  At most two L3 headers are supported; a third is
 * rejected.
 */
471 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
472 struct ulp_rte_parser_params *params)
474 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
475 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
476 struct ulp_rte_hdr_field *field;
477 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
478 uint32_t idx = params->field_idx;
480 uint32_t inner_l3, outer_l3;
/* Reject a third L3 header (only outer + inner are representable). */
482 inner_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L3);
484 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
485 return BNXT_TF_RC_ERROR;
489 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
493 size = sizeof(ipv4_spec->hdr.version_ihl);
494 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
495 &ipv4_spec->hdr.version_ihl,
497 size = sizeof(ipv4_spec->hdr.type_of_service);
498 field = ulp_rte_parser_fld_copy(field,
499 &ipv4_spec->hdr.type_of_service,
501 size = sizeof(ipv4_spec->hdr.total_length);
502 field = ulp_rte_parser_fld_copy(field,
503 &ipv4_spec->hdr.total_length,
505 size = sizeof(ipv4_spec->hdr.packet_id);
506 field = ulp_rte_parser_fld_copy(field,
507 &ipv4_spec->hdr.packet_id,
509 size = sizeof(ipv4_spec->hdr.fragment_offset);
510 field = ulp_rte_parser_fld_copy(field,
511 &ipv4_spec->hdr.fragment_offset,
513 size = sizeof(ipv4_spec->hdr.time_to_live);
514 field = ulp_rte_parser_fld_copy(field,
515 &ipv4_spec->hdr.time_to_live,
517 size = sizeof(ipv4_spec->hdr.next_proto_id);
518 field = ulp_rte_parser_fld_copy(field,
519 &ipv4_spec->hdr.next_proto_id,
521 size = sizeof(ipv4_spec->hdr.hdr_checksum);
522 field = ulp_rte_parser_fld_copy(field,
523 &ipv4_spec->hdr.hdr_checksum,
525 size = sizeof(ipv4_spec->hdr.src_addr);
526 field = ulp_rte_parser_fld_copy(field,
527 &ipv4_spec->hdr.src_addr,
529 size = sizeof(ipv4_spec->hdr.dst_addr);
530 field = ulp_rte_parser_fld_copy(field,
531 &ipv4_spec->hdr.dst_addr,
/* Mask copies mirror the spec copies above, one per header field. */
535 ulp_rte_prsr_mask_copy(params, &idx,
536 &ipv4_mask->hdr.version_ihl,
537 sizeof(ipv4_mask->hdr.version_ihl));
538 ulp_rte_prsr_mask_copy(params, &idx,
539 &ipv4_mask->hdr.type_of_service,
540 sizeof(ipv4_mask->hdr.type_of_service));
541 ulp_rte_prsr_mask_copy(params, &idx,
542 &ipv4_mask->hdr.total_length,
543 sizeof(ipv4_mask->hdr.total_length));
544 ulp_rte_prsr_mask_copy(params, &idx,
545 &ipv4_mask->hdr.packet_id,
546 sizeof(ipv4_mask->hdr.packet_id));
547 ulp_rte_prsr_mask_copy(params, &idx,
548 &ipv4_mask->hdr.fragment_offset,
549 sizeof(ipv4_mask->hdr.fragment_offset));
550 ulp_rte_prsr_mask_copy(params, &idx,
551 &ipv4_mask->hdr.time_to_live,
552 sizeof(ipv4_mask->hdr.time_to_live));
553 ulp_rte_prsr_mask_copy(params, &idx,
554 &ipv4_mask->hdr.next_proto_id,
555 sizeof(ipv4_mask->hdr.next_proto_id));
556 ulp_rte_prsr_mask_copy(params, &idx,
557 &ipv4_mask->hdr.hdr_checksum,
558 sizeof(ipv4_mask->hdr.hdr_checksum));
559 ulp_rte_prsr_mask_copy(params, &idx,
560 &ipv4_mask->hdr.src_addr,
561 sizeof(ipv4_mask->hdr.src_addr));
562 ulp_rte_prsr_mask_copy(params, &idx,
563 &ipv4_mask->hdr.dst_addr,
564 sizeof(ipv4_mask->hdr.dst_addr));
566 /* Add the number of ipv4 header elements */
567 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
569 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
/* An existing outer L3 means this header is the inner IPv4. */
570 outer_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L3);
572 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
573 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
574 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
576 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L3, inner_l3);
578 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
580 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L3, outer_l3);
582 return BNXT_TF_RC_SUCCESS;
585 /* Function to handle the parsing of RTE Flow item IPV6 Header.
 * Copies every rte_ipv6_hdr field (spec and mask) into consecutive
 * hdr_field slots, advances field_idx, and sets the outer or inner IPv6
 * header bit.  At most two L3 headers are supported.
 */
587 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
588 struct ulp_rte_parser_params *params)
590 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
591 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
592 struct ulp_rte_hdr_field *field;
593 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
594 uint32_t idx = params->field_idx;
596 uint32_t inner_l3, outer_l3;
598 inner_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L3);
600 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
601 return BNXT_TF_RC_ERROR;
605 * Copy the rte_flow_item for ipv6 into hdr_field, field by field
609 size = sizeof(ipv6_spec->hdr.vtc_flow);
610 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
611 &ipv6_spec->hdr.vtc_flow,
613 size = sizeof(ipv6_spec->hdr.payload_len);
614 field = ulp_rte_parser_fld_copy(field,
615 &ipv6_spec->hdr.payload_len,
617 size = sizeof(ipv6_spec->hdr.proto);
618 field = ulp_rte_parser_fld_copy(field,
619 &ipv6_spec->hdr.proto,
621 size = sizeof(ipv6_spec->hdr.hop_limits);
622 field = ulp_rte_parser_fld_copy(field,
623 &ipv6_spec->hdr.hop_limits,
625 size = sizeof(ipv6_spec->hdr.src_addr);
626 field = ulp_rte_parser_fld_copy(field,
627 &ipv6_spec->hdr.src_addr,
629 size = sizeof(ipv6_spec->hdr.dst_addr);
630 field = ulp_rte_parser_fld_copy(field,
631 &ipv6_spec->hdr.dst_addr,
/* Mask copies mirror the spec copies above. */
635 ulp_rte_prsr_mask_copy(params, &idx,
636 &ipv6_mask->hdr.vtc_flow,
637 sizeof(ipv6_mask->hdr.vtc_flow));
638 ulp_rte_prsr_mask_copy(params, &idx,
639 &ipv6_mask->hdr.payload_len,
640 sizeof(ipv6_mask->hdr.payload_len));
641 ulp_rte_prsr_mask_copy(params, &idx,
642 &ipv6_mask->hdr.proto,
643 sizeof(ipv6_mask->hdr.proto));
644 ulp_rte_prsr_mask_copy(params, &idx,
645 &ipv6_mask->hdr.hop_limits,
646 sizeof(ipv6_mask->hdr.hop_limits));
647 ulp_rte_prsr_mask_copy(params, &idx,
648 &ipv6_mask->hdr.src_addr,
649 sizeof(ipv6_mask->hdr.src_addr));
650 ulp_rte_prsr_mask_copy(params, &idx,
651 &ipv6_mask->hdr.dst_addr,
652 sizeof(ipv6_mask->hdr.dst_addr));
654 /* add number of ipv6 header elements */
655 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
657 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
658 outer_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L3);
660 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
661 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
662 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
663 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L3, 1);
665 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
666 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L3, 1);
668 return BNXT_TF_RC_SUCCESS;
671 /* Function to handle the parsing of RTE Flow item UDP Header.
 * Copies src/dst port, length and checksum (spec and mask) into
 * consecutive hdr_field slots, advances field_idx, and sets the outer
 * or inner UDP header bit.  At most two L4 headers are supported.
 */
673 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
674 struct ulp_rte_parser_params *params)
676 const struct rte_flow_item_udp *udp_spec = item->spec;
677 const struct rte_flow_item_udp *udp_mask = item->mask;
678 struct ulp_rte_hdr_field *field;
679 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
680 uint32_t idx = params->field_idx;
682 uint32_t inner_l4, outer_l4;
684 inner_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L4);
686 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
687 return BNXT_TF_RC_ERROR;
691 * Copy the rte_flow_item for udp into hdr_field, field by field
695 size = sizeof(udp_spec->hdr.src_port);
696 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
697 &udp_spec->hdr.src_port,
699 size = sizeof(udp_spec->hdr.dst_port);
700 field = ulp_rte_parser_fld_copy(field,
701 &udp_spec->hdr.dst_port,
703 size = sizeof(udp_spec->hdr.dgram_len);
704 field = ulp_rte_parser_fld_copy(field,
705 &udp_spec->hdr.dgram_len,
707 size = sizeof(udp_spec->hdr.dgram_cksum);
708 field = ulp_rte_parser_fld_copy(field,
709 &udp_spec->hdr.dgram_cksum,
/* Mask copies mirror the spec copies above. */
713 ulp_rte_prsr_mask_copy(params, &idx,
714 &udp_mask->hdr.src_port,
715 sizeof(udp_mask->hdr.src_port));
716 ulp_rte_prsr_mask_copy(params, &idx,
717 &udp_mask->hdr.dst_port,
718 sizeof(udp_mask->hdr.dst_port));
719 ulp_rte_prsr_mask_copy(params, &idx,
720 &udp_mask->hdr.dgram_len,
721 sizeof(udp_mask->hdr.dgram_len));
722 ulp_rte_prsr_mask_copy(params, &idx,
723 &udp_mask->hdr.dgram_cksum,
724 sizeof(udp_mask->hdr.dgram_cksum));
727 /* Add number of UDP header elements */
728 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
730 /* Set the udp header bitmap and computed l4 header bitmaps */
/* An existing outer L4 means this header is the inner UDP. */
731 outer_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L4);
733 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
734 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
735 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
736 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L4, 1);
738 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
739 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L4, 1);
741 return BNXT_TF_RC_SUCCESS;
744 /* Function to handle the parsing of RTE Flow item TCP Header.
 * Copies all rte_tcp_hdr fields (spec and mask) into consecutive
 * hdr_field slots, advances field_idx, and sets the outer or inner TCP
 * header bit.  At most two L4 headers are supported.
 */
746 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
747 struct ulp_rte_parser_params *params)
749 const struct rte_flow_item_tcp *tcp_spec = item->spec;
750 const struct rte_flow_item_tcp *tcp_mask = item->mask;
751 struct ulp_rte_hdr_field *field;
752 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
753 uint32_t idx = params->field_idx;
755 uint32_t inner_l4, outer_l4;
757 inner_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L4);
759 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
760 return BNXT_TF_RC_ERROR;
764 * Copy the rte_flow_item for tcp into hdr_field, field by field
768 size = sizeof(tcp_spec->hdr.src_port);
769 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
770 &tcp_spec->hdr.src_port,
772 size = sizeof(tcp_spec->hdr.dst_port);
773 field = ulp_rte_parser_fld_copy(field,
774 &tcp_spec->hdr.dst_port,
776 size = sizeof(tcp_spec->hdr.sent_seq);
777 field = ulp_rte_parser_fld_copy(field,
778 &tcp_spec->hdr.sent_seq,
780 size = sizeof(tcp_spec->hdr.recv_ack);
781 field = ulp_rte_parser_fld_copy(field,
782 &tcp_spec->hdr.recv_ack,
784 size = sizeof(tcp_spec->hdr.data_off);
785 field = ulp_rte_parser_fld_copy(field,
786 &tcp_spec->hdr.data_off,
788 size = sizeof(tcp_spec->hdr.tcp_flags);
789 field = ulp_rte_parser_fld_copy(field,
790 &tcp_spec->hdr.tcp_flags,
792 size = sizeof(tcp_spec->hdr.rx_win);
793 field = ulp_rte_parser_fld_copy(field,
794 &tcp_spec->hdr.rx_win,
796 size = sizeof(tcp_spec->hdr.cksum);
797 field = ulp_rte_parser_fld_copy(field,
798 &tcp_spec->hdr.cksum,
800 size = sizeof(tcp_spec->hdr.tcp_urp);
801 field = ulp_rte_parser_fld_copy(field,
802 &tcp_spec->hdr.tcp_urp,
/* NOTE(review): this local 'idx' bump appears on an elided branch
 * (presumably the no-spec path) — confirm placement in full source. */
805 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
/* Mask copies mirror the spec copies above. */
809 ulp_rte_prsr_mask_copy(params, &idx,
810 &tcp_mask->hdr.src_port,
811 sizeof(tcp_mask->hdr.src_port));
812 ulp_rte_prsr_mask_copy(params, &idx,
813 &tcp_mask->hdr.dst_port,
814 sizeof(tcp_mask->hdr.dst_port));
815 ulp_rte_prsr_mask_copy(params, &idx,
816 &tcp_mask->hdr.sent_seq,
817 sizeof(tcp_mask->hdr.sent_seq));
818 ulp_rte_prsr_mask_copy(params, &idx,
819 &tcp_mask->hdr.recv_ack,
820 sizeof(tcp_mask->hdr.recv_ack));
821 ulp_rte_prsr_mask_copy(params, &idx,
822 &tcp_mask->hdr.data_off,
823 sizeof(tcp_mask->hdr.data_off));
824 ulp_rte_prsr_mask_copy(params, &idx,
825 &tcp_mask->hdr.tcp_flags,
826 sizeof(tcp_mask->hdr.tcp_flags));
827 ulp_rte_prsr_mask_copy(params, &idx,
828 &tcp_mask->hdr.rx_win,
829 sizeof(tcp_mask->hdr.rx_win));
830 ulp_rte_prsr_mask_copy(params, &idx,
831 &tcp_mask->hdr.cksum,
832 sizeof(tcp_mask->hdr.cksum));
833 ulp_rte_prsr_mask_copy(params, &idx,
834 &tcp_mask->hdr.tcp_urp,
835 sizeof(tcp_mask->hdr.tcp_urp));
837 /* add number of TCP header elements */
838 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
840 /* Set the tcp header bitmap and computed l4 header bitmaps */
841 outer_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L4);
843 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
844 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
845 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
846 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L4, 1);
848 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
849 ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L4, 1);
851 return BNXT_TF_RC_SUCCESS;
854 /* Function to handle the parsing of RTE Flow item Vxlan Header.
 * Copies flags, rsvd0, vni and rsvd1 (spec and mask) into consecutive
 * hdr_field slots, advances field_idx, and marks the VXLAN tunnel bit
 * in the header bitmap.
 */
856 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
857 struct ulp_rte_parser_params *params)
859 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
860 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
861 struct ulp_rte_hdr_field *field;
862 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
863 uint32_t idx = params->field_idx;
867 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
871 size = sizeof(vxlan_spec->flags);
872 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
875 size = sizeof(vxlan_spec->rsvd0);
876 field = ulp_rte_parser_fld_copy(field,
879 size = sizeof(vxlan_spec->vni);
880 field = ulp_rte_parser_fld_copy(field,
883 size = sizeof(vxlan_spec->rsvd1);
884 field = ulp_rte_parser_fld_copy(field,
/* Mask copies mirror the spec copies above. */
889 ulp_rte_prsr_mask_copy(params, &idx,
891 sizeof(vxlan_mask->flags));
892 ulp_rte_prsr_mask_copy(params, &idx,
894 sizeof(vxlan_mask->rsvd0));
895 ulp_rte_prsr_mask_copy(params, &idx,
897 sizeof(vxlan_mask->vni));
898 ulp_rte_prsr_mask_copy(params, &idx,
900 sizeof(vxlan_mask->rsvd1));
902 /* Add number of vxlan header elements */
903 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
905 /* Update the hdr_bitmap with vxlan */
906 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
907 return BNXT_TF_RC_SUCCESS;
910 /* Function to handle the parsing of RTE Flow item void Header.
 * VOID items carry no match data; accept and ignore them.
 */
912 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
913 struct ulp_rte_parser_params *params __rte_unused)
915 return BNXT_TF_RC_SUCCESS;
918 /* Function to handle the parsing of RTE Flow action void Header.
 * VOID actions carry no semantics; accept and ignore them.
 */
920 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
921 struct ulp_rte_parser_params *params __rte_unused)
923 return BNXT_TF_RC_SUCCESS;
926 /* Function to handle the parsing of RTE Flow action Mark Header.
 * Stores the big-endian mark id into the action-property blob and sets
 * the MARK bit in the action bitmap; errors out when the action conf is
 * missing (NULL-check appears on an elided line).
 */
928 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
929 struct ulp_rte_parser_params *param)
931 const struct rte_flow_action_mark *mark;
932 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
935 mark = action_item->conf;
937 mark_id = tfp_cpu_to_be_32(mark->id);
938 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
939 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
941 /* Update the act_bitmap with the mark action */
942 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
943 return BNXT_TF_RC_SUCCESS;
945 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
946 return BNXT_TF_RC_ERROR;
949 /* Function to handle the parsing of RTE Flow action RSS Header.
 * Only flags the RSS bit in the action bitmap (the RSS configuration
 * itself is not consumed here); errors out when conf is missing.
 */
951 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
952 struct ulp_rte_parser_params *param)
954 const struct rte_flow_action_rss *rss = action_item->conf;
957 /* Update the act_bitmap with the rss action */
958 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
959 return BNXT_TF_RC_SUCCESS;
961 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
962 return BNXT_TF_RC_ERROR;
965 /* Function to handle the parsing of RTE Flow action vxlan_encap Header.
 * Walks the encap 'definition' item list, which must be (VOIDs allowed
 * between items): ETH, up to two VLANs, IPv4 or IPv6, UDP, VXLAN.
 * Each piece is serialized into the action-property blob (ENCAP_L2_DMAC,
 * ENCAP_VTAG, ENCAP_IP, ENCAP_UDP, ENCAP_TUN plus their size/type
 * properties), and the VXLAN_ENCAP bit is set on success.
 */
967 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
968 struct ulp_rte_parser_params *params)
970 const struct rte_flow_action_vxlan_encap *vxlan_encap;
971 const struct rte_flow_item *item;
972 const struct rte_flow_item_eth *eth_spec;
973 const struct rte_flow_item_ipv4 *ipv4_spec;
974 const struct rte_flow_item_ipv6 *ipv6_spec;
975 struct rte_flow_item_vxlan vxlan_spec;
976 uint32_t vlan_num = 0, vlan_size = 0;
977 uint32_t ip_size = 0, ip_type = 0;
978 uint32_t vxlan_size = 0;
980 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
981 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
983 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
984 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
986 vxlan_encap = action_item->conf;
988 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
989 return BNXT_TF_RC_ERROR;
992 item = vxlan_encap->definition;
994 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
995 return BNXT_TF_RC_ERROR;
998 if (!ulp_rte_item_skip_void(&item, 0))
999 return BNXT_TF_RC_ERROR;
1001 /* must have ethernet header */
1002 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1003 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1004 return BNXT_TF_RC_ERROR;
1006 eth_spec = item->spec;
1007 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1008 ulp_encap_buffer_copy(buff,
1009 eth_spec->dst.addr_bytes,
1010 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1012 /* Goto the next item */
1013 if (!ulp_rte_item_skip_void(&item, 1))
1014 return BNXT_TF_RC_ERROR;
1016 /* May have vlan header */
1017 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1019 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1020 ulp_encap_buffer_copy(buff,
1022 sizeof(struct rte_flow_item_vlan));
1024 if (!ulp_rte_item_skip_void(&item, 1))
1025 return BNXT_TF_RC_ERROR;
1028 /* may have two vlan headers */
1029 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
/* Second tag is appended right after the first in the VTAG property. */
1031 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1032 sizeof(struct rte_flow_item_vlan)],
1034 sizeof(struct rte_flow_item_vlan));
1035 if (!ulp_rte_item_skip_void(&item, 1))
1036 return BNXT_TF_RC_ERROR;
1038 /* Update the vlan count and size of more than one */
/* Count/size are stored big-endian in the action properties. */
1040 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1041 vlan_num = tfp_cpu_to_be_32(vlan_num);
1042 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1045 vlan_size = tfp_cpu_to_be_32(vlan_size);
1046 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1051 /* L3 must be IPv4, IPv6 */
1052 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1053 ipv4_spec = item->spec;
1054 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1056 /* copy the ipv4 details */
/* Empty ver/hlen+TOS spec: fall back to the default IPv4 header
 * bytes (def_ipv4_hdr) for the non-address portion. */
1057 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1058 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1059 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1060 ulp_encap_buffer_copy(buff,
1062 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1063 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1065 const uint8_t *tmp_buff;
/* Caller provided header fields: copy ver/hlen+TOS, then the
 * id..proto run, piecewise into the ENCAP_IP property. */
1067 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1068 ulp_encap_buffer_copy(buff,
1069 &ipv4_spec->hdr.version_ihl,
1070 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1071 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1072 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1073 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1074 ulp_encap_buffer_copy(buff,
1076 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
/* Destination IP always follows the fixed-header portion. */
1078 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1079 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1080 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1081 ulp_encap_buffer_copy(buff,
1082 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1083 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1085 /* Update the ip size details */
1086 ip_size = tfp_cpu_to_be_32(ip_size);
1087 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1088 &ip_size, sizeof(uint32_t));
1090 /* update the ip type */
1091 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1092 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1093 &ip_type, sizeof(uint32_t));
1095 if (!ulp_rte_item_skip_void(&item, 1))
1096 return BNXT_TF_RC_ERROR;
1097 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1098 ipv6_spec = item->spec;
1099 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1101 /* copy the ipv6 details */
1102 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1103 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1105 /* Update the ip size details */
1106 ip_size = tfp_cpu_to_be_32(ip_size);
1107 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1108 &ip_size, sizeof(uint32_t));
1110 /* update the ip type */
1111 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1112 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1113 &ip_type, sizeof(uint32_t));
1115 if (!ulp_rte_item_skip_void(&item, 1))
1116 return BNXT_TF_RC_ERROR;
1118 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1119 return BNXT_TF_RC_ERROR;
/* L4 must be UDP for a VXLAN tunnel. */
1123 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1124 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1125 return BNXT_TF_RC_ERROR;
1127 /* copy the udp details */
1128 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1129 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1131 if (!ulp_rte_item_skip_void(&item, 1))
1132 return BNXT_TF_RC_ERROR;
/* Finally, the VXLAN item supplies the VNI. */
1135 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1136 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1137 return BNXT_TF_RC_ERROR;
1139 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1140 /* copy the vxlan details */
1141 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* Force the VXLAN 'I' (valid-VNI) flag per RFC 7348. */
1142 vxlan_spec.flags = 0x08;
1143 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1144 (const uint8_t *)&vxlan_spec,
1146 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1147 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1148 &vxlan_size, sizeof(uint32_t));
1150 /*update the hdr_bitmap with vxlan */
1151 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1152 return BNXT_TF_RC_SUCCESS;
1155 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1157 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1159 struct ulp_rte_parser_params *params)
1161 /* update the hdr_bitmap with vxlan */
1162 ULP_BITMAP_SET(params->act_bitmap.bits,
1163 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1164 return BNXT_TF_RC_SUCCESS;
1167 /* Function to handle the parsing of RTE Flow action drop Header. */
1169 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1170 struct ulp_rte_parser_params *params)
1172 /* Update the hdr_bitmap with drop */
1173 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1174 return BNXT_TF_RC_SUCCESS;
1177 /* Function to handle the parsing of RTE Flow action count. */
1179 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1180 struct ulp_rte_parser_params *params)
1183 const struct rte_flow_action_count *act_count;
1184 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1186 act_count = action_item->conf;
1188 if (act_count->shared) {
1190 "Parse Error:Shared count not supported\n");
1191 return BNXT_TF_RC_PARSE_ERR;
1193 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1195 BNXT_ULP_ACT_PROP_SZ_COUNT);
1198 /* Update the hdr_bitmap with count */
1199 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1200 return BNXT_TF_RC_SUCCESS;
1203 /* Function to handle the parsing of RTE Flow action PF. */
1205 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1206 struct ulp_rte_parser_params *params)
1210 /* Update the hdr_bitmap with vnic bit */
1211 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1213 /* copy the PF of the current device into VNIC Property */
1214 svif = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF);
1215 svif = bnxt_get_vnic_id(svif);
1216 svif = rte_cpu_to_be_32(svif);
1217 memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1218 &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1220 return BNXT_TF_RC_SUCCESS;
1223 /* Function to handle the parsing of RTE Flow action VF. */
1225 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1226 struct ulp_rte_parser_params *param)
1228 const struct rte_flow_action_vf *vf_action;
1231 vf_action = action_item->conf;
1233 if (vf_action->original) {
1235 "Parse Error:VF Original not supported\n");
1236 return BNXT_TF_RC_PARSE_ERR;
1238 /* TBD: Update the computed VNIC using VF conversion */
1239 pid = bnxt_get_vnic_id(vf_action->id);
1240 pid = rte_cpu_to_be_32(pid);
1241 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1242 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1245 /* Update the hdr_bitmap with count */
1246 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1247 return BNXT_TF_RC_SUCCESS;
1250 /* Function to handle the parsing of RTE Flow action port_id. */
1252 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1253 struct ulp_rte_parser_params *param)
1255 const struct rte_flow_action_port_id *port_id;
1258 port_id = act_item->conf;
1260 if (port_id->original) {
1262 "ParseErr:Portid Original not supported\n");
1263 return BNXT_TF_RC_PARSE_ERR;
1265 /* TBD: Update the computed VNIC using port conversion */
1266 pid = bnxt_get_vnic_id(port_id->id);
1267 pid = rte_cpu_to_be_32(pid);
1268 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1269 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1272 /* Update the hdr_bitmap with count */
1273 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1274 return BNXT_TF_RC_SUCCESS;
1277 /* Function to handle the parsing of RTE Flow action phy_port. */
1279 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1280 struct ulp_rte_parser_params *prm)
1282 const struct rte_flow_action_phy_port *phy_port;
1285 phy_port = action_item->conf;
1287 if (phy_port->original) {
1289 "Parse Err:Port Original not supported\n");
1290 return BNXT_TF_RC_PARSE_ERR;
1292 pid = bnxt_get_vnic_id(phy_port->index);
1293 pid = rte_cpu_to_be_32(pid);
1294 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1295 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1298 /* Update the hdr_bitmap with count */
1299 ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1300 return BNXT_TF_RC_SUCCESS;