1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Utility function to skip the void items. */
/*
 * Advances *item past any RTE_FLOW_ITEM_TYPE_VOID entries, optionally
 * stepping forward first by 'increment' items.
 * NOTE(review): chunk appears truncated — return type, braces and the
 * loop body are missing here; verify against the full source.
 */
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
23 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 /* Utility function to update the field_bitmap */
/*
 * Sets the per-field bit in params->fld_bitmap when the field's mask is
 * non-zero; additionally marks the flow as a wildcard match when the mask
 * is not all-ones. Resets the bit when the mask is zero.
 * NOTE(review): "¶ms" below is mojibake — almost certainly "&params"
 * corrupted via an HTML entity; fix when the full source is available.
 */
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
35 struct ulp_rte_hdr_field *field;
37 field = ¶ms->hdr_field[idx];
38 if (ulp_bitmap_notzero(field->mask, field->size)) {
39 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* partial mask => wildcard-match entry rather than exact match */
41 if (!ulp_bitmap_is_ones(field->mask, field->size))
42 ULP_BITMAP_SET(params->fld_bitmap.bits,
43 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
45 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
49 /* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec (field->size bytes) and — per the
 * callers, which chain these calls — presumably returns the next
 * hdr_field slot. NOTE(review): the size assignment and return
 * statement are missing from this truncated view; confirm.
 */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
56 memcpy(field->spec, buffer, field->size);
61 /* Utility function to copy field masks items */
/*
 * Copies 'size' bytes of 'buffer' into hdr_field[*idx].mask and updates
 * the field bitmap for that index. Callers pass &idx so the index is
 * presumably incremented here as well — TODO confirm, the increment
 * line is not visible in this truncated chunk.
 * NOTE(review): "¶ms" is mojibake for "&params".
 */
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
68 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
70 memcpy(field->mask, buffer, size);
71 ulp_rte_parser_field_bitmap_update(params, *idx);
76 * Function to handle the parsing of RTE Flows and placing
77 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow pattern array, dispatching each supported item to
 * its registered proto_hdr_func callback (from ulp_hdr_info[]), then
 * applies the implied SVIF. Returns BNXT_TF_RC_PARSE_ERR for
 * unsupported item types, BNXT_TF_RC_ERROR on callback failure.
 */
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81 struct ulp_rte_parser_params *params)
83 const struct rte_flow_item *item = pattern;
84 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* reserve the leading hdr_field slots for the SVIF fields */
86 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87 if (params->dir == ULP_DIR_EGRESS)
88 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
91 /* Parse all the items in the pattern */
92 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
93 /* get the header information from the flow_hdr_info table */
94 hdr_info = &ulp_hdr_info[item->type];
95 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
97 "Truflow parser does not support type %d\n",
99 return BNXT_TF_RC_PARSE_ERR;
100 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
101 /* call the registered callback handler */
102 if (hdr_info->proto_hdr_func) {
103 if (hdr_info->proto_hdr_func(item, params) !=
104 BNXT_TF_RC_SUCCESS) {
105 return BNXT_TF_RC_ERROR;
/* NOTE(review): loop-advance (item++) not visible in this truncated
 * chunk — presumably between the callback and loop end; confirm. */
111 /* update the implied SVIF */
112 (void)ulp_rte_parser_svif_process(params);
113 return BNXT_TF_RC_SUCCESS;
117 * Function to handle the parsing of RTE Flows and placing
118 * the RTE flow actions into the ulp structures.
/*
 * Mirror of the header parser for actions: walks the rte_flow action
 * array, dispatching each supported action to its proto_act_func
 * callback (from ulp_act_info[]), then applies the implied VNIC.
 * NOTE(review): the local is named "hdr_info" but holds a
 * bnxt_ulp_rte_act_info pointer — a rename to act_info would help.
 */
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122 struct ulp_rte_parser_params *params)
124 const struct rte_flow_action *action_item = actions;
125 struct bnxt_ulp_rte_act_info *hdr_info;
127 if (params->dir == ULP_DIR_EGRESS)
128 ULP_BITMAP_SET(params->act_bitmap.bits,
129 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
131 /* Parse all the items in the pattern */
132 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
133 /* get the header information from the flow_hdr_info table */
134 hdr_info = &ulp_act_info[action_item->type];
135 if (hdr_info->act_type ==
136 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
138 "Truflow parser does not support act %u\n",
140 return BNXT_TF_RC_ERROR;
141 } else if (hdr_info->act_type ==
142 BNXT_ULP_ACT_TYPE_SUPPORTED) {
143 /* call the registered callback handler */
144 if (hdr_info->proto_act_func) {
145 if (hdr_info->proto_act_func(action_item,
147 BNXT_TF_RC_SUCCESS) {
148 return BNXT_TF_RC_ERROR;
154 /* update the implied VNIC */
155 ulp_rte_parser_vnic_process(params);
156 return BNXT_TF_RC_SUCCESS;
159 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Records the source interface (SVIF) spec/mask into the dedicated
 * hdr_field slot and latches it in the SVIF_FLAG computed field.
 * Rejects a second SVIF source (only one of PF/VF/port_id/phy_port
 * may set it). For PORT_ID items the DPDK port id is first translated
 * to a bnxt svif via the port database.
 * NOTE(review): "¶ms" is mojibake for "&params".
 */
161 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
162 enum rte_flow_item_type proto,
166 uint16_t port_id = svif;
168 struct ulp_rte_hdr_field *hdr_field;
/* a SVIF was already captured by an earlier item — multiple sources
 * cannot be expressed in one flow */
172 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
173 BNXT_ULP_INVALID_SVIF_VAL) {
175 "SVIF already set,multiple source not support'd\n");
176 return BNXT_TF_RC_ERROR;
179 if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
180 dir = ULP_COMP_FLD_IDX_RD(params,
181 BNXT_ULP_CF_IDX_DIRECTION);
182 /* perform the conversion from dpdk port to bnxt svif */
183 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
187 "Invalid port id\n");
188 return BNXT_TF_RC_ERROR;
190 ulp_port_db_svif_get(params->ulp_ctx, ifindex, dir, &svif);
/* svif is matched in network byte order by the hardware template */
191 svif = rte_cpu_to_be_16(svif);
193 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
194 memcpy(hdr_field->spec, &svif, sizeof(svif));
195 memcpy(hdr_field->mask, &mask, sizeof(mask));
196 hdr_field->size = sizeof(svif);
197 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
198 rte_be_to_cpu_16(svif));
199 return BNXT_TF_RC_SUCCESS;
202 /* Function to handle the parsing of the RTE port id */
/*
 * Called after pattern parsing: if no explicit PF/VF/port item set the
 * SVIF, derive it implicitly from the incoming interface recorded in
 * the computed fields, with a full 0xFFFF match mask.
 */
204 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
206 uint16_t port_id = 0;
207 uint16_t svif_mask = 0xFFFF;
/* already set by an explicit item — nothing to do */
209 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
210 BNXT_ULP_INVALID_SVIF_VAL)
211 return BNXT_TF_RC_SUCCESS;
213 /* SVIF not set. So get the port id */
214 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
216 /* Update the SVIF details */
217 return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
221 /* Function to handle the implicit VNIC RTE port id */
/*
 * Called after action parsing: if neither a VNIC nor a VPORT action was
 * given, fall back to the implicit PF action handler to pick the
 * destination vnic. NOTE(review): "¶ms" is mojibake for "&params".
 */
223 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
225 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
227 if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
228 ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
229 return BNXT_TF_RC_SUCCESS;
231 /* Update the vnic details */
232 ulp_rte_pf_act_handler(NULL, params);
233 return BNXT_TF_RC_SUCCESS;
236 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item: the SVIF is the flow's incoming interface with an exact
 * (0xFFFF) mask; the item spec itself carries no fields.
 */
238 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
239 struct ulp_rte_parser_params *params)
241 uint16_t port_id = 0;
242 uint16_t svif_mask = 0xFFFF;
244 /* Get the port id */
245 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
247 /* Update the SVIF details */
248 return ulp_rte_parser_svif_set(params,
253 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF item: take the svif/mask from the item's 32-bit VF id, truncated
 * to 16 bits. NOTE(review): the NULL checks that presumably guard
 * vf_spec/vf_mask dereferences are missing from this truncated view.
 */
255 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
256 struct ulp_rte_parser_params *params)
258 const struct rte_flow_item_vf *vf_spec = item->spec;
259 const struct rte_flow_item_vf *vf_mask = item->mask;
260 uint16_t svif = 0, mask = 0;
262 /* Get VF rte_flow_item for Port details */
264 svif = (uint16_t)vf_spec->id;
266 mask = (uint16_t)vf_mask->id;
268 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
271 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * PORT_ID item: the spec/mask carry a DPDK port id; svif_set will
 * translate it to a bnxt svif via the port database.
 */
273 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
274 struct ulp_rte_parser_params *params)
276 const struct rte_flow_item_port_id *port_spec = item->spec;
277 const struct rte_flow_item_port_id *port_mask = item->mask;
278 uint16_t svif = 0, mask = 0;
281 * Copy the rte_flow_item for Port into hdr_field using port id
285 svif = (uint16_t)port_spec->id;
287 mask = (uint16_t)port_mask->id;
289 /* Update the SVIF details */
290 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
293 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * PHY_PORT item: match on the physical port index.
 * NOTE(review): svif/mask are uint32_t here but ulp_rte_parser_svif_set
 * appears to take 16-bit values (other handlers use uint16_t) — the
 * implicit narrowing looks benign for valid port indices but verify.
 */
295 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
296 struct ulp_rte_parser_params *params)
298 const struct rte_flow_item_phy_port *port_spec = item->spec;
299 const struct rte_flow_item_phy_port *port_mask = item->mask;
300 uint32_t svif = 0, mask = 0;
302 /* Copy the rte_flow_item for phy port into hdr_field */
304 svif = port_spec->index;
306 mask = port_mask->index;
308 /* Update the SVIF details */
309 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
312 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Copies dst MAC, src MAC and ethertype spec/mask into consecutive
 * hdr_field slots, reserves the VLAN slots that may follow, and marks
 * O_ETH (first eth seen) or I_ETH (eth after a tunnel's outer eth) in
 * the header bitmap. NOTE(review): "¶ms" is mojibake for "&params".
 */
314 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
315 struct ulp_rte_parser_params *params)
317 const struct rte_flow_item_eth *eth_spec = item->spec;
318 const struct rte_flow_item_eth *eth_mask = item->mask;
319 struct ulp_rte_hdr_field *field;
320 uint32_t idx = params->field_idx;
321 uint64_t set_flag = 0;
325 * Copy the rte_flow_item for eth into hdr_field using ethernet
329 size = sizeof(eth_spec->dst.addr_bytes);
330 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
331 eth_spec->dst.addr_bytes,
333 size = sizeof(eth_spec->src.addr_bytes);
334 field = ulp_rte_parser_fld_copy(field,
335 eth_spec->src.addr_bytes,
337 field = ulp_rte_parser_fld_copy(field,
339 sizeof(eth_spec->type));
342 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
343 sizeof(eth_mask->dst.addr_bytes));
344 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
345 sizeof(eth_mask->src.addr_bytes));
346 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
347 sizeof(eth_mask->type));
349 /* Add number of vlan header elements */
350 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
/* remember where the (optional) VLAN fields will land */
351 params->vlan_idx = params->field_idx;
352 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
354 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
355 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
356 BNXT_ULP_HDR_BIT_O_ETH);
/* second ethernet header => this one is the inner (tunneled) eth */
358 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
360 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
361 BNXT_ULP_HDR_BIT_I_ETH);
363 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
364 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
366 return BNXT_TF_RC_SUCCESS;
369 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Splits the VLAN TCI into priority (top 3 bits) and tag, stores
 * priority/tag/inner_type spec and mask into the vlan_idx fields, then
 * classifies the tag as outer-first, outer-second, inner-first or
 * inner-second based on which eth headers and prior vtags were seen.
 * NOTE(review): "¶ms" is mojibake for "&params".
 * NOTE(review): priority = htons(vlan_tag >> 13) byte-swaps a 3-bit
 * host-order value — looks intentional for template layout but verify.
 */
371 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
372 struct ulp_rte_parser_params *params)
374 const struct rte_flow_item_vlan *vlan_spec = item->spec;
375 const struct rte_flow_item_vlan *vlan_mask = item->mask;
376 struct ulp_rte_hdr_field *field;
377 struct ulp_rte_hdr_bitmap *hdr_bit;
378 uint32_t idx = params->vlan_idx;
379 uint16_t vlan_tag, priority;
380 uint32_t outer_vtag_num;
381 uint32_t inner_vtag_num;
384 * Copy the rte_flow_item for vlan into hdr_field using Vlan
388 vlan_tag = ntohs(vlan_spec->tci);
389 priority = htons(vlan_tag >> 13);
391 vlan_tag = htons(vlan_tag);
393 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
396 field = ulp_rte_parser_fld_copy(field,
399 field = ulp_rte_parser_fld_copy(field,
400 &vlan_spec->inner_type,
401 sizeof(vlan_spec->inner_type));
/* same TCI split for the mask side */
405 vlan_tag = ntohs(vlan_mask->tci);
406 priority = htons(vlan_tag >> 13);
408 vlan_tag = htons(vlan_tag);
410 field = ¶ms->hdr_field[idx];
411 memcpy(field->mask, &priority, field->size);
413 memcpy(field->mask, &vlan_tag, field->size);
415 memcpy(field->mask, &vlan_mask->inner_type, field->size);
417 /* Set the vlan index to new incremented value */
418 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
420 /* Get the outer tag and inner tag counts */
421 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
422 BNXT_ULP_CF_IDX_O_VTAG_NUM);
423 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
424 BNXT_ULP_CF_IDX_I_VTAG_NUM);
426 /* Update the hdr_bitmap of the vlans */
427 hdr_bit = ¶ms->hdr_bitmap;
/* case 1: first vlan after outer eth only */
428 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
430 /* Update the vlan tag num */
432 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
434 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_PRESENT, 1);
/* case 2: second (QinQ) vlan on the outer eth */
435 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
436 ULP_COMP_FLD_IDX_RD(params,
437 BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
438 outer_vtag_num == 1) {
439 /* update the vlan tag num */
441 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
443 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
/* case 3: first vlan after the inner eth */
444 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
445 ULP_COMP_FLD_IDX_RD(params,
446 BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
447 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
449 /* update the vlan tag num */
451 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
453 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_PRESENT, 1);
/* case 4: second (QinQ) vlan on the inner eth.
 * NOTE(review): O_VTAG_PRESENT is tested TWICE in this condition;
 * the second occurrence almost certainly should be
 * BNXT_ULP_CF_IDX_I_VTAG_PRESENT (copy/paste slip) — confirm and fix
 * in the full source. */
454 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
455 ULP_COMP_FLD_IDX_RD(params,
456 BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
457 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
458 ULP_COMP_FLD_IDX_RD(params,
459 BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
460 inner_vtag_num == 1) {
461 /* update the vlan tag num */
463 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
465 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
467 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
468 return BNXT_TF_RC_ERROR;
470 return BNXT_TF_RC_SUCCESS;
473 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * Copies all ten IPv4 header fields (spec then mask) into consecutive
 * hdr_field slots, bumps field_idx, and classifies this L3 header as
 * outer or inner (a second L3 after an outer IPv4/IPv6 is inner; a
 * third L3 is rejected). NOTE(review): "¶ms" is mojibake for
 * "&params".
 */
475 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
476 struct ulp_rte_parser_params *params)
478 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
479 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
480 struct ulp_rte_hdr_field *field;
481 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
482 uint32_t idx = params->field_idx;
484 uint32_t inner_l3, outer_l3;
/* an inner L3 already recorded means this would be a third L3 */
486 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
488 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
489 return BNXT_TF_RC_ERROR;
493 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
/* spec fields, in wire order */
497 size = sizeof(ipv4_spec->hdr.version_ihl);
498 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
499 &ipv4_spec->hdr.version_ihl,
501 size = sizeof(ipv4_spec->hdr.type_of_service);
502 field = ulp_rte_parser_fld_copy(field,
503 &ipv4_spec->hdr.type_of_service,
505 size = sizeof(ipv4_spec->hdr.total_length);
506 field = ulp_rte_parser_fld_copy(field,
507 &ipv4_spec->hdr.total_length,
509 size = sizeof(ipv4_spec->hdr.packet_id);
510 field = ulp_rte_parser_fld_copy(field,
511 &ipv4_spec->hdr.packet_id,
513 size = sizeof(ipv4_spec->hdr.fragment_offset);
514 field = ulp_rte_parser_fld_copy(field,
515 &ipv4_spec->hdr.fragment_offset,
517 size = sizeof(ipv4_spec->hdr.time_to_live);
518 field = ulp_rte_parser_fld_copy(field,
519 &ipv4_spec->hdr.time_to_live,
521 size = sizeof(ipv4_spec->hdr.next_proto_id);
522 field = ulp_rte_parser_fld_copy(field,
523 &ipv4_spec->hdr.next_proto_id,
525 size = sizeof(ipv4_spec->hdr.hdr_checksum);
526 field = ulp_rte_parser_fld_copy(field,
527 &ipv4_spec->hdr.hdr_checksum,
529 size = sizeof(ipv4_spec->hdr.src_addr);
530 field = ulp_rte_parser_fld_copy(field,
531 &ipv4_spec->hdr.src_addr,
533 size = sizeof(ipv4_spec->hdr.dst_addr);
534 field = ulp_rte_parser_fld_copy(field,
535 &ipv4_spec->hdr.dst_addr,
/* mask fields, same order as the spec above */
539 ulp_rte_prsr_mask_copy(params, &idx,
540 &ipv4_mask->hdr.version_ihl,
541 sizeof(ipv4_mask->hdr.version_ihl));
542 ulp_rte_prsr_mask_copy(params, &idx,
543 &ipv4_mask->hdr.type_of_service,
544 sizeof(ipv4_mask->hdr.type_of_service));
545 ulp_rte_prsr_mask_copy(params, &idx,
546 &ipv4_mask->hdr.total_length,
547 sizeof(ipv4_mask->hdr.total_length));
548 ulp_rte_prsr_mask_copy(params, &idx,
549 &ipv4_mask->hdr.packet_id,
550 sizeof(ipv4_mask->hdr.packet_id));
551 ulp_rte_prsr_mask_copy(params, &idx,
552 &ipv4_mask->hdr.fragment_offset,
553 sizeof(ipv4_mask->hdr.fragment_offset));
554 ulp_rte_prsr_mask_copy(params, &idx,
555 &ipv4_mask->hdr.time_to_live,
556 sizeof(ipv4_mask->hdr.time_to_live));
557 ulp_rte_prsr_mask_copy(params, &idx,
558 &ipv4_mask->hdr.next_proto_id,
559 sizeof(ipv4_mask->hdr.next_proto_id));
560 ulp_rte_prsr_mask_copy(params, &idx,
561 &ipv4_mask->hdr.hdr_checksum,
562 sizeof(ipv4_mask->hdr.hdr_checksum));
563 ulp_rte_prsr_mask_copy(params, &idx,
564 &ipv4_mask->hdr.src_addr,
565 sizeof(ipv4_mask->hdr.src_addr));
566 ulp_rte_prsr_mask_copy(params, &idx,
567 &ipv4_mask->hdr.dst_addr,
568 sizeof(ipv4_mask->hdr.dst_addr));
570 /* Add the number of ipv4 header elements */
571 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
573 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
574 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
576 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
577 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
578 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
580 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
582 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
584 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
586 return BNXT_TF_RC_SUCCESS;
589 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPv6 twin of the IPv4 handler: copies the six IPv6 header fields
 * (spec then mask) into hdr_field slots and classifies outer vs inner
 * L3. NOTE(review): the "for ipv4" wording in the copy comment below
 * is a stale copy/paste from the IPv4 handler.
 * NOTE(review): "¶ms" is mojibake for "&params".
 */
591 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
592 struct ulp_rte_parser_params *params)
594 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
595 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
596 struct ulp_rte_hdr_field *field;
597 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
598 uint32_t idx = params->field_idx;
600 uint32_t inner_l3, outer_l3;
602 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
604 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
605 return BNXT_TF_RC_ERROR;
609 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
613 size = sizeof(ipv6_spec->hdr.vtc_flow);
614 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
615 &ipv6_spec->hdr.vtc_flow,
617 size = sizeof(ipv6_spec->hdr.payload_len);
618 field = ulp_rte_parser_fld_copy(field,
619 &ipv6_spec->hdr.payload_len,
621 size = sizeof(ipv6_spec->hdr.proto);
622 field = ulp_rte_parser_fld_copy(field,
623 &ipv6_spec->hdr.proto,
625 size = sizeof(ipv6_spec->hdr.hop_limits);
626 field = ulp_rte_parser_fld_copy(field,
627 &ipv6_spec->hdr.hop_limits,
629 size = sizeof(ipv6_spec->hdr.src_addr);
630 field = ulp_rte_parser_fld_copy(field,
631 &ipv6_spec->hdr.src_addr,
633 size = sizeof(ipv6_spec->hdr.dst_addr);
634 field = ulp_rte_parser_fld_copy(field,
635 &ipv6_spec->hdr.dst_addr,
/* mask fields, same order as the spec above */
639 ulp_rte_prsr_mask_copy(params, &idx,
640 &ipv6_mask->hdr.vtc_flow,
641 sizeof(ipv6_mask->hdr.vtc_flow));
642 ulp_rte_prsr_mask_copy(params, &idx,
643 &ipv6_mask->hdr.payload_len,
644 sizeof(ipv6_mask->hdr.payload_len));
645 ulp_rte_prsr_mask_copy(params, &idx,
646 &ipv6_mask->hdr.proto,
647 sizeof(ipv6_mask->hdr.proto));
648 ulp_rte_prsr_mask_copy(params, &idx,
649 &ipv6_mask->hdr.hop_limits,
650 sizeof(ipv6_mask->hdr.hop_limits));
651 ulp_rte_prsr_mask_copy(params, &idx,
652 &ipv6_mask->hdr.src_addr,
653 sizeof(ipv6_mask->hdr.src_addr));
654 ulp_rte_prsr_mask_copy(params, &idx,
655 &ipv6_mask->hdr.dst_addr,
656 sizeof(ipv6_mask->hdr.dst_addr));
658 /* add number of ipv6 header elements */
659 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
661 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
662 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
664 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
665 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
666 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
667 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
669 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
670 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
672 return BNXT_TF_RC_SUCCESS;
675 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies the four UDP header fields (spec then mask) into hdr_field
 * slots and classifies this L4 header as outer or inner; a third L4 is
 * rejected. NOTE(review): the "for ipv4" copy comment below is stale
 * copy/paste. NOTE(review): "¶ms" is mojibake for "&params".
 */
677 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
678 struct ulp_rte_parser_params *params)
680 const struct rte_flow_item_udp *udp_spec = item->spec;
681 const struct rte_flow_item_udp *udp_mask = item->mask;
682 struct ulp_rte_hdr_field *field;
683 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
684 uint32_t idx = params->field_idx;
686 uint32_t inner_l4, outer_l4;
688 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
690 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
691 return BNXT_TF_RC_ERROR;
695 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
699 size = sizeof(udp_spec->hdr.src_port);
700 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
701 &udp_spec->hdr.src_port,
703 size = sizeof(udp_spec->hdr.dst_port);
704 field = ulp_rte_parser_fld_copy(field,
705 &udp_spec->hdr.dst_port,
707 size = sizeof(udp_spec->hdr.dgram_len);
708 field = ulp_rte_parser_fld_copy(field,
709 &udp_spec->hdr.dgram_len,
711 size = sizeof(udp_spec->hdr.dgram_cksum);
712 field = ulp_rte_parser_fld_copy(field,
713 &udp_spec->hdr.dgram_cksum,
/* mask fields, same order as the spec above */
717 ulp_rte_prsr_mask_copy(params, &idx,
718 &udp_mask->hdr.src_port,
719 sizeof(udp_mask->hdr.src_port));
720 ulp_rte_prsr_mask_copy(params, &idx,
721 &udp_mask->hdr.dst_port,
722 sizeof(udp_mask->hdr.dst_port));
723 ulp_rte_prsr_mask_copy(params, &idx,
724 &udp_mask->hdr.dgram_len,
725 sizeof(udp_mask->hdr.dgram_len));
726 ulp_rte_prsr_mask_copy(params, &idx,
727 &udp_mask->hdr.dgram_cksum,
728 sizeof(udp_mask->hdr.dgram_cksum));
731 /* Add number of UDP header elements */
732 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
734 /* Set the udp header bitmap and computed l4 header bitmaps */
735 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
737 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
738 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
739 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
740 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
742 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
743 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
745 return BNXT_TF_RC_SUCCESS;
748 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies the nine TCP header fields (spec then mask) into hdr_field
 * slots and classifies this L4 header as outer or inner; a third L4 is
 * rejected. NOTE(review): the "for ipv4" copy comment below is stale
 * copy/paste. NOTE(review): "¶ms" is mojibake for "&params".
 * NOTE(review): unlike the UDP/IPv4 handlers, idx is bumped by
 * BNXT_ULP_PROTO_HDR_TCP_NUM *before* the mask copies — presumably a
 * spec-NULL branch is hidden in the missing lines; confirm the mask
 * indices land on the intended slots.
 */
750 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
751 struct ulp_rte_parser_params *params)
753 const struct rte_flow_item_tcp *tcp_spec = item->spec;
754 const struct rte_flow_item_tcp *tcp_mask = item->mask;
755 struct ulp_rte_hdr_field *field;
756 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
757 uint32_t idx = params->field_idx;
759 uint32_t inner_l4, outer_l4;
761 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
763 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
764 return BNXT_TF_RC_ERROR;
768 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
772 size = sizeof(tcp_spec->hdr.src_port);
773 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
774 &tcp_spec->hdr.src_port,
776 size = sizeof(tcp_spec->hdr.dst_port);
777 field = ulp_rte_parser_fld_copy(field,
778 &tcp_spec->hdr.dst_port,
780 size = sizeof(tcp_spec->hdr.sent_seq);
781 field = ulp_rte_parser_fld_copy(field,
782 &tcp_spec->hdr.sent_seq,
784 size = sizeof(tcp_spec->hdr.recv_ack);
785 field = ulp_rte_parser_fld_copy(field,
786 &tcp_spec->hdr.recv_ack,
788 size = sizeof(tcp_spec->hdr.data_off);
789 field = ulp_rte_parser_fld_copy(field,
790 &tcp_spec->hdr.data_off,
792 size = sizeof(tcp_spec->hdr.tcp_flags);
793 field = ulp_rte_parser_fld_copy(field,
794 &tcp_spec->hdr.tcp_flags,
796 size = sizeof(tcp_spec->hdr.rx_win);
797 field = ulp_rte_parser_fld_copy(field,
798 &tcp_spec->hdr.rx_win,
800 size = sizeof(tcp_spec->hdr.cksum);
801 field = ulp_rte_parser_fld_copy(field,
802 &tcp_spec->hdr.cksum,
804 size = sizeof(tcp_spec->hdr.tcp_urp);
805 field = ulp_rte_parser_fld_copy(field,
806 &tcp_spec->hdr.tcp_urp,
809 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
/* mask fields, same order as the spec above */
813 ulp_rte_prsr_mask_copy(params, &idx,
814 &tcp_mask->hdr.src_port,
815 sizeof(tcp_mask->hdr.src_port));
816 ulp_rte_prsr_mask_copy(params, &idx,
817 &tcp_mask->hdr.dst_port,
818 sizeof(tcp_mask->hdr.dst_port));
819 ulp_rte_prsr_mask_copy(params, &idx,
820 &tcp_mask->hdr.sent_seq,
821 sizeof(tcp_mask->hdr.sent_seq));
822 ulp_rte_prsr_mask_copy(params, &idx,
823 &tcp_mask->hdr.recv_ack,
824 sizeof(tcp_mask->hdr.recv_ack));
825 ulp_rte_prsr_mask_copy(params, &idx,
826 &tcp_mask->hdr.data_off,
827 sizeof(tcp_mask->hdr.data_off));
828 ulp_rte_prsr_mask_copy(params, &idx,
829 &tcp_mask->hdr.tcp_flags,
830 sizeof(tcp_mask->hdr.tcp_flags));
831 ulp_rte_prsr_mask_copy(params, &idx,
832 &tcp_mask->hdr.rx_win,
833 sizeof(tcp_mask->hdr.rx_win));
834 ulp_rte_prsr_mask_copy(params, &idx,
835 &tcp_mask->hdr.cksum,
836 sizeof(tcp_mask->hdr.cksum));
837 ulp_rte_prsr_mask_copy(params, &idx,
838 &tcp_mask->hdr.tcp_urp,
839 sizeof(tcp_mask->hdr.tcp_urp));
841 /* add number of TCP header elements */
842 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
844 /* Set the udp header bitmap and computed l4 header bitmaps */
845 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
847 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
848 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
849 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
850 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
852 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
853 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
855 return BNXT_TF_RC_SUCCESS;
858 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies flags/rsvd0/vni/rsvd1 (spec then mask) into hdr_field slots,
 * bumps field_idx and marks the tunnel VXLAN bit in the header bitmap.
 * NOTE(review): "¶ms" is mojibake for "&params".
 */
860 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
861 struct ulp_rte_parser_params *params)
863 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
864 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
865 struct ulp_rte_hdr_field *field;
866 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
867 uint32_t idx = params->field_idx;
871 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
875 size = sizeof(vxlan_spec->flags);
876 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
879 size = sizeof(vxlan_spec->rsvd0);
880 field = ulp_rte_parser_fld_copy(field,
883 size = sizeof(vxlan_spec->vni);
884 field = ulp_rte_parser_fld_copy(field,
887 size = sizeof(vxlan_spec->rsvd1);
888 field = ulp_rte_parser_fld_copy(field,
/* mask fields, same order as the spec above */
893 ulp_rte_prsr_mask_copy(params, &idx,
895 sizeof(vxlan_mask->flags));
896 ulp_rte_prsr_mask_copy(params, &idx,
898 sizeof(vxlan_mask->rsvd0));
899 ulp_rte_prsr_mask_copy(params, &idx,
901 sizeof(vxlan_mask->vni));
902 ulp_rte_prsr_mask_copy(params, &idx,
904 sizeof(vxlan_mask->rsvd1));
906 /* Add number of vxlan header elements */
907 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
909 /* Update the hdr_bitmap with vxlan */
910 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
911 return BNXT_TF_RC_SUCCESS;
914 /* Function to handle the parsing of RTE Flow item void Header */
/* VOID items carry nothing to match on; accept and move on. */
916 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
917 struct ulp_rte_parser_params *params __rte_unused)
919 return BNXT_TF_RC_SUCCESS;
922 /* Function to handle the parsing of RTE Flow action void Header. */
/* VOID actions are no-ops; accept and move on. */
924 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
925 struct ulp_rte_parser_params *params __rte_unused)
927 return BNXT_TF_RC_SUCCESS;
930 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the mark id (big-endian) into the MARK action property and
 * sets the MARK action bit; errors out if the action conf is NULL.
 * NOTE(review): the "with vxlan" comment below is stale copy/paste —
 * it updates the MARK bit. NOTE(review): "¶m" is mojibake for
 * "&param".
 */
932 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
933 struct ulp_rte_parser_params *param)
935 const struct rte_flow_action_mark *mark;
936 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
939 mark = action_item->conf;
941 mark_id = tfp_cpu_to_be_32(mark->id);
942 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
943 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
945 /* Update the hdr_bitmap with vxlan */
946 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
947 return BNXT_TF_RC_SUCCESS;
949 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
950 return BNXT_TF_RC_ERROR;
953 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Sets the RSS action bit when the action conf is present; the rss
 * config itself appears unused beyond the NULL check in this view.
 * NOTE(review): the "with vxlan" comment below is stale copy/paste.
 */
955 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
956 struct ulp_rte_parser_params *param)
958 const struct rte_flow_action_rss *rss = action_item->conf;
961 /* Update the hdr_bitmap with vxlan */
962 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
963 return BNXT_TF_RC_SUCCESS;
965 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
966 return BNXT_TF_RC_ERROR;
969 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Builds the VXLAN-encap action properties from the action's item
 * definition list. Expected item sequence (VOIDs skipped between each):
 * ETH -> [VLAN [VLAN]] -> IPV4|IPV6 -> UDP -> VXLAN. Copies L2 dmac,
 * optional vlan tags (count+size recorded), the L3 header (with
 * defaults filled for empty IPv4 specs), UDP and the VXLAN header into
 * the act_prop encap buffers, then sets the VXLAN_ENCAP action bit.
 * NOTE(review): "¶ms"/"¶m" forms are mojibake for "&params".
 */
971 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
972 struct ulp_rte_parser_params *params)
974 const struct rte_flow_action_vxlan_encap *vxlan_encap;
975 const struct rte_flow_item *item;
976 const struct rte_flow_item_eth *eth_spec;
977 const struct rte_flow_item_ipv4 *ipv4_spec;
978 const struct rte_flow_item_ipv6 *ipv6_spec;
979 struct rte_flow_item_vxlan vxlan_spec;
980 uint32_t vlan_num = 0, vlan_size = 0;
981 uint32_t ip_size = 0, ip_type = 0;
982 uint32_t vxlan_size = 0;
984 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
985 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
987 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
988 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
990 vxlan_encap = action_item->conf;
992 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
993 return BNXT_TF_RC_ERROR;
996 item = vxlan_encap->definition;
998 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
999 return BNXT_TF_RC_ERROR;
1002 if (!ulp_rte_item_skip_void(&item, 0))
1003 return BNXT_TF_RC_ERROR;
1005 /* must have ethernet header */
1006 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1007 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1008 return BNXT_TF_RC_ERROR;
1010 eth_spec = item->spec;
1011 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1012 ulp_encap_buffer_copy(buff,
1013 eth_spec->dst.addr_bytes,
1014 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1016 /* Goto the next item */
1017 if (!ulp_rte_item_skip_void(&item, 1))
1018 return BNXT_TF_RC_ERROR;
1020 /* May have vlan header */
1021 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1023 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1024 ulp_encap_buffer_copy(buff,
1026 sizeof(struct rte_flow_item_vlan));
1028 if (!ulp_rte_item_skip_void(&item, 1))
1029 return BNXT_TF_RC_ERROR;
1032 /* may have two vlan headers */
1033 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
/* second tag is stored immediately after the first */
1035 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1036 sizeof(struct rte_flow_item_vlan)],
1038 sizeof(struct rte_flow_item_vlan));
1039 if (!ulp_rte_item_skip_void(&item, 1))
1040 return BNXT_TF_RC_ERROR;
1042 /* Update the vlan count and size of more than one */
/* count and total size are written big-endian into act_prop */
1044 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1045 vlan_num = tfp_cpu_to_be_32(vlan_num);
1046 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1049 vlan_size = tfp_cpu_to_be_32(vlan_size);
1050 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1055 /* L3 must be IPv4, IPv6 */
1056 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1057 ipv4_spec = item->spec;
1058 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1060 /* copy the ipv4 details */
/* empty spec => use the canned default IPv4 header bytes */
1061 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1062 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1063 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1064 ulp_encap_buffer_copy(buff,
1066 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1067 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1069 const uint8_t *tmp_buff;
/* non-empty spec: copy ver/hlen/tos, id..proto, then dst ip,
 * each into its fixed offset within the encap IP buffer */
1071 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1072 ulp_encap_buffer_copy(buff,
1073 &ipv4_spec->hdr.version_ihl,
1074 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1075 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1076 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1077 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1078 ulp_encap_buffer_copy(buff,
1080 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1082 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1083 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1084 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1085 ulp_encap_buffer_copy(buff,
1086 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1087 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1089 /* Update the ip size details */
1090 ip_size = tfp_cpu_to_be_32(ip_size);
1091 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1092 &ip_size, sizeof(uint32_t));
1094 /* update the ip type */
1095 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1096 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1097 &ip_type, sizeof(uint32_t));
1099 if (!ulp_rte_item_skip_void(&item, 1))
1100 return BNXT_TF_RC_ERROR;
1101 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1102 ipv6_spec = item->spec;
1103 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
/* NOTE(review): "ipv4" below is a stale comment — IPv6 branch */
1105 /* copy the ipv4 details */
1106 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1107 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1109 /* Update the ip size details */
1110 ip_size = tfp_cpu_to_be_32(ip_size);
1111 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1112 &ip_size, sizeof(uint32_t));
1114 /* update the ip type */
1115 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1116 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1117 &ip_type, sizeof(uint32_t));
1119 if (!ulp_rte_item_skip_void(&item, 1))
1120 return BNXT_TF_RC_ERROR;
1122 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1123 return BNXT_TF_RC_ERROR;
1127 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1128 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1129 return BNXT_TF_RC_ERROR;
1131 /* copy the udp details */
1132 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1133 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1135 if (!ulp_rte_item_skip_void(&item, 1))
1136 return BNXT_TF_RC_ERROR;
1139 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1140 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1141 return BNXT_TF_RC_ERROR;
1143 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1144 /* copy the vxlan details */
1145 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* 0x08 = I flag (valid VNI) per RFC 7348 — consider a named macro */
1146 vxlan_spec.flags = 0x08;
1147 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1148 (const uint8_t *)&vxlan_spec,
1150 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1151 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1152 &vxlan_size, sizeof(uint32_t));
1154 /*update the hdr_bitmap with vxlan */
1155 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1156 return BNXT_TF_RC_SUCCESS;
1159 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1161 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1163 struct ulp_rte_parser_params *params)
1165 /* update the hdr_bitmap with vxlan */
1166 ULP_BITMAP_SET(params->act_bitmap.bits,
1167 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1168 return BNXT_TF_RC_SUCCESS;
1171 /* Function to handle the parsing of RTE Flow action drop Header. */
1173 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1174 struct ulp_rte_parser_params *params)
1176 /* Update the hdr_bitmap with drop */
1177 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1178 return BNXT_TF_RC_SUCCESS;
1181 /* Function to handle the parsing of RTE Flow action count. */
1183 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1184 struct ulp_rte_parser_params *params)
1187 const struct rte_flow_action_count *act_count;
1188 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1190 act_count = action_item->conf;
1192 if (act_count->shared) {
1194 "Parse Error:Shared count not supported\n");
1195 return BNXT_TF_RC_PARSE_ERR;
1197 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1199 BNXT_ULP_ACT_PROP_SZ_COUNT);
1202 /* Update the hdr_bitmap with count */
1203 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1204 return BNXT_TF_RC_SUCCESS;
1207 /* Function to handle the parsing of RTE Flow action PF. */
1209 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1210 struct ulp_rte_parser_params *params)
1214 /* Update the hdr_bitmap with vnic bit */
1215 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1217 /* copy the PF of the current device into VNIC Property */
1218 svif = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1219 svif = bnxt_get_vnic_id(svif);
1220 svif = rte_cpu_to_be_32(svif);
1221 memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1222 &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1224 return BNXT_TF_RC_SUCCESS;
1227 /* Function to handle the parsing of RTE Flow action VF. */
1229 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1230 struct ulp_rte_parser_params *param)
1232 const struct rte_flow_action_vf *vf_action;
1235 vf_action = action_item->conf;
1237 if (vf_action->original) {
1239 "Parse Error:VF Original not supported\n");
1240 return BNXT_TF_RC_PARSE_ERR;
1242 /* TBD: Update the computed VNIC using VF conversion */
1243 pid = bnxt_get_vnic_id(vf_action->id);
1244 pid = rte_cpu_to_be_32(pid);
1245 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1246 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1249 /* Update the hdr_bitmap with count */
1250 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1251 return BNXT_TF_RC_SUCCESS;
1254 /* Function to handle the parsing of RTE Flow action port_id. */
1256 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1257 struct ulp_rte_parser_params *param)
1259 const struct rte_flow_action_port_id *port_id;
1262 port_id = act_item->conf;
1264 if (port_id->original) {
1266 "ParseErr:Portid Original not supported\n");
1267 return BNXT_TF_RC_PARSE_ERR;
1269 /* TBD: Update the computed VNIC using port conversion */
1270 pid = bnxt_get_vnic_id(port_id->id);
1271 pid = rte_cpu_to_be_32(pid);
1272 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1273 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1276 /* Update the hdr_bitmap with count */
1277 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1278 return BNXT_TF_RC_SUCCESS;
1281 /* Function to handle the parsing of RTE Flow action phy_port. */
1283 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1284 struct ulp_rte_parser_params *prm)
1286 const struct rte_flow_action_phy_port *phy_port;
1289 phy_port = action_item->conf;
1291 if (phy_port->original) {
1293 "Parse Err:Port Original not supported\n");
1294 return BNXT_TF_RC_PARSE_ERR;
1296 /* Get the vport of the physical port */
1297 /* TBD: shall be changed later to portdb call */
1298 vport = 1 << phy_port->index;
1299 vport = rte_cpu_to_be_32(vport);
1300 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1301 &vport, BNXT_ULP_ACT_PROP_SZ_VPORT);
1304 /* Update the hdr_bitmap with count */
1305 ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1306 return BNXT_TF_RC_SUCCESS;