1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Utility function to skip the void items. */
/* Advance *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries,
 * optionally stepping forward by 'increment' first.
 * NOTE(review): fragmentary listing — return type, braces and the
 * pointer-advance statement are not visible; verify against full source.
 */
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
/* Loop while the current item exists and is a VOID placeholder. */
23 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 /* Utility function to update the field_bitmap */
/* Update params->fld_bitmap for header field 'idx': set the index bit when
 * the field's mask is non-zero, and additionally mark the flow as a
 * wildcard match when the mask is not all-ones; otherwise clear the bit.
 * NOTE(review): "¶ms" below is mojibake for "&params" — fix encoding
 * before compiling.
 */
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
35 struct ulp_rte_hdr_field *field;
37 field = ¶ms->hdr_field[idx];
38 if (ulp_bitmap_notzero(field->mask, field->size)) {
39 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial mask (not all ones) => wildcard-match table selection. */
41 if (!ulp_bitmap_is_ones(field->mask, field->size))
42 ULP_BITMAP_SET(params->fld_bitmap.bits,
43 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
/* Zero mask => field not matched on; clear its bitmap index. */
45 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
49 /* Utility function to copy field spec items */
/* Copy 'buffer' into field->spec (field->size bytes) and — per the visible
 * callers — return a pointer used as the next hdr_field slot.
 * NOTE(review): the size-assignment and return statement are not visible in
 * this fragment; confirm against the full source.
 */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
56 memcpy(field->spec, buffer, field->size);
61 /* Utility function to copy field masks items */
/* Copy a mask buffer into hdr_field[*idx].mask and refresh the field
 * bitmap for that index. Callers pass idx by pointer; presumably it is
 * incremented in a line not visible here — TODO confirm.
 * NOTE(review): "¶ms" is mojibake for "&params".
 */
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
68 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
70 memcpy(field->mask, buffer, size);
71 ulp_rte_parser_field_bitmap_update(params, *idx);
76 * Function to handle the parsing of RTE Flows and placing
77 * the RTE flow items into the ulp structures.
/* Walk the RTE flow item array and dispatch each supported item to its
 * registered proto_hdr_func (from ulp_hdr_info[]); reject unsupported
 * types. On success, applies the implied SVIF and returns
 * BNXT_TF_RC_SUCCESS.
 * NOTE(review): fragmentary listing — the BNXT_TF_DBG call, loop-closing
 * braces and the item++ advance are not visible here.
 */
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81 struct ulp_rte_parser_params *params)
83 const struct rte_flow_item *item = pattern;
84 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots for the SVIF entries. */
86 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87 if (params->dir == ULP_DIR_EGRESS)
88 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
91 /* Parse all the items in the pattern */
92 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
93 /* get the header information from the flow_hdr_info table */
94 hdr_info = &ulp_hdr_info[item->type];
95 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
97 "Truflow parser does not support type %d\n",
99 return BNXT_TF_RC_PARSE_ERR;
100 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
101 /* call the registered callback handler */
102 if (hdr_info->proto_hdr_func) {
103 if (hdr_info->proto_hdr_func(item, params) !=
104 BNXT_TF_RC_SUCCESS) {
105 return BNXT_TF_RC_ERROR;
111 /* update the implied SVIF */
112 (void)ulp_rte_parser_svif_process(params);
113 return BNXT_TF_RC_SUCCESS;
117 * Function to handle the parsing of RTE Flows and placing
118 * the RTE flow actions into the ulp structures.
/* Walk the RTE flow action array and dispatch each supported action to its
 * registered proto_act_func (from ulp_act_info[]); reject unsupported
 * actions. On success, applies the implied VNIC and returns
 * BNXT_TF_RC_SUCCESS. Mirrors bnxt_ulp_rte_parser_hdr_parse().
 * NOTE(review): fragmentary listing — debug-print call, closing braces and
 * the action_item++ advance are not visible here.
 */
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122 struct ulp_rte_parser_params *params)
124 const struct rte_flow_action *action_item = actions;
125 struct bnxt_ulp_rte_act_info *hdr_info;
127 if (params->dir == ULP_DIR_EGRESS)
128 ULP_BITMAP_SET(params->act_bitmap.bits,
129 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
131 /* Parse all the items in the pattern */
132 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
133 /* get the header information from the flow_hdr_info table */
134 hdr_info = &ulp_act_info[action_item->type];
135 if (hdr_info->act_type ==
136 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
138 "Truflow parser does not support act %u\n",
140 return BNXT_TF_RC_ERROR;
141 } else if (hdr_info->act_type ==
142 BNXT_ULP_ACT_TYPE_SUPPORTED) {
143 /* call the registered callback handler */
144 if (hdr_info->proto_act_func) {
145 if (hdr_info->proto_act_func(action_item,
147 BNXT_TF_RC_SUCCESS) {
148 return BNXT_TF_RC_ERROR;
154 /* update the implied VNIC */
155 ulp_rte_parser_vnic_process(params);
156 return BNXT_TF_RC_SUCCESS;
159 /* Function to handle the parsing of RTE Flow item PF Header. */
/* Record the source interface (SVIF) spec/mask into the dedicated
 * hdr_field slot and latch it in the SVIF_FLAG computed field. Rejects a
 * second SVIF source. For PORT_ID items the DPDK port id is first
 * translated to a ULP ifindex and then to the proper SVIF by direction
 * and interface type (PHY port on ingress unless VF-rep; DRV func
 * otherwise — presumably; fragment hides some branches).
 * NOTE(review): "¶ms" is mojibake for "&params"; error-check lines for
 * the port-db lookups are not visible here.
 */
161 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
162 enum rte_flow_item_type proto,
166 uint16_t port_id = svif;
168 struct ulp_rte_hdr_field *hdr_field;
169 enum bnxt_ulp_svif_type svif_type;
170 enum bnxt_ulp_intf_type if_type;
/* Only one SVIF source per flow is allowed. */
174 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
175 BNXT_ULP_INVALID_SVIF_VAL) {
177 "SVIF already set,multiple source not support'd\n");
178 return BNXT_TF_RC_ERROR;
181 if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
182 dir = ULP_COMP_FLD_IDX_RD(params,
183 BNXT_ULP_CF_IDX_DIRECTION);
184 /* perform the conversion from dpdk port to bnxt svif */
185 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
189 "Invalid port id\n");
190 return BNXT_TF_RC_ERROR;
193 if (dir == ULP_DIR_INGRESS) {
194 svif_type = BNXT_ULP_PHY_PORT_SVIF;
196 if_type = bnxt_get_interface_type(port_id);
197 if (if_type == BNXT_ULP_INTF_TYPE_VF_REP)
198 svif_type = BNXT_ULP_VF_FUNC_SVIF;
200 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
202 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* SVIF is stored big-endian in the hdr_field spec. */
204 svif = rte_cpu_to_be_16(svif);
206 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
207 memcpy(hdr_field->spec, &svif, sizeof(svif));
208 memcpy(hdr_field->mask, &mask, sizeof(mask));
209 hdr_field->size = sizeof(svif);
/* Latch the (CPU-order) SVIF so later callers know it is set. */
210 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
211 rte_be_to_cpu_16(svif));
212 return BNXT_TF_RC_SUCCESS;
215 /* Function to handle the parsing of the RTE port id */
/* Apply the implied SVIF when no explicit PF/VF/port item set one:
 * derive it from the incoming interface computed field. No-op (success)
 * if the SVIF flag is already latched.
 */
217 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
219 uint16_t port_id = 0;
220 uint16_t svif_mask = 0xFFFF;
221 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
223 BNXT_ULP_INVALID_SVIF_VAL)
224 return BNXT_TF_RC_SUCCESS;
226 /* SVIF not set. So get the port id */
227 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
229 /* Update the SVIF details */
230 return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
234 /* Function to handle the implicit VNIC RTE port id */
/* Apply the implied destination VNIC when the actions named neither a
 * VNIC nor a VPORT: reuse the PF action handler to fill the vnic, then
 * clear the VNIC action bit it sets (the vnic data stays populated).
 * NOTE(review): "¶ms" is mojibake for "&params".
 */
236 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
238 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
240 if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
241 ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
242 return BNXT_TF_RC_SUCCESS;
244 /* Update the vnic details */
245 ulp_rte_pf_act_handler(NULL, params);
246 /* Reset the hdr_bitmap with vnic bit */
247 ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
249 return BNXT_TF_RC_SUCCESS;
252 /* Function to handle the parsing of RTE Flow item PF Header. */
/* PF item handler: treat the incoming interface as the SVIF source with a
 * full mask. The trailing arguments of the svif_set call are not visible
 * in this fragment.
 */
254 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
255 struct ulp_rte_parser_params *params)
257 uint16_t port_id = 0;
258 uint16_t svif_mask = 0xFFFF;
260 /* Get the port id */
261 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
263 /* Update the SVIF details */
264 return ulp_rte_parser_svif_set(params,
269 /* Function to handle the parsing of RTE Flow item VF Header. */
/* VF item handler: take the VF id from spec/mask (narrowed to 16 bits)
 * as the SVIF source. NULL-checks on vf_spec/vf_mask are presumably in
 * lines not visible here — TODO confirm.
 */
271 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
272 struct ulp_rte_parser_params *params)
274 const struct rte_flow_item_vf *vf_spec = item->spec;
275 const struct rte_flow_item_vf *vf_mask = item->mask;
276 uint16_t svif = 0, mask = 0;
278 /* Get VF rte_flow_item for Port details */
280 svif = (uint16_t)vf_spec->id;
282 mask = (uint16_t)vf_mask->id;
284 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
287 /* Function to handle the parsing of RTE Flow item port id Header. */
/* PORT_ID item handler: take the DPDK port id from spec/mask (narrowed to
 * 16 bits) as the SVIF source; ulp_rte_parser_svif_set() converts it to a
 * device SVIF. NULL-checks on spec/mask are presumably in lines not
 * visible here — TODO confirm.
 */
289 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
290 struct ulp_rte_parser_params *params)
292 const struct rte_flow_item_port_id *port_spec = item->spec;
293 const struct rte_flow_item_port_id *port_mask = item->mask;
294 uint16_t svif = 0, mask = 0;
297 * Copy the rte_flow_item for Port into hdr_field using port id
301 svif = (uint16_t)port_spec->id;
303 mask = (uint16_t)port_mask->id;
305 /* Update the SVIF details */
306 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
309 /* Function to handle the parsing of RTE Flow item phy port Header. */
/* PHY_PORT item handler: take the physical port index from spec/mask as
 * the SVIF source. Note svif/mask are uint32_t here but the svif_set
 * parameters are 16-bit — relies on implicit narrowing.
 */
311 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
312 struct ulp_rte_parser_params *params)
314 const struct rte_flow_item_phy_port *port_spec = item->spec;
315 const struct rte_flow_item_phy_port *port_mask = item->mask;
316 uint32_t svif = 0, mask = 0;
318 /* Copy the rte_flow_item for phy port into hdr_field */
320 svif = port_spec->index;
322 mask = port_mask->index;
324 /* Update the SVIF details */
325 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
328 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/* ETH item handler: copy dst MAC, src MAC and ethertype spec/mask into
 * consecutive hdr_field slots, reserve the VLAN field slots that may
 * follow, and set O_ETH (first eth) or I_ETH (second eth, i.e. after an
 * outer eth was already seen) in the header bitmap.
 * NOTE(review): "¶ms"/"ð_mask" are mojibake for "&params"/"&eth_mask";
 * the NULL-guards around spec/mask and the set_flag if/else are partly
 * outside this fragment.
 */
330 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
331 struct ulp_rte_parser_params *params)
333 const struct rte_flow_item_eth *eth_spec = item->spec;
334 const struct rte_flow_item_eth *eth_mask = item->mask;
335 struct ulp_rte_hdr_field *field;
336 uint32_t idx = params->field_idx;
337 uint64_t set_flag = 0;
341 * Copy the rte_flow_item for eth into hdr_field using ethernet
345 size = sizeof(eth_spec->dst.addr_bytes);
346 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
347 eth_spec->dst.addr_bytes,
349 size = sizeof(eth_spec->src.addr_bytes);
350 field = ulp_rte_parser_fld_copy(field,
351 eth_spec->src.addr_bytes,
353 field = ulp_rte_parser_fld_copy(field,
355 sizeof(eth_spec->type));
358 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
359 sizeof(eth_mask->dst.addr_bytes));
360 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
361 sizeof(eth_mask->src.addr_bytes));
362 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
363 sizeof(eth_mask->type));
365 /* Add number of vlan header elements */
366 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
/* Pre-reserve slots so a later VLAN item writes at vlan_idx. */
367 params->vlan_idx = params->field_idx;
368 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
370 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
371 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
372 BNXT_ULP_HDR_BIT_O_ETH);
374 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
376 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
377 BNXT_ULP_HDR_BIT_I_ETH);
379 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
380 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
382 return BNXT_TF_RC_SUCCESS;
385 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/* VLAN item handler: split the TCI into priority (top 3 bits) and the
 * 12/13-bit tag, store both plus inner_type into the vlan field slots,
 * then classify the tag as OO/OI (outer eth) or IO/II (inner eth) vlan by
 * how many tags were already counted; errors if no eth header preceded.
 * NOTE(review): "¶ms" is mojibake for "&params"; the masking of
 * vlan_tag (likely "vlan_tag &= 0x1fff"-style) and several field->size /
 * idx increments fall in lines not visible here — verify before relying
 * on the exact byte layout. "withtout" in the error string is a typo in
 * the runtime literal; left untouched here.
 */
387 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
388 struct ulp_rte_parser_params *params)
390 const struct rte_flow_item_vlan *vlan_spec = item->spec;
391 const struct rte_flow_item_vlan *vlan_mask = item->mask;
392 struct ulp_rte_hdr_field *field;
393 struct ulp_rte_hdr_bitmap *hdr_bit;
394 uint32_t idx = params->vlan_idx;
395 uint16_t vlan_tag, priority;
396 uint32_t outer_vtag_num;
397 uint32_t inner_vtag_num;
400 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* Spec: extract PCP and tag from the big-endian TCI. */
404 vlan_tag = ntohs(vlan_spec->tci);
405 priority = htons(vlan_tag >> 13);
407 vlan_tag = htons(vlan_tag);
409 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
412 field = ulp_rte_parser_fld_copy(field,
415 field = ulp_rte_parser_fld_copy(field,
416 &vlan_spec->inner_type,
417 sizeof(vlan_spec->inner_type));
/* Mask: same PCP/tag split applied to the mask TCI. */
421 vlan_tag = ntohs(vlan_mask->tci);
422 priority = htons(vlan_tag >> 13);
424 vlan_tag = htons(vlan_tag);
426 field = ¶ms->hdr_field[idx];
427 memcpy(field->mask, &priority, field->size);
429 memcpy(field->mask, &vlan_tag, field->size);
431 memcpy(field->mask, &vlan_mask->inner_type, field->size);
433 /* Set the vlan index to new incremented value */
434 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
436 /* Get the outer tag and inner tag counts */
437 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
438 BNXT_ULP_CF_IDX_O_VTAG_NUM);
439 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
440 BNXT_ULP_CF_IDX_I_VTAG_NUM);
442 /* Update the hdr_bitmap of the vlans */
443 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first vlan after outer eth only -> outer-outer vlan. */
444 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
445 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
447 /* Update the vlan tag num */
449 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
451 ULP_BITMAP_SET(params->hdr_bitmap.bits,
452 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Case 2: second vlan on the outer eth -> outer-inner vlan. */
453 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
454 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
455 outer_vtag_num == 1) {
456 /* update the vlan tag num */
458 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
460 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
461 ULP_BITMAP_SET(params->hdr_bitmap.bits,
462 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Case 3: first vlan after the inner eth -> inner-outer vlan. */
463 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
464 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
466 /* update the vlan tag num */
468 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
470 ULP_BITMAP_SET(params->hdr_bitmap.bits,
471 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Case 4: second vlan on the inner eth -> inner-inner vlan. */
472 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
473 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
474 inner_vtag_num == 1) {
475 /* update the vlan tag num */
477 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
479 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
480 ULP_BITMAP_SET(params->hdr_bitmap.bits,
481 BNXT_ULP_HDR_BIT_II_VLAN);
483 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
484 return BNXT_TF_RC_ERROR;
486 return BNXT_TF_RC_SUCCESS;
489 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/* IPV4 item handler: copy all ten rte_ipv4_hdr fields' spec and mask into
 * consecutive hdr_field slots, then set O_IPV4 or I_IPV4 (when an outer
 * L3 was already seen) and bump the corresponding L3 counter. Rejects a
 * third L3 header (inner_l3 already counted).
 * NOTE(review): "¶ms" is mojibake for "&params"; NULL-guards around
 * ipv4_spec/ipv4_mask and the size argument of each fld_copy call fall in
 * lines not visible in this fragment.
 */
491 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
492 struct ulp_rte_parser_params *params)
494 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
495 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
496 struct ulp_rte_hdr_field *field;
497 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
498 uint32_t idx = params->field_idx;
500 uint32_t inner_l3, outer_l3;
502 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
504 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
505 return BNXT_TF_RC_ERROR;
509 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
513 size = sizeof(ipv4_spec->hdr.version_ihl);
514 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
515 &ipv4_spec->hdr.version_ihl,
517 size = sizeof(ipv4_spec->hdr.type_of_service);
518 field = ulp_rte_parser_fld_copy(field,
519 &ipv4_spec->hdr.type_of_service,
521 size = sizeof(ipv4_spec->hdr.total_length);
522 field = ulp_rte_parser_fld_copy(field,
523 &ipv4_spec->hdr.total_length,
525 size = sizeof(ipv4_spec->hdr.packet_id);
526 field = ulp_rte_parser_fld_copy(field,
527 &ipv4_spec->hdr.packet_id,
529 size = sizeof(ipv4_spec->hdr.fragment_offset);
530 field = ulp_rte_parser_fld_copy(field,
531 &ipv4_spec->hdr.fragment_offset,
533 size = sizeof(ipv4_spec->hdr.time_to_live);
534 field = ulp_rte_parser_fld_copy(field,
535 &ipv4_spec->hdr.time_to_live,
537 size = sizeof(ipv4_spec->hdr.next_proto_id);
538 field = ulp_rte_parser_fld_copy(field,
539 &ipv4_spec->hdr.next_proto_id,
541 size = sizeof(ipv4_spec->hdr.hdr_checksum);
542 field = ulp_rte_parser_fld_copy(field,
543 &ipv4_spec->hdr.hdr_checksum,
545 size = sizeof(ipv4_spec->hdr.src_addr);
546 field = ulp_rte_parser_fld_copy(field,
547 &ipv4_spec->hdr.src_addr,
549 size = sizeof(ipv4_spec->hdr.dst_addr);
550 field = ulp_rte_parser_fld_copy(field,
551 &ipv4_spec->hdr.dst_addr,
/* Mask copies mirror the spec copies, same field order. */
555 ulp_rte_prsr_mask_copy(params, &idx,
556 &ipv4_mask->hdr.version_ihl,
557 sizeof(ipv4_mask->hdr.version_ihl));
558 ulp_rte_prsr_mask_copy(params, &idx,
559 &ipv4_mask->hdr.type_of_service,
560 sizeof(ipv4_mask->hdr.type_of_service));
561 ulp_rte_prsr_mask_copy(params, &idx,
562 &ipv4_mask->hdr.total_length,
563 sizeof(ipv4_mask->hdr.total_length));
564 ulp_rte_prsr_mask_copy(params, &idx,
565 &ipv4_mask->hdr.packet_id,
566 sizeof(ipv4_mask->hdr.packet_id));
567 ulp_rte_prsr_mask_copy(params, &idx,
568 &ipv4_mask->hdr.fragment_offset,
569 sizeof(ipv4_mask->hdr.fragment_offset));
570 ulp_rte_prsr_mask_copy(params, &idx,
571 &ipv4_mask->hdr.time_to_live,
572 sizeof(ipv4_mask->hdr.time_to_live));
573 ulp_rte_prsr_mask_copy(params, &idx,
574 &ipv4_mask->hdr.next_proto_id,
575 sizeof(ipv4_mask->hdr.next_proto_id));
576 ulp_rte_prsr_mask_copy(params, &idx,
577 &ipv4_mask->hdr.hdr_checksum,
578 sizeof(ipv4_mask->hdr.hdr_checksum));
579 ulp_rte_prsr_mask_copy(params, &idx,
580 &ipv4_mask->hdr.src_addr,
581 sizeof(ipv4_mask->hdr.src_addr));
582 ulp_rte_prsr_mask_copy(params, &idx,
583 &ipv4_mask->hdr.dst_addr,
584 sizeof(ipv4_mask->hdr.dst_addr));
586 /* Add the number of ipv4 header elements */
587 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
589 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
590 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
592 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
593 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
594 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
596 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
598 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
600 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
602 return BNXT_TF_RC_SUCCESS;
605 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/* IPV6 item handler: decompose vtc_flow into version / traffic-class /
 * flow-label sub-fields (BNXT_ULP_GET_IPV6_* macros), copy them plus
 * payload_len, proto, hop_limits and the two addresses (spec and mask)
 * into hdr_field slots, then set O_IPV6 or I_IPV6 and the matching L3
 * counter. Rejects a third L3 header.
 * NOTE(review): "¶ms" is mojibake for "&params"; NULL-guards and the
 * buffer/size arguments of several copy calls are in lines not visible
 * in this fragment.
 */
607 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
608 struct ulp_rte_parser_params *params)
610 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
611 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
612 struct ulp_rte_hdr_field *field;
613 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
614 uint32_t idx = params->field_idx;
616 uint32_t inner_l3, outer_l3;
617 uint32_t vtcf, vtcf_mask;
619 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
621 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
622 return BNXT_TF_RC_ERROR;
626 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
630 size = sizeof(ipv6_spec->hdr.vtc_flow);
/* vtc_flow is split into three separate match fields. */
632 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
633 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
637 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
638 field = ulp_rte_parser_fld_copy(field,
642 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
643 field = ulp_rte_parser_fld_copy(field,
647 size = sizeof(ipv6_spec->hdr.payload_len);
648 field = ulp_rte_parser_fld_copy(field,
649 &ipv6_spec->hdr.payload_len,
651 size = sizeof(ipv6_spec->hdr.proto);
652 field = ulp_rte_parser_fld_copy(field,
653 &ipv6_spec->hdr.proto,
655 size = sizeof(ipv6_spec->hdr.hop_limits);
656 field = ulp_rte_parser_fld_copy(field,
657 &ipv6_spec->hdr.hop_limits,
659 size = sizeof(ipv6_spec->hdr.src_addr);
660 field = ulp_rte_parser_fld_copy(field,
661 &ipv6_spec->hdr.src_addr,
663 size = sizeof(ipv6_spec->hdr.dst_addr);
664 field = ulp_rte_parser_fld_copy(field,
665 &ipv6_spec->hdr.dst_addr,
/* Mask copies mirror the spec copies, same field order. */
669 size = sizeof(ipv6_mask->hdr.vtc_flow);
671 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
672 ulp_rte_prsr_mask_copy(params, &idx,
676 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
677 ulp_rte_prsr_mask_copy(params, &idx,
682 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
683 ulp_rte_prsr_mask_copy(params, &idx,
687 ulp_rte_prsr_mask_copy(params, &idx,
688 &ipv6_mask->hdr.payload_len,
689 sizeof(ipv6_mask->hdr.payload_len));
690 ulp_rte_prsr_mask_copy(params, &idx,
691 &ipv6_mask->hdr.proto,
692 sizeof(ipv6_mask->hdr.proto));
693 ulp_rte_prsr_mask_copy(params, &idx,
694 &ipv6_mask->hdr.hop_limits,
695 sizeof(ipv6_mask->hdr.hop_limits));
696 ulp_rte_prsr_mask_copy(params, &idx,
697 &ipv6_mask->hdr.src_addr,
698 sizeof(ipv6_mask->hdr.src_addr));
699 ulp_rte_prsr_mask_copy(params, &idx,
700 &ipv6_mask->hdr.dst_addr,
701 sizeof(ipv6_mask->hdr.dst_addr));
703 /* add number of ipv6 header elements */
704 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
706 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
707 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
709 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
710 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
711 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
712 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
714 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
715 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
717 return BNXT_TF_RC_SUCCESS;
720 /* Function to handle the parsing of RTE Flow item UDP Header. */
/* UDP item handler: copy src/dst port, dgram_len and dgram_cksum spec and
 * mask into hdr_field slots, then set O_UDP or I_UDP (when an outer L4
 * was already seen) and the matching L4 counter. Rejects a third L4
 * header. NOTE(review): "¶ms" is mojibake for "&params"; NULL-guards
 * and fld_copy size arguments fall in lines not visible here. (The
 * "ipv4" wording in the inherited comment below is stale — this is UDP.)
 */
722 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
723 struct ulp_rte_parser_params *params)
725 const struct rte_flow_item_udp *udp_spec = item->spec;
726 const struct rte_flow_item_udp *udp_mask = item->mask;
727 struct ulp_rte_hdr_field *field;
728 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
729 uint32_t idx = params->field_idx;
731 uint32_t inner_l4, outer_l4;
733 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
735 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
736 return BNXT_TF_RC_ERROR;
740 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
744 size = sizeof(udp_spec->hdr.src_port);
745 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
746 &udp_spec->hdr.src_port,
748 size = sizeof(udp_spec->hdr.dst_port);
749 field = ulp_rte_parser_fld_copy(field,
750 &udp_spec->hdr.dst_port,
752 size = sizeof(udp_spec->hdr.dgram_len);
753 field = ulp_rte_parser_fld_copy(field,
754 &udp_spec->hdr.dgram_len,
756 size = sizeof(udp_spec->hdr.dgram_cksum);
757 field = ulp_rte_parser_fld_copy(field,
758 &udp_spec->hdr.dgram_cksum,
/* Mask copies mirror the spec copies, same field order. */
762 ulp_rte_prsr_mask_copy(params, &idx,
763 &udp_mask->hdr.src_port,
764 sizeof(udp_mask->hdr.src_port));
765 ulp_rte_prsr_mask_copy(params, &idx,
766 &udp_mask->hdr.dst_port,
767 sizeof(udp_mask->hdr.dst_port));
768 ulp_rte_prsr_mask_copy(params, &idx,
769 &udp_mask->hdr.dgram_len,
770 sizeof(udp_mask->hdr.dgram_len));
771 ulp_rte_prsr_mask_copy(params, &idx,
772 &udp_mask->hdr.dgram_cksum,
773 sizeof(udp_mask->hdr.dgram_cksum));
776 /* Add number of UDP header elements */
777 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
779 /* Set the udp header bitmap and computed l4 header bitmaps */
780 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
782 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
783 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
784 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
785 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
787 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
788 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
790 return BNXT_TF_RC_SUCCESS;
793 /* Function to handle the parsing of RTE Flow item TCP Header. */
/* TCP item handler: copy the nine rte_tcp_hdr fields' spec and mask into
 * hdr_field slots, then set O_TCP or I_TCP (when an outer L4 was already
 * seen) and the matching L4 counter. Rejects a third L4 header.
 * NOTE(review): "¶ms" is mojibake for "&params". The bare
 * "idx += BNXT_ULP_PROTO_HDR_TCP_NUM" after the spec copies presumably
 * belongs to an else-branch (no spec present) hidden by this fragment —
 * confirm against the full source. ("ipv4" in the inherited comment is
 * stale — this is TCP.)
 */
795 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
796 struct ulp_rte_parser_params *params)
798 const struct rte_flow_item_tcp *tcp_spec = item->spec;
799 const struct rte_flow_item_tcp *tcp_mask = item->mask;
800 struct ulp_rte_hdr_field *field;
801 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
802 uint32_t idx = params->field_idx;
804 uint32_t inner_l4, outer_l4;
806 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
808 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
809 return BNXT_TF_RC_ERROR;
813 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
817 size = sizeof(tcp_spec->hdr.src_port);
818 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
819 &tcp_spec->hdr.src_port,
821 size = sizeof(tcp_spec->hdr.dst_port);
822 field = ulp_rte_parser_fld_copy(field,
823 &tcp_spec->hdr.dst_port,
825 size = sizeof(tcp_spec->hdr.sent_seq);
826 field = ulp_rte_parser_fld_copy(field,
827 &tcp_spec->hdr.sent_seq,
829 size = sizeof(tcp_spec->hdr.recv_ack);
830 field = ulp_rte_parser_fld_copy(field,
831 &tcp_spec->hdr.recv_ack,
833 size = sizeof(tcp_spec->hdr.data_off);
834 field = ulp_rte_parser_fld_copy(field,
835 &tcp_spec->hdr.data_off,
837 size = sizeof(tcp_spec->hdr.tcp_flags);
838 field = ulp_rte_parser_fld_copy(field,
839 &tcp_spec->hdr.tcp_flags,
841 size = sizeof(tcp_spec->hdr.rx_win);
842 field = ulp_rte_parser_fld_copy(field,
843 &tcp_spec->hdr.rx_win,
845 size = sizeof(tcp_spec->hdr.cksum);
846 field = ulp_rte_parser_fld_copy(field,
847 &tcp_spec->hdr.cksum,
849 size = sizeof(tcp_spec->hdr.tcp_urp);
850 field = ulp_rte_parser_fld_copy(field,
851 &tcp_spec->hdr.tcp_urp,
854 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
/* Mask copies mirror the spec copies, same field order. */
858 ulp_rte_prsr_mask_copy(params, &idx,
859 &tcp_mask->hdr.src_port,
860 sizeof(tcp_mask->hdr.src_port));
861 ulp_rte_prsr_mask_copy(params, &idx,
862 &tcp_mask->hdr.dst_port,
863 sizeof(tcp_mask->hdr.dst_port));
864 ulp_rte_prsr_mask_copy(params, &idx,
865 &tcp_mask->hdr.sent_seq,
866 sizeof(tcp_mask->hdr.sent_seq));
867 ulp_rte_prsr_mask_copy(params, &idx,
868 &tcp_mask->hdr.recv_ack,
869 sizeof(tcp_mask->hdr.recv_ack));
870 ulp_rte_prsr_mask_copy(params, &idx,
871 &tcp_mask->hdr.data_off,
872 sizeof(tcp_mask->hdr.data_off));
873 ulp_rte_prsr_mask_copy(params, &idx,
874 &tcp_mask->hdr.tcp_flags,
875 sizeof(tcp_mask->hdr.tcp_flags));
876 ulp_rte_prsr_mask_copy(params, &idx,
877 &tcp_mask->hdr.rx_win,
878 sizeof(tcp_mask->hdr.rx_win));
879 ulp_rte_prsr_mask_copy(params, &idx,
880 &tcp_mask->hdr.cksum,
881 sizeof(tcp_mask->hdr.cksum));
882 ulp_rte_prsr_mask_copy(params, &idx,
883 &tcp_mask->hdr.tcp_urp,
884 sizeof(tcp_mask->hdr.tcp_urp));
886 /* add number of TCP header elements */
887 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
889 /* Set the udp header bitmap and computed l4 header bitmaps */
890 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
892 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
893 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
894 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
895 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
897 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
898 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
900 return BNXT_TF_RC_SUCCESS;
903 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/* VXLAN item handler: copy flags, rsvd0, vni and rsvd1 spec and mask into
 * hdr_field slots, advance field_idx, and set the T_VXLAN tunnel bit.
 * NOTE(review): "¶ms" is mojibake for "&params"; NULL-guards and the
 * buffer/size arguments of several copy calls fall in lines not visible
 * in this fragment.
 */
905 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
906 struct ulp_rte_parser_params *params)
908 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
909 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
910 struct ulp_rte_hdr_field *field;
911 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
912 uint32_t idx = params->field_idx;
916 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
920 size = sizeof(vxlan_spec->flags);
921 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
924 size = sizeof(vxlan_spec->rsvd0);
925 field = ulp_rte_parser_fld_copy(field,
928 size = sizeof(vxlan_spec->vni);
929 field = ulp_rte_parser_fld_copy(field,
932 size = sizeof(vxlan_spec->rsvd1);
933 field = ulp_rte_parser_fld_copy(field,
/* Mask copies mirror the spec copies, same field order. */
938 ulp_rte_prsr_mask_copy(params, &idx,
940 sizeof(vxlan_mask->flags));
941 ulp_rte_prsr_mask_copy(params, &idx,
943 sizeof(vxlan_mask->rsvd0));
944 ulp_rte_prsr_mask_copy(params, &idx,
946 sizeof(vxlan_mask->vni));
947 ulp_rte_prsr_mask_copy(params, &idx,
949 sizeof(vxlan_mask->rsvd1));
951 /* Add number of vxlan header elements */
952 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
954 /* Update the hdr_bitmap with vxlan */
955 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
956 return BNXT_TF_RC_SUCCESS;
959 /* Function to handle the parsing of RTE Flow item void Header */
/* VOID item handler: intentionally a no-op; VOID items carry no match. */
961 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
962 struct ulp_rte_parser_params *params __rte_unused)
964 return BNXT_TF_RC_SUCCESS;
967 /* Function to handle the parsing of RTE Flow action void Header. */
/* VOID action handler: intentionally a no-op counterpart of the VOID
 * item handler. */
969 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
970 struct ulp_rte_parser_params *params __rte_unused)
972 return BNXT_TF_RC_SUCCESS;
975 /* Function to handle the parsing of RTE Flow action Mark Header. */
/* MARK action handler: store the mark id (big-endian) into the MARK
 * action property and set the MARK action bit; errors if the conf
 * pointer is missing. NOTE(review): "¶m" is mojibake for "&param";
 * the NULL-check guarding 'mark' sits in a line not visible here. The
 * "vxlan" wording in the inherited comment below is stale — this sets
 * the MARK bit.
 */
977 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
978 struct ulp_rte_parser_params *param)
980 const struct rte_flow_action_mark *mark;
981 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
984 mark = action_item->conf;
986 mark_id = tfp_cpu_to_be_32(mark->id);
987 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
988 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
990 /* Update the hdr_bitmap with vxlan */
991 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
992 return BNXT_TF_RC_SUCCESS;
994 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
995 return BNXT_TF_RC_ERROR;
998 /* Function to handle the parsing of RTE Flow action RSS Header. */
/* RSS action handler: set the RSS action bit when conf is present;
 * errors otherwise. The guarding NULL-check on 'rss' is presumably in a
 * line not visible here — TODO confirm. ("vxlan" in the inherited
 * comment is stale — this sets the RSS bit.)
 */
1000 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1001 struct ulp_rte_parser_params *param)
1003 const struct rte_flow_action_rss *rss = action_item->conf;
1006 /* Update the hdr_bitmap with vxlan */
1007 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1008 return BNXT_TF_RC_SUCCESS;
1010 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1011 return BNXT_TF_RC_ERROR;
1014 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1016 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1017 struct ulp_rte_parser_params *params)
1019 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1020 const struct rte_flow_item *item;
1021 const struct rte_flow_item_eth *eth_spec;
1022 const struct rte_flow_item_ipv4 *ipv4_spec;
1023 const struct rte_flow_item_ipv6 *ipv6_spec;
1024 struct rte_flow_item_vxlan vxlan_spec;
1025 uint32_t vlan_num = 0, vlan_size = 0;
1026 uint32_t ip_size = 0, ip_type = 0;
1027 uint32_t vxlan_size = 0;
1029 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1030 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1032 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1033 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1035 vxlan_encap = action_item->conf;
1037 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1038 return BNXT_TF_RC_ERROR;
1041 item = vxlan_encap->definition;
1043 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1044 return BNXT_TF_RC_ERROR;
1047 if (!ulp_rte_item_skip_void(&item, 0))
1048 return BNXT_TF_RC_ERROR;
1050 /* must have ethernet header */
1051 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1052 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1053 return BNXT_TF_RC_ERROR;
1055 eth_spec = item->spec;
1056 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1057 ulp_encap_buffer_copy(buff,
1058 eth_spec->dst.addr_bytes,
1059 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1061 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1062 ulp_encap_buffer_copy(buff,
1063 eth_spec->src.addr_bytes,
1064 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1066 /* Goto the next item */
1067 if (!ulp_rte_item_skip_void(&item, 1))
1068 return BNXT_TF_RC_ERROR;
1070 /* May have vlan header */
1071 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1073 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1074 ulp_encap_buffer_copy(buff,
1076 sizeof(struct rte_flow_item_vlan));
1078 if (!ulp_rte_item_skip_void(&item, 1))
1079 return BNXT_TF_RC_ERROR;
1082 /* may have two vlan headers */
1083 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1085 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1086 sizeof(struct rte_flow_item_vlan)],
1088 sizeof(struct rte_flow_item_vlan));
1089 if (!ulp_rte_item_skip_void(&item, 1))
1090 return BNXT_TF_RC_ERROR;
1092 /* Update the vlan count and size of more than one */
1094 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1095 vlan_num = tfp_cpu_to_be_32(vlan_num);
1096 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1099 vlan_size = tfp_cpu_to_be_32(vlan_size);
1100 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1105 /* L3 must be IPv4, IPv6 */
1106 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1107 ipv4_spec = item->spec;
1108 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1110 /* copy the ipv4 details */
1111 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1112 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1113 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1114 ulp_encap_buffer_copy(buff,
1116 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1117 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1119 const uint8_t *tmp_buff;
1121 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1122 ulp_encap_buffer_copy(buff,
1123 &ipv4_spec->hdr.version_ihl,
1124 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1125 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1126 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1127 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1128 ulp_encap_buffer_copy(buff,
1130 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1132 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1133 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1134 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1135 ulp_encap_buffer_copy(buff,
1136 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1137 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1139 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1140 ulp_encap_buffer_copy(buff,
1141 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1142 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1144 /* Update the ip size details */
1145 ip_size = tfp_cpu_to_be_32(ip_size);
1146 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1147 &ip_size, sizeof(uint32_t));
1149 /* update the ip type */
1150 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1151 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1152 &ip_type, sizeof(uint32_t));
1154 /* update the computed field to notify it is ipv4 header */
1155 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1158 if (!ulp_rte_item_skip_void(&item, 1))
1159 return BNXT_TF_RC_ERROR;
1160 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1161 ipv6_spec = item->spec;
1162 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1164 /* copy the ipv4 details */
1165 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1166 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1168 /* Update the ip size details */
1169 ip_size = tfp_cpu_to_be_32(ip_size);
1170 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1171 &ip_size, sizeof(uint32_t));
1173 /* update the ip type */
1174 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1175 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1176 &ip_type, sizeof(uint32_t));
1178 /* update the computed field to notify it is ipv6 header */
1179 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1182 if (!ulp_rte_item_skip_void(&item, 1))
1183 return BNXT_TF_RC_ERROR;
1185 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1186 return BNXT_TF_RC_ERROR;
1190 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1191 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1192 return BNXT_TF_RC_ERROR;
1194 /* copy the udp details */
1195 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1196 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1198 if (!ulp_rte_item_skip_void(&item, 1))
1199 return BNXT_TF_RC_ERROR;
1202 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1203 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1204 return BNXT_TF_RC_ERROR;
1206 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1207 /* copy the vxlan details */
1208 memcpy(&vxlan_spec, item->spec, vxlan_size);
1209 vxlan_spec.flags = 0x08;
1210 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1211 (const uint8_t *)&vxlan_spec,
1213 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1214 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1215 &vxlan_size, sizeof(uint32_t));
1217 /*update the hdr_bitmap with vxlan */
1218 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1219 return BNXT_TF_RC_SUCCESS;
1222 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1224 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1226 struct ulp_rte_parser_params *params)
1228 /* update the hdr_bitmap with vxlan */
1229 ULP_BITMAP_SET(params->act_bitmap.bits,
1230 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1231 return BNXT_TF_RC_SUCCESS;
1234 /* Function to handle the parsing of RTE Flow action drop Header. */
1236 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1237 struct ulp_rte_parser_params *params)
1239 /* Update the hdr_bitmap with drop */
1240 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1241 return BNXT_TF_RC_SUCCESS;
1244 /* Function to handle the parsing of RTE Flow action count. */
1246 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1247 struct ulp_rte_parser_params *params)
1250 const struct rte_flow_action_count *act_count;
1251 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1253 act_count = action_item->conf;
1255 if (act_count->shared) {
1257 "Parse Error:Shared count not supported\n");
1258 return BNXT_TF_RC_PARSE_ERR;
1260 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1262 BNXT_ULP_ACT_PROP_SZ_COUNT);
1265 /* Update the hdr_bitmap with count */
1266 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1267 return BNXT_TF_RC_SUCCESS;
1270 /* Function to handle the parsing of RTE Flow action PF. */
1272 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1273 struct ulp_rte_parser_params *params)
1277 /* Update the hdr_bitmap with vnic bit */
1278 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1280 /* copy the PF of the current device into VNIC Property */
1281 svif = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1282 svif = bnxt_get_vnic_id(svif, BNXT_ULP_INTF_TYPE_INVALID);
1283 svif = rte_cpu_to_be_32(svif);
1284 memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1285 &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1287 return BNXT_TF_RC_SUCCESS;
1290 /* Function to handle the parsing of RTE Flow action VF. */
1292 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1293 struct ulp_rte_parser_params *param)
1295 const struct rte_flow_action_vf *vf_action;
1298 vf_action = action_item->conf;
1300 if (vf_action->original) {
1302 "Parse Error:VF Original not supported\n");
1303 return BNXT_TF_RC_PARSE_ERR;
1305 /* TBD: Update the computed VNIC using VF conversion */
1306 pid = bnxt_get_vnic_id(vf_action->id,
1307 BNXT_ULP_INTF_TYPE_INVALID);
1308 pid = rte_cpu_to_be_32(pid);
1309 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1310 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1313 /* Update the hdr_bitmap with count */
1314 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1315 return BNXT_TF_RC_SUCCESS;
1318 /* Function to handle the parsing of RTE Flow action port_id. */
1320 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1321 struct ulp_rte_parser_params *param)
1323 const struct rte_flow_action_port_id *port_id;
1326 port_id = act_item->conf;
1328 if (port_id->original) {
1330 "ParseErr:Portid Original not supported\n");
1331 return BNXT_TF_RC_PARSE_ERR;
1333 /* TBD: Update the computed VNIC using port conversion */
1334 pid = bnxt_get_vnic_id(port_id->id, BNXT_ULP_INTF_TYPE_INVALID);
1335 pid = rte_cpu_to_be_32(pid);
1336 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1337 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1340 /* Update the hdr_bitmap with count */
1341 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1342 return BNXT_TF_RC_SUCCESS;
1345 /* Function to handle the parsing of RTE Flow action phy_port. */
1347 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1348 struct ulp_rte_parser_params *prm)
1350 const struct rte_flow_action_phy_port *phy_port;
1353 phy_port = action_item->conf;
1355 if (phy_port->original) {
1357 "Parse Err:Port Original not supported\n");
1358 return BNXT_TF_RC_PARSE_ERR;
1360 /* Get the vport of the physical port */
1361 /* TBD: shall be changed later to portdb call */
1362 vport = 1 << phy_port->index;
1363 vport = rte_cpu_to_be_32(vport);
1364 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1365 &vport, BNXT_ULP_ACT_PROP_SZ_VPORT);
1368 /* Update the hdr_bitmap with count */
1369 ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1370 return BNXT_TF_RC_SUCCESS;