1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Utility function to skip the void items. */
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
/* NOTE(review): interior lines elided in this view. The visible loop walks
 * the item cursor forward past consecutive VOID items (after an initial
 * 'increment' step, presumably applied on an elided line).
 */
23 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 /* Utility function to update the field_bitmap */
/* If hdr_field[idx] carries a non-zero mask, mark index 'idx' in the
 * parser's field bitmap; a partial (not-all-ones) mask additionally sets
 * the wildcard-match bit so a wildcard table is selected. A zero mask
 * clears the index bit instead.
 * Fix: repaired mojibake '&para;ms' -> '&params' on the field lookup line.
 */
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
35 struct ulp_rte_hdr_field *field;
37 field = &params->hdr_field[idx];
38 if (ulp_bitmap_notzero(field->mask, field->size)) {
39 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial mask => wildcard match type is required. */
41 if (!ulp_bitmap_is_ones(field->mask, field->size))
42 ULP_BITMAP_SET(params->fld_bitmap.bits,
43 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
45 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
49 /* Utility function to copy field spec items */
/* Copies the caller's spec bytes into field->spec (field->size bytes) and,
 * judging by the chained usage elsewhere in this file, returns the next
 * hdr_field slot — TODO confirm against the elided return statement.
 */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
56 memcpy(field->spec, buffer, field->size);
61 /* Utility function to copy field masks items */
/* Copies 'size' mask bytes into hdr_field[*idx].mask, then refreshes the
 * field bitmap for that index via ulp_rte_parser_field_bitmap_update().
 * The index is presumably advanced on an elided line so calls can chain.
 * Fix: repaired mojibake '&para;ms' -> '&params' in the field lookup.
 */
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
68 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
70 memcpy(field->mask, buffer, size);
71 ulp_rte_parser_field_bitmap_update(params, *idx);
76 * Function to handle the parsing of RTE Flows and placing
77 * the RTE flow items into the ulp structures.
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81 struct ulp_rte_parser_params *params)
83 const struct rte_flow_item *item = pattern;
84 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading SVIF field slots before per-protocol fields. */
86 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
/* Egress flows carry a direction bit in the header bitmap. */
87 if (params->dir == ULP_DIR_EGRESS)
88 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
91 /* Parse all the items in the pattern */
92 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
93 /* get the header information from the flow_hdr_info table */
94 hdr_info = &ulp_hdr_info[item->type]; /* item->type indexes the table */
95 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
97 "Truflow parser does not support type %d\n",
99 return BNXT_TF_RC_PARSE_ERR;
100 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
101 /* call the registered callback handler */
102 if (hdr_info->proto_hdr_func) {
103 if (hdr_info->proto_hdr_func(item, params) !=
104 BNXT_TF_RC_SUCCESS) {
105 return BNXT_TF_RC_ERROR;
111 /* update the implied SVIF */
112 (void)ulp_rte_parser_svif_process(params);
113 return BNXT_TF_RC_SUCCESS;
117 * Function to handle the parsing of RTE Flows and placing
118 * the RTE flow actions into the ulp structures.
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122 struct ulp_rte_parser_params *params)
124 const struct rte_flow_action *action_item = actions;
125 struct bnxt_ulp_rte_act_info *hdr_info;
/* Egress flows carry a direction bit in the action bitmap. */
127 if (params->dir == ULP_DIR_EGRESS)
128 ULP_BITMAP_SET(params->act_bitmap.bits,
129 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
131 /* Parse all the items in the pattern */
132 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
133 /* get the header information from the flow_hdr_info table */
134 hdr_info = &ulp_act_info[action_item->type];
135 if (hdr_info->act_type ==
136 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
138 "Truflow parser does not support act %u\n",
140 return BNXT_TF_RC_ERROR;
141 } else if (hdr_info->act_type ==
142 BNXT_ULP_ACT_TYPE_SUPPORTED) {
143 /* call the registered callback handler */
144 if (hdr_info->proto_act_func) {
145 if (hdr_info->proto_act_func(action_item,
147 BNXT_TF_RC_SUCCESS) {
148 return BNXT_TF_RC_ERROR;
/* Default the destination to the implied VNIC when no explicit one is set. */
154 /* update the implied VNIC */
155 ulp_rte_parser_vnic_process(params);
156 return BNXT_TF_RC_SUCCESS;
159 /* Function to handle the parsing of RTE Flow item PF Header. */
/* Records the source interface (SVIF) for the flow in hdr_field[SVIF_IDX]
 * and in the SVIF computed field. For PORT_ID items the dpdk port id is
 * first translated to a bnxt svif via the port database; the svif type
 * depends on direction and interface type (phy port / VF func / drv func).
 * Errors out if an SVIF was already set (multiple sources unsupported).
 * Fix: repaired mojibake '&para;ms' -> '&params' in the hdr_field lookup.
 */
161 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
162 enum rte_flow_item_type proto,
166 uint16_t port_id = svif;
168 struct ulp_rte_hdr_field *hdr_field;
169 enum bnxt_ulp_svif_type svif_type;
170 enum bnxt_ulp_intf_type if_type;
/* Only one source interface may be specified per flow. */
174 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
175 BNXT_ULP_INVALID_SVIF_VAL) {
177 "SVIF already set,multiple source not support'd\n");
178 return BNXT_TF_RC_ERROR;
181 if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
182 dir = ULP_COMP_FLD_IDX_RD(params,
183 BNXT_ULP_CF_IDX_DIRECTION);
184 /* perform the conversion from dpdk port to bnxt svif */
185 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
189 "Invalid port id\n");
190 return BNXT_TF_RC_ERROR;
/* Select the svif type: ingress matches the phy port svif; egress uses
 * the VF function svif for VF reps, else the driver function svif.
 */
193 if (dir == ULP_DIR_INGRESS) {
194 svif_type = BNXT_ULP_PHY_PORT_SVIF;
196 if_type = bnxt_get_interface_type(port_id);
197 if (if_type == BNXT_ULP_INTF_TYPE_VF_REP)
198 svif_type = BNXT_ULP_VF_FUNC_SVIF;
200 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
202 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
204 svif = rte_cpu_to_be_16(svif); /* hw expects big-endian svif */
206 hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
207 memcpy(hdr_field->spec, &svif, sizeof(svif));
208 memcpy(hdr_field->mask, &mask, sizeof(mask));
209 hdr_field->size = sizeof(svif);
/* Remember (in cpu order) that the SVIF is now populated. */
210 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
211 rte_be_to_cpu_16(svif));
212 return BNXT_TF_RC_SUCCESS;
215 /* Function to handle the parsing of the RTE port id */
/* If no explicit SVIF was set by any pattern item, derive it from the
 * incoming interface computed field, treating it like a PORT_ID match
 * with a full mask.
 */
217 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
219 uint16_t port_id = 0;
220 uint16_t svif_mask = 0xFFFF;
/* Nothing to do when an SVIF has already been recorded. */
222 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
223 BNXT_ULP_INVALID_SVIF_VAL)
224 return BNXT_TF_RC_SUCCESS;
226 /* SVIF not set. So get the port id */
227 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
229 /* Update the SVIF details */
230 return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
234 /* Function to handle the implicit VNIC RTE port id */
/* If the action list did not already pick a destination (VNIC or VPORT),
 * apply the PF action handler to install the default VNIC, then clear the
 * VNIC action bit so the template logic treats it as implicit.
 * Fix: repaired mojibake '&para;ms' -> '&params' in the bitmap lookup.
 */
236 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
238 struct ulp_rte_act_bitmap *act = &params->act_bitmap;
240 if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
241 ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
242 return BNXT_TF_RC_SUCCESS;
244 /* Update the vnic details */
245 ulp_rte_pf_act_handler(NULL, params);
246 /* Reset the hdr_bitmap with vnic bit */
247 ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
249 return BNXT_TF_RC_SUCCESS;
252 /* Function to handle the parsing of RTE Flow item PF Header. */
/* PF item: the source interface is simply the incoming interface recorded
 * in the computed fields; delegate to ulp_rte_parser_svif_set with a full
 * mask.
 */
254 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
255 struct ulp_rte_parser_params *params)
257 uint16_t port_id = 0;
258 uint16_t svif_mask = 0xFFFF;
260 /* Get the port id */
261 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
263 /* Update the SVIF details */
264 return ulp_rte_parser_svif_set(params,
269 /* Function to handle the parsing of RTE Flow item VF Header. */
/* VF item: take the VF id from spec/mask (guards on elided lines
 * presumably null-check spec and mask before the reads — TODO confirm)
 * and record it as the flow's SVIF.
 */
271 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
272 struct ulp_rte_parser_params *params)
274 const struct rte_flow_item_vf *vf_spec = item->spec;
275 const struct rte_flow_item_vf *vf_mask = item->mask;
276 uint16_t svif = 0, mask = 0;
278 /* Get VF rte_flow_item for Port details */
280 svif = (uint16_t)vf_spec->id;
282 mask = (uint16_t)vf_mask->id;
284 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
287 /* Function to handle the parsing of RTE Flow item port id Header. */
/* PORT_ID item: validate the dpdk port id against RTE_MAX_ETHPORTS and
 * record it (with its mask) as the flow's SVIF; the dpdk-port-to-svif
 * conversion happens inside ulp_rte_parser_svif_set.
 */
289 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
290 struct ulp_rte_parser_params *params)
292 const struct rte_flow_item_port_id *port_spec = item->spec;
293 const struct rte_flow_item_port_id *port_mask = item->mask;
294 uint16_t svif = 0, mask = 0;
297 * Copy the rte_flow_item for Port into hdr_field using port id
301 svif = (uint16_t)port_spec->id;
302 if (svif >= RTE_MAX_ETHPORTS) {
303 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
304 return BNXT_TF_RC_PARSE_ERR;
308 mask = (uint16_t)port_mask->id;
310 /* Update the SVIF details */
311 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
314 /* Function to handle the parsing of RTE Flow item phy port Header. */
/* PHY_PORT item: validate the physical port index against the device's
 * num_phy_ports (looked up via the device params table) and record it as
 * the flow's SVIF.
 */
316 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
317 struct ulp_rte_parser_params *params)
319 const struct rte_flow_item_phy_port *port_spec = item->spec;
320 const struct rte_flow_item_phy_port *port_mask = item->mask;
321 uint32_t svif = 0, mask = 0;
322 struct bnxt_ulp_device_params *dparms;
325 /* Copy the rte_flow_item for phy port into hdr_field */
327 svif = port_spec->index;
329 mask = port_mask->index;
/* Need the device id to find the per-device parameter table. */
331 if (bnxt_ulp_cntxt_dev_id_get(params->ulp_ctx, &dev_id)) {
332 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
336 dparms = bnxt_ulp_device_params_get(dev_id);
338 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
342 if (svif > dparms->num_phy_ports) {
343 BNXT_TF_DBG(ERR, "ParseErr:Phy Port is not valid\n");
344 return BNXT_TF_RC_PARSE_ERR;
347 /* Update the SVIF details */
348 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
351 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/* Copies eth dst/src/type spec and mask into consecutive hdr_field slots,
 * reserves the vlan field slots that may follow, and sets the outer or
 * inner ETH header bit depending on whether an outer ETH was already seen.
 * Fixes: repaired mojibake '&para;ms' -> '&params' and '&eth;_mask' ->
 * '&eth_mask'.
 */
353 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
354 struct ulp_rte_parser_params *params)
356 const struct rte_flow_item_eth *eth_spec = item->spec;
357 const struct rte_flow_item_eth *eth_mask = item->mask;
358 struct ulp_rte_hdr_field *field;
359 uint32_t idx = params->field_idx;
360 uint64_t set_flag = 0;
364 * Copy the rte_flow_item for eth into hdr_field using ethernet
368 size = sizeof(eth_spec->dst.addr_bytes);
369 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
370 eth_spec->dst.addr_bytes,
372 size = sizeof(eth_spec->src.addr_bytes);
373 field = ulp_rte_parser_fld_copy(field,
374 eth_spec->src.addr_bytes,
376 field = ulp_rte_parser_fld_copy(field,
378 sizeof(eth_spec->type));
381 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
382 sizeof(eth_mask->dst.addr_bytes));
383 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
384 sizeof(eth_mask->src.addr_bytes));
385 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
386 sizeof(eth_mask->type));
388 /* Add number of vlan header elements */
389 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
/* Pre-reserve vlan slots so a later VLAN item lands at a fixed index. */
390 params->vlan_idx = params->field_idx;
391 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
393 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
394 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
395 BNXT_ULP_HDR_BIT_O_ETH);
397 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
399 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
400 BNXT_ULP_HDR_BIT_I_ETH);
402 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
403 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
405 return BNXT_TF_RC_SUCCESS;
408 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/* Splits the vlan TCI into priority (top 3 bits) and tag, copies spec and
 * mask into the pre-reserved vlan hdr_field slots, then classifies the tag
 * as OO/OI/IO/II vlan based on which ETH headers are present and how many
 * tags were already counted. Errors if no matching ETH header was parsed.
 * Fixes: repaired mojibake '&para;ms' -> '&params' (three places) and the
 * "withtout" typo in the error message.
 */
410 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
411 struct ulp_rte_parser_params *params)
413 const struct rte_flow_item_vlan *vlan_spec = item->spec;
414 const struct rte_flow_item_vlan *vlan_mask = item->mask;
415 struct ulp_rte_hdr_field *field;
416 struct ulp_rte_hdr_bitmap *hdr_bit;
417 uint32_t idx = params->vlan_idx;
418 uint16_t vlan_tag, priority;
419 uint32_t outer_vtag_num;
420 uint32_t inner_vtag_num;
423 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* TCI = 3-bit PCP | 1-bit DEI | 12-bit VID; split before copying. */
427 vlan_tag = ntohs(vlan_spec->tci);
428 priority = htons(vlan_tag >> 13);
430 vlan_tag = htons(vlan_tag);
432 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
435 field = ulp_rte_parser_fld_copy(field,
438 field = ulp_rte_parser_fld_copy(field,
439 &vlan_spec->inner_type,
440 sizeof(vlan_spec->inner_type));
/* Same split for the mask. */
444 vlan_tag = ntohs(vlan_mask->tci);
445 priority = htons(vlan_tag >> 13);
447 vlan_tag = htons(vlan_tag);
449 field = &params->hdr_field[idx];
450 memcpy(field->mask, &priority, field->size);
452 memcpy(field->mask, &vlan_tag, field->size);
454 memcpy(field->mask, &vlan_mask->inner_type, field->size);
456 /* Set the vlan index to new incremented value */
457 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
459 /* Get the outer tag and inner tag counts */
460 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
461 BNXT_ULP_CF_IDX_O_VTAG_NUM);
462 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
463 BNXT_ULP_CF_IDX_I_VTAG_NUM);
465 /* Update the hdr_bitmap of the vlans */
466 hdr_bit = &params->hdr_bitmap;
/* Outer eth only, first tag => outer-outer vlan. */
467 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
468 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
470 /* Update the vlan tag num */
472 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
474 ULP_BITMAP_SET(params->hdr_bitmap.bits,
475 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Outer eth only, second tag => outer-inner vlan. */
476 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
477 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
478 outer_vtag_num == 1) {
479 /* update the vlan tag num */
481 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
483 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
484 ULP_BITMAP_SET(params->hdr_bitmap.bits,
485 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Inner eth present, first inner tag => inner-outer vlan. */
486 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
487 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
489 /* update the vlan tag num */
491 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
493 ULP_BITMAP_SET(params->hdr_bitmap.bits,
494 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Inner eth present, second inner tag => inner-inner vlan. */
495 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
496 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
497 inner_vtag_num == 1) {
498 /* update the vlan tag num */
500 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
502 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
503 ULP_BITMAP_SET(params->hdr_bitmap.bits,
504 BNXT_ULP_HDR_BIT_II_VLAN);
506 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
507 return BNXT_TF_RC_ERROR;
509 return BNXT_TF_RC_SUCCESS;
512 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/* Copies every ipv4 header field (spec and mask) into consecutive
 * hdr_field slots, rejects a third L3 header, and marks the outer or
 * inner IPV4 header bit plus the matching L3 computed field.
 * Fix: repaired mojibake '&para;ms' -> '&params' (two places).
 */
514 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
515 struct ulp_rte_parser_params *params)
517 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
518 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
519 struct ulp_rte_hdr_field *field;
520 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
521 uint32_t idx = params->field_idx;
523 uint32_t inner_l3, outer_l3;
/* Only two L3 headers (outer + inner) are supported. */
525 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
527 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
528 return BNXT_TF_RC_ERROR;
532 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
536 size = sizeof(ipv4_spec->hdr.version_ihl);
537 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
538 &ipv4_spec->hdr.version_ihl,
540 size = sizeof(ipv4_spec->hdr.type_of_service);
541 field = ulp_rte_parser_fld_copy(field,
542 &ipv4_spec->hdr.type_of_service,
544 size = sizeof(ipv4_spec->hdr.total_length);
545 field = ulp_rte_parser_fld_copy(field,
546 &ipv4_spec->hdr.total_length,
548 size = sizeof(ipv4_spec->hdr.packet_id);
549 field = ulp_rte_parser_fld_copy(field,
550 &ipv4_spec->hdr.packet_id,
552 size = sizeof(ipv4_spec->hdr.fragment_offset);
553 field = ulp_rte_parser_fld_copy(field,
554 &ipv4_spec->hdr.fragment_offset,
556 size = sizeof(ipv4_spec->hdr.time_to_live);
557 field = ulp_rte_parser_fld_copy(field,
558 &ipv4_spec->hdr.time_to_live,
560 size = sizeof(ipv4_spec->hdr.next_proto_id);
561 field = ulp_rte_parser_fld_copy(field,
562 &ipv4_spec->hdr.next_proto_id,
564 size = sizeof(ipv4_spec->hdr.hdr_checksum);
565 field = ulp_rte_parser_fld_copy(field,
566 &ipv4_spec->hdr.hdr_checksum,
568 size = sizeof(ipv4_spec->hdr.src_addr);
569 field = ulp_rte_parser_fld_copy(field,
570 &ipv4_spec->hdr.src_addr,
572 size = sizeof(ipv4_spec->hdr.dst_addr);
573 field = ulp_rte_parser_fld_copy(field,
574 &ipv4_spec->hdr.dst_addr,
/* Mask copies mirror the spec copies, in the same field order. */
578 ulp_rte_prsr_mask_copy(params, &idx,
579 &ipv4_mask->hdr.version_ihl,
580 sizeof(ipv4_mask->hdr.version_ihl));
581 ulp_rte_prsr_mask_copy(params, &idx,
582 &ipv4_mask->hdr.type_of_service,
583 sizeof(ipv4_mask->hdr.type_of_service));
584 ulp_rte_prsr_mask_copy(params, &idx,
585 &ipv4_mask->hdr.total_length,
586 sizeof(ipv4_mask->hdr.total_length));
587 ulp_rte_prsr_mask_copy(params, &idx,
588 &ipv4_mask->hdr.packet_id,
589 sizeof(ipv4_mask->hdr.packet_id));
590 ulp_rte_prsr_mask_copy(params, &idx,
591 &ipv4_mask->hdr.fragment_offset,
592 sizeof(ipv4_mask->hdr.fragment_offset));
593 ulp_rte_prsr_mask_copy(params, &idx,
594 &ipv4_mask->hdr.time_to_live,
595 sizeof(ipv4_mask->hdr.time_to_live));
596 ulp_rte_prsr_mask_copy(params, &idx,
597 &ipv4_mask->hdr.next_proto_id,
598 sizeof(ipv4_mask->hdr.next_proto_id));
599 ulp_rte_prsr_mask_copy(params, &idx,
600 &ipv4_mask->hdr.hdr_checksum,
601 sizeof(ipv4_mask->hdr.hdr_checksum));
602 ulp_rte_prsr_mask_copy(params, &idx,
603 &ipv4_mask->hdr.src_addr,
604 sizeof(ipv4_mask->hdr.src_addr));
605 ulp_rte_prsr_mask_copy(params, &idx,
606 &ipv4_mask->hdr.dst_addr,
607 sizeof(ipv4_mask->hdr.dst_addr));
609 /* Add the number of ipv4 header elements */
610 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
612 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
613 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
615 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
616 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
617 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
619 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
621 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
623 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
625 return BNXT_TF_RC_SUCCESS;
628 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/* Splits vtc_flow into version / traffic-class / flow-label sub-fields,
 * copies all ipv6 spec and mask fields into hdr_field slots, rejects a
 * third L3 header, and marks the outer or inner IPV6 bit plus the
 * matching L3 computed field.
 * Fix: repaired mojibake '&para;ms' -> '&params' (two places).
 */
630 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
631 struct ulp_rte_parser_params *params)
633 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
634 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
635 struct ulp_rte_hdr_field *field;
636 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
637 uint32_t idx = params->field_idx;
639 uint32_t inner_l3, outer_l3;
640 uint32_t vtcf, vtcf_mask;
/* Only two L3 headers (outer + inner) are supported. */
642 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
644 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
645 return BNXT_TF_RC_ERROR;
649 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* vtc_flow is decomposed into three separate match fields. */
653 size = sizeof(ipv6_spec->hdr.vtc_flow);
655 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
656 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
660 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
661 field = ulp_rte_parser_fld_copy(field,
665 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
666 field = ulp_rte_parser_fld_copy(field,
670 size = sizeof(ipv6_spec->hdr.payload_len);
671 field = ulp_rte_parser_fld_copy(field,
672 &ipv6_spec->hdr.payload_len,
674 size = sizeof(ipv6_spec->hdr.proto);
675 field = ulp_rte_parser_fld_copy(field,
676 &ipv6_spec->hdr.proto,
678 size = sizeof(ipv6_spec->hdr.hop_limits);
679 field = ulp_rte_parser_fld_copy(field,
680 &ipv6_spec->hdr.hop_limits,
682 size = sizeof(ipv6_spec->hdr.src_addr);
683 field = ulp_rte_parser_fld_copy(field,
684 &ipv6_spec->hdr.src_addr,
686 size = sizeof(ipv6_spec->hdr.dst_addr);
687 field = ulp_rte_parser_fld_copy(field,
688 &ipv6_spec->hdr.dst_addr,
/* Mask copies mirror the spec copies, including the vtc_flow split. */
692 size = sizeof(ipv6_mask->hdr.vtc_flow);
694 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
695 ulp_rte_prsr_mask_copy(params, &idx,
699 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
700 ulp_rte_prsr_mask_copy(params, &idx,
705 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
706 ulp_rte_prsr_mask_copy(params, &idx,
710 ulp_rte_prsr_mask_copy(params, &idx,
711 &ipv6_mask->hdr.payload_len,
712 sizeof(ipv6_mask->hdr.payload_len));
713 ulp_rte_prsr_mask_copy(params, &idx,
714 &ipv6_mask->hdr.proto,
715 sizeof(ipv6_mask->hdr.proto));
716 ulp_rte_prsr_mask_copy(params, &idx,
717 &ipv6_mask->hdr.hop_limits,
718 sizeof(ipv6_mask->hdr.hop_limits));
719 ulp_rte_prsr_mask_copy(params, &idx,
720 &ipv6_mask->hdr.src_addr,
721 sizeof(ipv6_mask->hdr.src_addr));
722 ulp_rte_prsr_mask_copy(params, &idx,
723 &ipv6_mask->hdr.dst_addr,
724 sizeof(ipv6_mask->hdr.dst_addr));
726 /* add number of ipv6 header elements */
727 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
729 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
730 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
732 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
733 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
734 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
735 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
737 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
738 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
740 return BNXT_TF_RC_SUCCESS;
743 /* Function to handle the parsing of RTE Flow item UDP Header. */
/* Copies the four udp header fields (spec and mask) into hdr_field slots,
 * rejects a third L4 header, and marks the outer or inner UDP bit plus the
 * matching L4 computed field.
 * Fix: repaired mojibake '&para;ms' -> '&params' (two places).
 */
745 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
746 struct ulp_rte_parser_params *params)
748 const struct rte_flow_item_udp *udp_spec = item->spec;
749 const struct rte_flow_item_udp *udp_mask = item->mask;
750 struct ulp_rte_hdr_field *field;
751 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
752 uint32_t idx = params->field_idx;
754 uint32_t inner_l4, outer_l4;
/* Only two L4 headers (outer + inner) are supported. */
756 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
758 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
759 return BNXT_TF_RC_ERROR;
763 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
767 size = sizeof(udp_spec->hdr.src_port);
768 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
769 &udp_spec->hdr.src_port,
771 size = sizeof(udp_spec->hdr.dst_port);
772 field = ulp_rte_parser_fld_copy(field,
773 &udp_spec->hdr.dst_port,
775 size = sizeof(udp_spec->hdr.dgram_len);
776 field = ulp_rte_parser_fld_copy(field,
777 &udp_spec->hdr.dgram_len,
779 size = sizeof(udp_spec->hdr.dgram_cksum);
780 field = ulp_rte_parser_fld_copy(field,
781 &udp_spec->hdr.dgram_cksum,
/* Mask copies mirror the spec copies, in the same field order. */
785 ulp_rte_prsr_mask_copy(params, &idx,
786 &udp_mask->hdr.src_port,
787 sizeof(udp_mask->hdr.src_port));
788 ulp_rte_prsr_mask_copy(params, &idx,
789 &udp_mask->hdr.dst_port,
790 sizeof(udp_mask->hdr.dst_port));
791 ulp_rte_prsr_mask_copy(params, &idx,
792 &udp_mask->hdr.dgram_len,
793 sizeof(udp_mask->hdr.dgram_len));
794 ulp_rte_prsr_mask_copy(params, &idx,
795 &udp_mask->hdr.dgram_cksum,
796 sizeof(udp_mask->hdr.dgram_cksum));
799 /* Add number of UDP header elements */
800 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
802 /* Set the udp header bitmap and computed l4 header bitmaps */
803 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
805 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
806 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
807 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
808 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
810 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
811 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
813 return BNXT_TF_RC_SUCCESS;
816 /* Function to handle the parsing of RTE Flow item TCP Header. */
/* Copies the nine tcp header fields (spec and mask) into hdr_field slots,
 * rejects a third L4 header, and marks the outer or inner TCP bit plus the
 * matching L4 computed field.
 * Fix: repaired mojibake '&para;ms' -> '&params' (two places).
 */
818 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
819 struct ulp_rte_parser_params *params)
821 const struct rte_flow_item_tcp *tcp_spec = item->spec;
822 const struct rte_flow_item_tcp *tcp_mask = item->mask;
823 struct ulp_rte_hdr_field *field;
824 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
825 uint32_t idx = params->field_idx;
827 uint32_t inner_l4, outer_l4;
/* Only two L4 headers (outer + inner) are supported. */
829 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
831 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
832 return BNXT_TF_RC_ERROR;
836 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
840 size = sizeof(tcp_spec->hdr.src_port);
841 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
842 &tcp_spec->hdr.src_port,
844 size = sizeof(tcp_spec->hdr.dst_port);
845 field = ulp_rte_parser_fld_copy(field,
846 &tcp_spec->hdr.dst_port,
848 size = sizeof(tcp_spec->hdr.sent_seq);
849 field = ulp_rte_parser_fld_copy(field,
850 &tcp_spec->hdr.sent_seq,
852 size = sizeof(tcp_spec->hdr.recv_ack);
853 field = ulp_rte_parser_fld_copy(field,
854 &tcp_spec->hdr.recv_ack,
856 size = sizeof(tcp_spec->hdr.data_off);
857 field = ulp_rte_parser_fld_copy(field,
858 &tcp_spec->hdr.data_off,
860 size = sizeof(tcp_spec->hdr.tcp_flags);
861 field = ulp_rte_parser_fld_copy(field,
862 &tcp_spec->hdr.tcp_flags,
864 size = sizeof(tcp_spec->hdr.rx_win);
865 field = ulp_rte_parser_fld_copy(field,
866 &tcp_spec->hdr.rx_win,
868 size = sizeof(tcp_spec->hdr.cksum);
869 field = ulp_rte_parser_fld_copy(field,
870 &tcp_spec->hdr.cksum,
872 size = sizeof(tcp_spec->hdr.tcp_urp);
873 field = ulp_rte_parser_fld_copy(field,
874 &tcp_spec->hdr.tcp_urp,
/* NOTE(review): this advance appears to belong to a spec-absent branch on
 * elided lines — confirm against the full source before refactoring.
 */
877 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
/* Mask copies mirror the spec copies, in the same field order. */
881 ulp_rte_prsr_mask_copy(params, &idx,
882 &tcp_mask->hdr.src_port,
883 sizeof(tcp_mask->hdr.src_port));
884 ulp_rte_prsr_mask_copy(params, &idx,
885 &tcp_mask->hdr.dst_port,
886 sizeof(tcp_mask->hdr.dst_port));
887 ulp_rte_prsr_mask_copy(params, &idx,
888 &tcp_mask->hdr.sent_seq,
889 sizeof(tcp_mask->hdr.sent_seq));
890 ulp_rte_prsr_mask_copy(params, &idx,
891 &tcp_mask->hdr.recv_ack,
892 sizeof(tcp_mask->hdr.recv_ack));
893 ulp_rte_prsr_mask_copy(params, &idx,
894 &tcp_mask->hdr.data_off,
895 sizeof(tcp_mask->hdr.data_off));
896 ulp_rte_prsr_mask_copy(params, &idx,
897 &tcp_mask->hdr.tcp_flags,
898 sizeof(tcp_mask->hdr.tcp_flags));
899 ulp_rte_prsr_mask_copy(params, &idx,
900 &tcp_mask->hdr.rx_win,
901 sizeof(tcp_mask->hdr.rx_win));
902 ulp_rte_prsr_mask_copy(params, &idx,
903 &tcp_mask->hdr.cksum,
904 sizeof(tcp_mask->hdr.cksum));
905 ulp_rte_prsr_mask_copy(params, &idx,
906 &tcp_mask->hdr.tcp_urp,
907 sizeof(tcp_mask->hdr.tcp_urp));
909 /* add number of TCP header elements */
910 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
912 /* Set the udp header bitmap and computed l4 header bitmaps */
913 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
915 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
916 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
917 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
918 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
920 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
921 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
923 return BNXT_TF_RC_SUCCESS;
926 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/* Copies the vxlan flags/rsvd0/vni/rsvd1 fields (spec and mask) into
 * hdr_field slots and sets the tunnel VXLAN header bit.
 * Fix: repaired mojibake '&para;ms' -> '&params' (two places).
 */
928 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
929 struct ulp_rte_parser_params *params)
931 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
932 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
933 struct ulp_rte_hdr_field *field;
934 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
935 uint32_t idx = params->field_idx;
939 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
943 size = sizeof(vxlan_spec->flags);
944 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
947 size = sizeof(vxlan_spec->rsvd0);
948 field = ulp_rte_parser_fld_copy(field,
951 size = sizeof(vxlan_spec->vni);
952 field = ulp_rte_parser_fld_copy(field,
955 size = sizeof(vxlan_spec->rsvd1);
956 field = ulp_rte_parser_fld_copy(field,
/* Mask copies mirror the spec copies, in the same field order. */
961 ulp_rte_prsr_mask_copy(params, &idx,
963 sizeof(vxlan_mask->flags));
964 ulp_rte_prsr_mask_copy(params, &idx,
966 sizeof(vxlan_mask->rsvd0));
967 ulp_rte_prsr_mask_copy(params, &idx,
969 sizeof(vxlan_mask->vni));
970 ulp_rte_prsr_mask_copy(params, &idx,
972 sizeof(vxlan_mask->rsvd1));
974 /* Add number of vxlan header elements */
975 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
977 /* Update the hdr_bitmap with vxlan */
978 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
979 return BNXT_TF_RC_SUCCESS;
982 /* Function to handle the parsing of RTE Flow item void Header */
/* VOID items carry no match data; accept and ignore them. */
984 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
985 struct ulp_rte_parser_params *params __rte_unused)
987 return BNXT_TF_RC_SUCCESS;
990 /* Function to handle the parsing of RTE Flow action void Header. */
/* VOID actions carry no semantics; accept and ignore them. */
992 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
993 struct ulp_rte_parser_params *params __rte_unused)
995 return BNXT_TF_RC_SUCCESS;
998 /* Function to handle the parsing of RTE Flow action Mark Header. */
/* Stores the mark id (big-endian) into the MARK action property and sets
 * the MARK action bit; errors if the action conf is missing (the null
 * check is presumably on an elided line).
 * Fix: repaired mojibake '&para;m' -> '&param' (two places).
 */
1000 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1001 struct ulp_rte_parser_params *param)
1003 const struct rte_flow_action_mark *mark;
1004 struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1007 mark = action_item->conf;
1009 mark_id = tfp_cpu_to_be_32(mark->id);
1010 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1011 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1013 /* Update the hdr_bitmap with vxlan */
1014 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1015 return BNXT_TF_RC_SUCCESS;
1017 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1018 return BNXT_TF_RC_ERROR;
1021 /* Function to handle the parsing of RTE Flow action RSS Header. */
/* Sets the RSS action bit when the action conf is present; errors
 * otherwise (the null check is presumably on an elided line).
 */
1023 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1024 struct ulp_rte_parser_params *param)
1026 const struct rte_flow_action_rss *rss = action_item->conf;
1029 /* Update the hdr_bitmap with vxlan */
1030 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1031 return BNXT_TF_RC_SUCCESS;
1033 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1034 return BNXT_TF_RC_ERROR;
1037 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1039 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1040 struct ulp_rte_parser_params *params)
1042 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1043 const struct rte_flow_item *item;
1044 const struct rte_flow_item_eth *eth_spec;
1045 const struct rte_flow_item_ipv4 *ipv4_spec;
1046 const struct rte_flow_item_ipv6 *ipv6_spec;
1047 struct rte_flow_item_vxlan vxlan_spec;
1048 uint32_t vlan_num = 0, vlan_size = 0;
1049 uint32_t ip_size = 0, ip_type = 0;
1050 uint32_t vxlan_size = 0;
1052 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1053 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1055 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1056 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1058 vxlan_encap = action_item->conf;
1060 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1061 return BNXT_TF_RC_ERROR;
1064 item = vxlan_encap->definition;
1066 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1067 return BNXT_TF_RC_ERROR;
1070 if (!ulp_rte_item_skip_void(&item, 0))
1071 return BNXT_TF_RC_ERROR;
1073 /* must have ethernet header */
1074 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1075 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1076 return BNXT_TF_RC_ERROR;
1078 eth_spec = item->spec;
1079 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1080 ulp_encap_buffer_copy(buff,
1081 eth_spec->dst.addr_bytes,
1082 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1084 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1085 ulp_encap_buffer_copy(buff,
1086 eth_spec->src.addr_bytes,
1087 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1089 /* Goto the next item */
1090 if (!ulp_rte_item_skip_void(&item, 1))
1091 return BNXT_TF_RC_ERROR;
1093 /* May have vlan header */
1094 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1096 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1097 ulp_encap_buffer_copy(buff,
1099 sizeof(struct rte_flow_item_vlan));
1101 if (!ulp_rte_item_skip_void(&item, 1))
1102 return BNXT_TF_RC_ERROR;
1105 /* may have two vlan headers */
1106 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1108 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1109 sizeof(struct rte_flow_item_vlan)],
1111 sizeof(struct rte_flow_item_vlan));
1112 if (!ulp_rte_item_skip_void(&item, 1))
1113 return BNXT_TF_RC_ERROR;
1115 /* Update the vlan count and size of more than one */
1117 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1118 vlan_num = tfp_cpu_to_be_32(vlan_num);
1119 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1122 vlan_size = tfp_cpu_to_be_32(vlan_size);
1123 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1128 /* L3 must be IPv4, IPv6 */
1129 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1130 ipv4_spec = item->spec;
1131 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1133 /* copy the ipv4 details */
1134 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1135 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1136 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1137 ulp_encap_buffer_copy(buff,
1139 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1140 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1142 const uint8_t *tmp_buff;
1144 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1145 ulp_encap_buffer_copy(buff,
1146 &ipv4_spec->hdr.version_ihl,
1147 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1148 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1149 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1150 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1151 ulp_encap_buffer_copy(buff,
1153 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1155 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1156 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1157 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1158 ulp_encap_buffer_copy(buff,
1159 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1160 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1162 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1163 ulp_encap_buffer_copy(buff,
1164 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1165 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1167 /* Update the ip size details */
1168 ip_size = tfp_cpu_to_be_32(ip_size);
1169 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1170 &ip_size, sizeof(uint32_t));
1172 /* update the ip type */
1173 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1174 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1175 &ip_type, sizeof(uint32_t));
1177 /* update the computed field to notify it is ipv4 header */
1178 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1181 if (!ulp_rte_item_skip_void(&item, 1))
1182 return BNXT_TF_RC_ERROR;
1183 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1184 ipv6_spec = item->spec;
1185 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1187 /* copy the ipv4 details */
1188 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1189 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1191 /* Update the ip size details */
1192 ip_size = tfp_cpu_to_be_32(ip_size);
1193 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1194 &ip_size, sizeof(uint32_t));
1196 /* update the ip type */
1197 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1198 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1199 &ip_type, sizeof(uint32_t));
1201 /* update the computed field to notify it is ipv6 header */
1202 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1205 if (!ulp_rte_item_skip_void(&item, 1))
1206 return BNXT_TF_RC_ERROR;
1208 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1209 return BNXT_TF_RC_ERROR;
1213 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1214 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1215 return BNXT_TF_RC_ERROR;
1217 /* copy the udp details */
1218 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1219 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1221 if (!ulp_rte_item_skip_void(&item, 1))
1222 return BNXT_TF_RC_ERROR;
1225 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1226 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1227 return BNXT_TF_RC_ERROR;
1229 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1230 /* copy the vxlan details */
1231 memcpy(&vxlan_spec, item->spec, vxlan_size);
1232 vxlan_spec.flags = 0x08;
1233 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1234 (const uint8_t *)&vxlan_spec,
1236 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1237 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1238 &vxlan_size, sizeof(uint32_t));
1240 /*update the hdr_bitmap with vxlan */
1241 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1242 return BNXT_TF_RC_SUCCESS;
1245 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1247 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1249 struct ulp_rte_parser_params *params)
1251 /* update the hdr_bitmap with vxlan */
1252 ULP_BITMAP_SET(params->act_bitmap.bits,
1253 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1254 return BNXT_TF_RC_SUCCESS;
1257 /* Function to handle the parsing of RTE Flow action drop Header. */
1259 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1260 struct ulp_rte_parser_params *params)
1262 /* Update the hdr_bitmap with drop */
1263 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1264 return BNXT_TF_RC_SUCCESS;
1267 /* Function to handle the parsing of RTE Flow action count. */
1269 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1270 struct ulp_rte_parser_params *params)
1273 const struct rte_flow_action_count *act_count;
1274 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1276 act_count = action_item->conf;
1278 if (act_count->shared) {
1280 "Parse Error:Shared count not supported\n");
1281 return BNXT_TF_RC_PARSE_ERR;
1283 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1285 BNXT_ULP_ACT_PROP_SZ_COUNT);
1288 /* Update the hdr_bitmap with count */
1289 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1290 return BNXT_TF_RC_SUCCESS;
1293 /* Function to handle the parsing of RTE Flow action PF. */
1295 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1296 struct ulp_rte_parser_params *params)
1300 /* Update the hdr_bitmap with vnic bit */
1301 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1303 /* copy the PF of the current device into VNIC Property */
1304 svif = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1305 svif = bnxt_get_vnic_id(svif, BNXT_ULP_INTF_TYPE_INVALID);
1306 svif = rte_cpu_to_be_32(svif);
1307 memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1308 &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1310 return BNXT_TF_RC_SUCCESS;
1313 /* Function to handle the parsing of RTE Flow action VF. */
1315 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1316 struct ulp_rte_parser_params *param)
1318 const struct rte_flow_action_vf *vf_action;
1321 vf_action = action_item->conf;
1323 if (vf_action->original) {
1325 "Parse Error:VF Original not supported\n");
1326 return BNXT_TF_RC_PARSE_ERR;
1328 /* TBD: Update the computed VNIC using VF conversion */
1329 pid = bnxt_get_vnic_id(vf_action->id,
1330 BNXT_ULP_INTF_TYPE_INVALID);
1331 pid = rte_cpu_to_be_32(pid);
1332 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1333 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1336 /* Update the hdr_bitmap with count */
1337 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1338 return BNXT_TF_RC_SUCCESS;
1341 /* Function to handle the parsing of RTE Flow action port_id. */
1343 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1344 struct ulp_rte_parser_params *param)
1346 const struct rte_flow_action_port_id *port_id;
1349 port_id = act_item->conf;
1351 if (port_id->original) {
1353 "ParseErr:Portid Original not supported\n");
1354 return BNXT_TF_RC_PARSE_ERR;
1356 /* Update the computed VNIC using port conversion */
1357 if (port_id->id >= RTE_MAX_ETHPORTS) {
1359 "ParseErr:Portid is not valid\n");
1360 return BNXT_TF_RC_PARSE_ERR;
1362 pid = bnxt_get_vnic_id(port_id->id, BNXT_ULP_INTF_TYPE_INVALID);
1363 pid = rte_cpu_to_be_32(pid);
1364 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1365 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1368 /* Update the hdr_bitmap with count */
1369 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1370 return BNXT_TF_RC_SUCCESS;
1373 /* Function to handle the parsing of RTE Flow action phy_port. */
1375 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1376 struct ulp_rte_parser_params *prm)
1378 const struct rte_flow_action_phy_port *phy_port;
1380 struct bnxt_ulp_device_params *dparms;
1383 phy_port = action_item->conf;
1385 if (phy_port->original) {
1387 "Parse Err:Port Original not supported\n");
1388 return BNXT_TF_RC_PARSE_ERR;
1390 if (bnxt_ulp_cntxt_dev_id_get(prm->ulp_ctx, &dev_id)) {
1391 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
1395 dparms = bnxt_ulp_device_params_get(dev_id);
1397 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
1401 if (phy_port->index > dparms->num_phy_ports) {
1402 BNXT_TF_DBG(ERR, "ParseErr:Phy Port is not valid\n");
1403 return BNXT_TF_RC_PARSE_ERR;
1406 /* Get the vport of the physical port */
1407 /* TBD: shall be changed later to portdb call */
1408 vport = 1 << phy_port->index;
1409 vport = rte_cpu_to_be_32(vport);
1410 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1411 &vport, BNXT_ULP_ACT_PROP_SZ_VPORT);
1414 /* Update the hdr_bitmap with count */
1415 ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1416 return BNXT_TF_RC_SUCCESS;
1419 /* Function to handle the parsing of RTE Flow action pop vlan. */
1421 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1422 struct ulp_rte_parser_params *params)
1424 /* Update the act_bitmap with pop */
1425 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1426 return BNXT_TF_RC_SUCCESS;
1429 /* Function to handle the parsing of RTE Flow action push vlan. */
1431 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1432 struct ulp_rte_parser_params *params)
1434 const struct rte_flow_action_of_push_vlan *push_vlan;
1436 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1438 push_vlan = action_item->conf;
1440 ethertype = push_vlan->ethertype;
1441 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1443 "Parse Err: Ethertype not supported\n");
1444 return BNXT_TF_RC_PARSE_ERR;
1446 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1447 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1448 /* Update the hdr_bitmap with push vlan */
1449 ULP_BITMAP_SET(params->act_bitmap.bits,
1450 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1451 return BNXT_TF_RC_SUCCESS;
1453 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1454 return BNXT_TF_RC_ERROR;
1457 /* Function to handle the parsing of RTE Flow action set vlan id. */
1459 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1460 struct ulp_rte_parser_params *params)
1462 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1464 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1466 vlan_vid = action_item->conf;
1467 if (vlan_vid && vlan_vid->vlan_vid) {
1468 vid = vlan_vid->vlan_vid;
1469 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1470 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1471 /* Update the hdr_bitmap with vlan vid */
1472 ULP_BITMAP_SET(params->act_bitmap.bits,
1473 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1474 return BNXT_TF_RC_SUCCESS;
1476 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1477 return BNXT_TF_RC_ERROR;
1480 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1482 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1483 struct ulp_rte_parser_params *params)
1485 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1487 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1489 vlan_pcp = action_item->conf;
1491 pcp = vlan_pcp->vlan_pcp;
1492 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1493 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1494 /* Update the hdr_bitmap with vlan vid */
1495 ULP_BITMAP_SET(params->act_bitmap.bits,
1496 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1497 return BNXT_TF_RC_SUCCESS;
1499 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1500 return BNXT_TF_RC_ERROR;