1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Utility function to skip the void items. */
/*
 * Advance *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries.
 * NOTE(review): return type, braces and the use of 'increment' are
 * elided in this extraction — confirm against the upstream file
 * before editing.
 */
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
23 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 /* Utility function to update the field_bitmap */
/*
 * Mark hdr_field[idx] as a match field when its mask is non-zero;
 * a partial (not-all-ones) mask additionally flags wildcard matching
 * via BNXT_ULP_MATCH_TYPE_BITMASK_WM. A zero mask clears the bit.
 * NOTE(review): '¶ms' below is mojibake for '&params' (HTML
 * '&para;' entity collapse) — restore from upstream, do not compile
 * as-is.
 */
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
35 struct ulp_rte_hdr_field *field;
37 field = ¶ms->hdr_field[idx];
38 if (ulp_bitmap_notzero(field->mask, field->size)) {
39 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* partial mask => wildcard-match table type */
41 if (!ulp_bitmap_is_ones(field->mask, field->size))
42 ULP_BITMAP_SET(params->fld_bitmap.bits,
43 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
45 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
49 /* Utility function to copy field spec items */
/*
 * Copy 'buffer' into field->spec (field->size bytes) and — per the
 * call sites below — return the next hdr_field slot so callers can
 * chain copies. NOTE(review): the size assignment and return
 * statement are elided in this extraction.
 */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
56 memcpy(field->spec, buffer, field->size);
61 /* Utility function to copy field masks items */
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
68 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
70 memcpy(field->mask, buffer, size);
71 ulp_rte_parser_field_bitmap_update(params, *idx);
76 * Function to handle the parsing of RTE Flows and placing
77 * the RTE flow items into the ulp structures.
/*
 * Walk the RTE flow pattern and dispatch each item to the handler
 * registered in ulp_hdr_info[]; an unsupported item type aborts with
 * BNXT_TF_RC_PARSE_ERR, a failing handler with BNXT_TF_RC_ERROR.
 * field_idx starts past the implicit SVIF slots, and egress flows
 * set the direction bit up front. Ends by deriving the implied SVIF.
 * NOTE(review): braces, the item-advance statement and the
 * BNXT_TF_DBG invocation lines are elided in this extraction.
 */
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81 struct ulp_rte_parser_params *params)
83 const struct rte_flow_item *item = pattern;
84 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* reserve the leading hdr_field slots for the SVIF match */
86 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87 if (params->dir == ULP_DIR_EGRESS)
88 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
91 /* Parse all the items in the pattern */
92 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
93 /* get the header information from the flow_hdr_info table */
94 hdr_info = &ulp_hdr_info[item->type];
95 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
97 "Truflow parser does not support type %d\n",
99 return BNXT_TF_RC_PARSE_ERR;
100 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
101 /* call the registered callback handler */
102 if (hdr_info->proto_hdr_func) {
103 if (hdr_info->proto_hdr_func(item, params) !=
104 BNXT_TF_RC_SUCCESS) {
105 return BNXT_TF_RC_ERROR;
111 /* update the implied SVIF */
112 (void)ulp_rte_parser_svif_process(params);
113 return BNXT_TF_RC_SUCCESS;
117 * Function to handle the parsing of RTE Flows and placing
118 * the RTE flow actions into the ulp structures.
/*
 * Action-list counterpart of the header parser: walk the RTE flow
 * actions and dispatch each to the handler registered in
 * ulp_act_info[]. Unsupported action types and failing handlers both
 * return BNXT_TF_RC_ERROR. Ends by deriving the implied VNIC.
 * NOTE(review): braces, the action_item-advance statement and the
 * BNXT_TF_DBG invocation lines are elided in this extraction.
 */
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122 struct ulp_rte_parser_params *params)
124 const struct rte_flow_action *action_item = actions;
125 struct bnxt_ulp_rte_act_info *hdr_info;
127 if (params->dir == ULP_DIR_EGRESS)
128 ULP_BITMAP_SET(params->act_bitmap.bits,
129 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
131 /* Parse all the items in the pattern */
132 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
133 /* get the header information from the flow_hdr_info table */
134 hdr_info = &ulp_act_info[action_item->type];
135 if (hdr_info->act_type ==
136 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
138 "Truflow parser does not support act %u\n",
140 return BNXT_TF_RC_ERROR;
141 } else if (hdr_info->act_type ==
142 BNXT_ULP_ACT_TYPE_SUPPORTED) {
143 /* call the registered callback handler */
144 if (hdr_info->proto_act_func) {
145 if (hdr_info->proto_act_func(action_item,
147 BNXT_TF_RC_SUCCESS) {
148 return BNXT_TF_RC_ERROR;
154 /* update the implied VNIC */
155 ulp_rte_parser_vnic_process(params);
156 return BNXT_TF_RC_SUCCESS;
159 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Record the source virtual interface (SVIF) match in hdr_field and
 * the SVIF_FLAG computed field. Only one source per flow is allowed:
 * a second call errors out. For PORT_ID items the DPDK port id is
 * translated to a ULP ifindex, the svif type is chosen by direction
 * and interface type (VF-rep vs drv func on ingress, phy port
 * otherwise per the visible branch), and the svif is looked up from
 * the port DB and stored big-endian.
 * NOTE(review): several declarations (rc, ifindex, mask), the rc
 * check after the port-db call, and closing braces are elided.
 * '¶ms' is mojibake for '&params'.
 */
161 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
162 enum rte_flow_item_type proto,
166 uint16_t port_id = svif;
168 struct ulp_rte_hdr_field *hdr_field;
169 enum bnxt_ulp_svif_type svif_type;
170 enum bnxt_ulp_intf_type if_type;
/* a flow may match exactly one source interface */
174 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
175 BNXT_ULP_INVALID_SVIF_VAL) {
177 "SVIF already set,multiple source not support'd\n");
178 return BNXT_TF_RC_ERROR;
181 if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
182 dir = ULP_COMP_FLD_IDX_RD(params,
183 BNXT_ULP_CF_IDX_DIRECTION);
184 /* perform the conversion from dpdk port to bnxt svif */
185 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
189 "Invalid port id\n");
190 return BNXT_TF_RC_ERROR;
193 if (dir == ULP_DIR_INGRESS) {
194 svif_type = BNXT_ULP_PHY_PORT_SVIF;
196 if_type = bnxt_get_interface_type(port_id);
197 if (if_type == BNXT_ULP_INTF_TYPE_VF_REP)
198 svif_type = BNXT_ULP_VF_FUNC_SVIF;
200 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
202 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* hardware expects the svif in network byte order */
204 svif = rte_cpu_to_be_16(svif);
206 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
207 memcpy(hdr_field->spec, &svif, sizeof(svif));
208 memcpy(hdr_field->mask, &mask, sizeof(mask));
209 hdr_field->size = sizeof(svif);
210 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
211 rte_be_to_cpu_16(svif));
212 return BNXT_TF_RC_SUCCESS;
215 /* Function to handle the parsing of the RTE port id */
/*
 * If no explicit source item set the SVIF, derive it from the
 * incoming interface (computed field INCOMING_IF) and install it as
 * a PORT_ID-style SVIF match with a full mask.
 * NOTE(review): the final call's argument lines (port_id/svif_mask)
 * are elided in this extraction.
 */
217 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
219 uint16_t port_id = 0;
220 uint16_t svif_mask = 0xFFFF;
/* explicit SVIF already present — nothing to do */
222 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
223 BNXT_ULP_INVALID_SVIF_VAL)
224 return BNXT_TF_RC_SUCCESS;
226 /* SVIF not set. So get the port id */
227 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
229 /* Update the SVIF details */
230 return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
234 /* Function to handle the implicit VNIC RTE port id */
/*
 * Post-process the action bitmap for the destination: if an explicit
 * VNIC or VPORT action was parsed, just clear both bits (the match
 * side does not need them). Otherwise fall back to the PF action
 * handler to fill in the implicit VNIC, then clear the VNIC bit it
 * sets. Always succeeds. NOTE(review): '¶m s' mojibake ('&params')
 * appears on the declaration line; a closing brace is elided.
 */
236 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
238 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
240 if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
241 ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT)) {
243 * Reset the vnic/vport action bitmaps
244 * it is not required for match
246 ULP_BITMAP_RESET(params->act_bitmap.bits,
247 BNXT_ULP_ACTION_BIT_VNIC);
248 ULP_BITMAP_RESET(params->act_bitmap.bits,
249 BNXT_ULP_ACTION_BIT_VPORT);
250 return BNXT_TF_RC_SUCCESS;
253 /* Update the vnic details */
254 ulp_rte_pf_act_handler(NULL, params);
255 /* Reset the hdr_bitmap with vnic bit */
256 ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
258 return BNXT_TF_RC_SUCCESS;
261 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF flow item: match on the incoming interface itself. Reads the
 * port id from the INCOMING_IF computed field and delegates to
 * ulp_rte_parser_svif_set with a full mask.
 * NOTE(review): the delegated call's trailing argument lines are
 * elided in this extraction.
 */
263 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
264 struct ulp_rte_parser_params *params)
266 uint16_t port_id = 0;
267 uint16_t svif_mask = 0xFFFF;
269 /* Get the port id */
270 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
272 /* Update the SVIF details */
273 return ulp_rte_parser_svif_set(params,
278 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF flow item: take the VF id from spec/mask (each guarded by a
 * NULL check whose 'if' lines are elided here) and install it as the
 * SVIF match. The 32-bit vf id is truncated to 16 bits.
 */
280 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
281 struct ulp_rte_parser_params *params)
283 const struct rte_flow_item_vf *vf_spec = item->spec;
284 const struct rte_flow_item_vf *vf_mask = item->mask;
285 uint16_t svif = 0, mask = 0;
287 /* Get VF rte_flow_item for Port details */
289 svif = (uint16_t)vf_spec->id;
291 mask = (uint16_t)vf_mask->id;
293 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
296 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * PORT_ID flow item: validate the spec'd DPDK port id against
 * RTE_MAX_ETHPORTS, pick up the mask, and install the pair as the
 * SVIF match (svif_set performs the port->svif translation for
 * PORT_ID items). NOTE(review): NULL-guard 'if' lines around the
 * spec/mask reads are elided in this extraction.
 */
298 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
299 struct ulp_rte_parser_params *params)
301 const struct rte_flow_item_port_id *port_spec = item->spec;
302 const struct rte_flow_item_port_id *port_mask = item->mask;
303 uint16_t svif = 0, mask = 0;
306 * Copy the rte_flow_item for Port into hdr_field using port id
310 svif = (uint16_t)port_spec->id;
311 if (svif >= RTE_MAX_ETHPORTS) {
312 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
313 return BNXT_TF_RC_PARSE_ERR;
317 mask = (uint16_t)port_mask->id;
319 /* Update the SVIF details */
320 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
323 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * PHY_PORT flow item: validate the spec'd physical port index
 * against the device's num_phy_ports (looked up via device id and
 * device params) and install it as the SVIF match.
 * NOTE(review): the dev_id declaration, NULL guards and the error
 * returns after the two lookup failures are elided in this
 * extraction.
 */
325 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
326 struct ulp_rte_parser_params *params)
328 const struct rte_flow_item_phy_port *port_spec = item->spec;
329 const struct rte_flow_item_phy_port *port_mask = item->mask;
330 uint32_t svif = 0, mask = 0;
331 struct bnxt_ulp_device_params *dparms;
334 /* Copy the rte_flow_item for phy port into hdr_field */
336 svif = port_spec->index;
338 mask = port_mask->index;
340 if (bnxt_ulp_cntxt_dev_id_get(params->ulp_ctx, &dev_id)) {
341 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
345 dparms = bnxt_ulp_device_params_get(dev_id);
347 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
351 if (svif > dparms->num_phy_ports) {
352 BNXT_TF_DBG(ERR, "ParseErr:Phy Port is not valid\n");
353 return BNXT_TF_RC_PARSE_ERR;
356 /* Update the SVIF details */
357 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
360 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Ethernet flow item: copy dst MAC, src MAC and ethertype specs into
 * consecutive hdr_field slots, then the corresponding masks. Bumps
 * field_idx past the eth fields and reserves the vlan slots
 * (vlan_idx). Sets I_ETH in the hdr bitmap if O_ETH was already
 * present (inner header), otherwise sets O_ETH.
 * NOTE(review): NULL guards around spec/mask use, the 'size'
 * declaration and closing braces are elided. '¶ms' is mojibake
 * for '&params'.
 */
362 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
363 struct ulp_rte_parser_params *params)
365 const struct rte_flow_item_eth *eth_spec = item->spec;
366 const struct rte_flow_item_eth *eth_mask = item->mask;
367 struct ulp_rte_hdr_field *field;
368 uint32_t idx = params->field_idx;
369 uint64_t set_flag = 0;
373 * Copy the rte_flow_item for eth into hdr_field using ethernet
377 size = sizeof(eth_spec->dst.addr_bytes);
378 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
379 eth_spec->dst.addr_bytes,
381 size = sizeof(eth_spec->src.addr_bytes);
382 field = ulp_rte_parser_fld_copy(field,
383 eth_spec->src.addr_bytes,
385 field = ulp_rte_parser_fld_copy(field,
387 sizeof(eth_spec->type));
390 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
391 sizeof(eth_mask->dst.addr_bytes));
392 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
393 sizeof(eth_mask->src.addr_bytes));
394 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
395 sizeof(eth_mask->type));
397 /* Add number of vlan header elements */
398 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
399 params->vlan_idx = params->field_idx;
400 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
402 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
403 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
404 BNXT_ULP_HDR_BIT_O_ETH);
406 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
408 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
409 BNXT_ULP_HDR_BIT_I_ETH);
411 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
412 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
414 return BNXT_TF_RC_SUCCESS;
417 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * VLAN flow item: split the TCI into priority (top 3 bits) and the
 * 13-bit tag, copy priority/tag/inner_type specs and masks into the
 * vlan hdr_field slots starting at vlan_idx, then classify which of
 * the four vlan positions this is (outer-outer OO, outer-inner OI,
 * inner-outer IO, inner-inner II) from the O_ETH/I_ETH bits and the
 * current outer/inner vtag counts, updating the counts and setting
 * the matching hdr bit. A vlan with no preceding eth is an error.
 * NOTE(review): NULL guards, vtag-count conditions/increments,
 * field increments between the three mask memcpys and closing
 * braces are elided. '¶ms' is mojibake for '&params'. The
 * "withtout" typo is inside a runtime log string — left untouched.
 */
419 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
420 struct ulp_rte_parser_params *params)
422 const struct rte_flow_item_vlan *vlan_spec = item->spec;
423 const struct rte_flow_item_vlan *vlan_mask = item->mask;
424 struct ulp_rte_hdr_field *field;
425 struct ulp_rte_hdr_bitmap *hdr_bit;
426 uint32_t idx = params->vlan_idx;
427 uint16_t vlan_tag, priority;
428 uint32_t outer_vtag_num;
429 uint32_t inner_vtag_num;
432 * Copy the rte_flow_item for vlan into hdr_field using Vlan
436 vlan_tag = ntohs(vlan_spec->tci);
437 priority = htons(vlan_tag >> 13);
439 vlan_tag = htons(vlan_tag);
441 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
444 field = ulp_rte_parser_fld_copy(field,
447 field = ulp_rte_parser_fld_copy(field,
448 &vlan_spec->inner_type,
449 sizeof(vlan_spec->inner_type));
453 vlan_tag = ntohs(vlan_mask->tci);
454 priority = htons(vlan_tag >> 13);
456 vlan_tag = htons(vlan_tag);
458 field = ¶ms->hdr_field[idx];
459 memcpy(field->mask, &priority, field->size);
461 memcpy(field->mask, &vlan_tag, field->size);
463 memcpy(field->mask, &vlan_mask->inner_type, field->size);
465 /* Set the vlan index to new incremented value */
466 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
468 /* Get the outer tag and inner tag counts */
469 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
470 BNXT_ULP_CF_IDX_O_VTAG_NUM);
471 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
472 BNXT_ULP_CF_IDX_I_VTAG_NUM);
474 /* Update the hdr_bitmap of the vlans */
475 hdr_bit = ¶ms->hdr_bitmap;
476 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
477 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
479 /* Update the vlan tag num */
481 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
483 ULP_BITMAP_SET(params->hdr_bitmap.bits,
484 BNXT_ULP_HDR_BIT_OO_VLAN);
485 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
486 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
487 outer_vtag_num == 1) {
488 /* update the vlan tag num */
490 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
492 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
493 ULP_BITMAP_SET(params->hdr_bitmap.bits,
494 BNXT_ULP_HDR_BIT_OI_VLAN);
495 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
496 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
498 /* update the vlan tag num */
500 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
502 ULP_BITMAP_SET(params->hdr_bitmap.bits,
503 BNXT_ULP_HDR_BIT_IO_VLAN);
504 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
505 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
506 inner_vtag_num == 1) {
507 /* update the vlan tag num */
509 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
511 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
512 ULP_BITMAP_SET(params->hdr_bitmap.bits,
513 BNXT_ULP_HDR_BIT_II_VLAN);
515 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
516 return BNXT_TF_RC_ERROR;
518 return BNXT_TF_RC_SUCCESS;
521 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * IPv4 flow item: reject a third L3 header (inner L3 already
 * counted), then copy all ten IPv4 header fields (version_ihl
 * through dst_addr) into consecutive hdr_field slots followed by
 * their masks, advance field_idx, and set I_IPV4 (if an outer L3 is
 * already present) or O_IPV4 plus the matching computed L3 count.
 * NOTE(review): NULL guards around spec/mask use, the 'size'
 * declaration, the inner_l3 limit check condition and closing
 * braces are elided. '¶ms' is mojibake for '&params'.
 */
523 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
524 struct ulp_rte_parser_params *params)
526 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
527 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
528 struct ulp_rte_hdr_field *field;
529 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
530 uint32_t idx = params->field_idx;
532 uint32_t inner_l3, outer_l3;
534 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
536 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
537 return BNXT_TF_RC_ERROR;
541 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
545 size = sizeof(ipv4_spec->hdr.version_ihl);
546 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
547 &ipv4_spec->hdr.version_ihl,
549 size = sizeof(ipv4_spec->hdr.type_of_service);
550 field = ulp_rte_parser_fld_copy(field,
551 &ipv4_spec->hdr.type_of_service,
553 size = sizeof(ipv4_spec->hdr.total_length);
554 field = ulp_rte_parser_fld_copy(field,
555 &ipv4_spec->hdr.total_length,
557 size = sizeof(ipv4_spec->hdr.packet_id);
558 field = ulp_rte_parser_fld_copy(field,
559 &ipv4_spec->hdr.packet_id,
561 size = sizeof(ipv4_spec->hdr.fragment_offset);
562 field = ulp_rte_parser_fld_copy(field,
563 &ipv4_spec->hdr.fragment_offset,
565 size = sizeof(ipv4_spec->hdr.time_to_live);
566 field = ulp_rte_parser_fld_copy(field,
567 &ipv4_spec->hdr.time_to_live,
569 size = sizeof(ipv4_spec->hdr.next_proto_id);
570 field = ulp_rte_parser_fld_copy(field,
571 &ipv4_spec->hdr.next_proto_id,
573 size = sizeof(ipv4_spec->hdr.hdr_checksum);
574 field = ulp_rte_parser_fld_copy(field,
575 &ipv4_spec->hdr.hdr_checksum,
577 size = sizeof(ipv4_spec->hdr.src_addr);
578 field = ulp_rte_parser_fld_copy(field,
579 &ipv4_spec->hdr.src_addr,
581 size = sizeof(ipv4_spec->hdr.dst_addr);
582 field = ulp_rte_parser_fld_copy(field,
583 &ipv4_spec->hdr.dst_addr,
587 ulp_rte_prsr_mask_copy(params, &idx,
588 &ipv4_mask->hdr.version_ihl,
589 sizeof(ipv4_mask->hdr.version_ihl));
590 ulp_rte_prsr_mask_copy(params, &idx,
591 &ipv4_mask->hdr.type_of_service,
592 sizeof(ipv4_mask->hdr.type_of_service));
593 ulp_rte_prsr_mask_copy(params, &idx,
594 &ipv4_mask->hdr.total_length,
595 sizeof(ipv4_mask->hdr.total_length));
596 ulp_rte_prsr_mask_copy(params, &idx,
597 &ipv4_mask->hdr.packet_id,
598 sizeof(ipv4_mask->hdr.packet_id));
599 ulp_rte_prsr_mask_copy(params, &idx,
600 &ipv4_mask->hdr.fragment_offset,
601 sizeof(ipv4_mask->hdr.fragment_offset));
602 ulp_rte_prsr_mask_copy(params, &idx,
603 &ipv4_mask->hdr.time_to_live,
604 sizeof(ipv4_mask->hdr.time_to_live));
605 ulp_rte_prsr_mask_copy(params, &idx,
606 &ipv4_mask->hdr.next_proto_id,
607 sizeof(ipv4_mask->hdr.next_proto_id));
608 ulp_rte_prsr_mask_copy(params, &idx,
609 &ipv4_mask->hdr.hdr_checksum,
610 sizeof(ipv4_mask->hdr.hdr_checksum));
611 ulp_rte_prsr_mask_copy(params, &idx,
612 &ipv4_mask->hdr.src_addr,
613 sizeof(ipv4_mask->hdr.src_addr));
614 ulp_rte_prsr_mask_copy(params, &idx,
615 &ipv4_mask->hdr.dst_addr,
616 sizeof(ipv4_mask->hdr.dst_addr));
618 /* Add the number of ipv4 header elements */
619 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
621 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
622 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
624 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
625 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
626 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
628 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
630 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
632 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
634 return BNXT_TF_RC_SUCCESS;
637 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPv6 flow item: reject a third L3 header, decompose vtc_flow into
 * version / traffic-class / flow-label via the BNXT_ULP_GET_IPV6_*
 * macros (spec and mask alike), copy the remaining header fields
 * (payload_len, proto, hop_limits, src/dst addr) plus masks into
 * hdr_field, advance field_idx, and set I_IPV6 (inner) or O_IPV6
 * (outer) with the matching computed L3 flag.
 * NOTE(review): NULL guards, the 'size' declaration, the vtcf
 * argument lines of several copy calls and closing braces are
 * elided. '¶ms' is mojibake for '&params'.
 */
639 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
640 struct ulp_rte_parser_params *params)
642 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
643 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
644 struct ulp_rte_hdr_field *field;
645 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
646 uint32_t idx = params->field_idx;
648 uint32_t inner_l3, outer_l3;
649 uint32_t vtcf, vtcf_mask;
651 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
653 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
654 return BNXT_TF_RC_ERROR;
658 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
662 size = sizeof(ipv6_spec->hdr.vtc_flow);
664 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
665 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
669 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
670 field = ulp_rte_parser_fld_copy(field,
674 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
675 field = ulp_rte_parser_fld_copy(field,
679 size = sizeof(ipv6_spec->hdr.payload_len);
680 field = ulp_rte_parser_fld_copy(field,
681 &ipv6_spec->hdr.payload_len,
683 size = sizeof(ipv6_spec->hdr.proto);
684 field = ulp_rte_parser_fld_copy(field,
685 &ipv6_spec->hdr.proto,
687 size = sizeof(ipv6_spec->hdr.hop_limits);
688 field = ulp_rte_parser_fld_copy(field,
689 &ipv6_spec->hdr.hop_limits,
691 size = sizeof(ipv6_spec->hdr.src_addr);
692 field = ulp_rte_parser_fld_copy(field,
693 &ipv6_spec->hdr.src_addr,
695 size = sizeof(ipv6_spec->hdr.dst_addr);
696 field = ulp_rte_parser_fld_copy(field,
697 &ipv6_spec->hdr.dst_addr,
701 size = sizeof(ipv6_mask->hdr.vtc_flow);
703 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
704 ulp_rte_prsr_mask_copy(params, &idx,
708 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
709 ulp_rte_prsr_mask_copy(params, &idx,
714 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
715 ulp_rte_prsr_mask_copy(params, &idx,
719 ulp_rte_prsr_mask_copy(params, &idx,
720 &ipv6_mask->hdr.payload_len,
721 sizeof(ipv6_mask->hdr.payload_len));
722 ulp_rte_prsr_mask_copy(params, &idx,
723 &ipv6_mask->hdr.proto,
724 sizeof(ipv6_mask->hdr.proto));
725 ulp_rte_prsr_mask_copy(params, &idx,
726 &ipv6_mask->hdr.hop_limits,
727 sizeof(ipv6_mask->hdr.hop_limits));
728 ulp_rte_prsr_mask_copy(params, &idx,
729 &ipv6_mask->hdr.src_addr,
730 sizeof(ipv6_mask->hdr.src_addr));
731 ulp_rte_prsr_mask_copy(params, &idx,
732 &ipv6_mask->hdr.dst_addr,
733 sizeof(ipv6_mask->hdr.dst_addr));
735 /* add number of ipv6 header elements */
736 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
738 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
739 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
741 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
742 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
743 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
744 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
746 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
747 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
749 return BNXT_TF_RC_SUCCESS;
752 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * UDP flow item: reject a third L4 header, copy src/dst port,
 * dgram_len and dgram_cksum specs plus masks into hdr_field, advance
 * field_idx, and set I_UDP (if an outer L4 is already present) or
 * O_UDP with the matching computed L4 flag.
 * NOTE(review): NULL guards, the 'size' declaration, the inner_l4
 * limit check condition and closing braces are elided. '¶ms'
 * is mojibake for '&params'. The comment on the original line
 * "772" says "ipv4" — stale copy/paste from the IPv4 handler.
 */
754 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
755 struct ulp_rte_parser_params *params)
757 const struct rte_flow_item_udp *udp_spec = item->spec;
758 const struct rte_flow_item_udp *udp_mask = item->mask;
759 struct ulp_rte_hdr_field *field;
760 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
761 uint32_t idx = params->field_idx;
763 uint32_t inner_l4, outer_l4;
765 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
767 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
768 return BNXT_TF_RC_ERROR;
772 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
776 size = sizeof(udp_spec->hdr.src_port);
777 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
778 &udp_spec->hdr.src_port,
780 size = sizeof(udp_spec->hdr.dst_port);
781 field = ulp_rte_parser_fld_copy(field,
782 &udp_spec->hdr.dst_port,
784 size = sizeof(udp_spec->hdr.dgram_len);
785 field = ulp_rte_parser_fld_copy(field,
786 &udp_spec->hdr.dgram_len,
788 size = sizeof(udp_spec->hdr.dgram_cksum);
789 field = ulp_rte_parser_fld_copy(field,
790 &udp_spec->hdr.dgram_cksum,
794 ulp_rte_prsr_mask_copy(params, &idx,
795 &udp_mask->hdr.src_port,
796 sizeof(udp_mask->hdr.src_port));
797 ulp_rte_prsr_mask_copy(params, &idx,
798 &udp_mask->hdr.dst_port,
799 sizeof(udp_mask->hdr.dst_port));
800 ulp_rte_prsr_mask_copy(params, &idx,
801 &udp_mask->hdr.dgram_len,
802 sizeof(udp_mask->hdr.dgram_len));
803 ulp_rte_prsr_mask_copy(params, &idx,
804 &udp_mask->hdr.dgram_cksum,
805 sizeof(udp_mask->hdr.dgram_cksum));
808 /* Add number of UDP header elements */
809 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
811 /* Set the udp header bitmap and computed l4 header bitmaps */
812 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
814 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
815 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
816 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
817 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
819 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
820 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
822 return BNXT_TF_RC_SUCCESS;
825 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * TCP flow item: reject a third L4 header, copy all nine TCP header
 * fields (src_port through tcp_urp) into hdr_field; when the spec is
 * absent the visible 'idx += BNXT_ULP_PROTO_HDR_TCP_NUM' skips the
 * spec slots before the masks are copied. Advances field_idx and
 * sets I_TCP (if an outer L4 is already present) or O_TCP with the
 * matching computed L4 flag.
 * NOTE(review): NULL guards, the 'size' declaration, the inner_l4
 * limit check condition and closing braces are elided. '¶ms'
 * is mojibake for '&params'. The copy-comment on original line
 * "845" says "ipv4" — stale copy/paste.
 */
827 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
828 struct ulp_rte_parser_params *params)
830 const struct rte_flow_item_tcp *tcp_spec = item->spec;
831 const struct rte_flow_item_tcp *tcp_mask = item->mask;
832 struct ulp_rte_hdr_field *field;
833 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
834 uint32_t idx = params->field_idx;
836 uint32_t inner_l4, outer_l4;
838 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
840 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
841 return BNXT_TF_RC_ERROR;
845 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
849 size = sizeof(tcp_spec->hdr.src_port);
850 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
851 &tcp_spec->hdr.src_port,
853 size = sizeof(tcp_spec->hdr.dst_port);
854 field = ulp_rte_parser_fld_copy(field,
855 &tcp_spec->hdr.dst_port,
857 size = sizeof(tcp_spec->hdr.sent_seq);
858 field = ulp_rte_parser_fld_copy(field,
859 &tcp_spec->hdr.sent_seq,
861 size = sizeof(tcp_spec->hdr.recv_ack);
862 field = ulp_rte_parser_fld_copy(field,
863 &tcp_spec->hdr.recv_ack,
865 size = sizeof(tcp_spec->hdr.data_off);
866 field = ulp_rte_parser_fld_copy(field,
867 &tcp_spec->hdr.data_off,
869 size = sizeof(tcp_spec->hdr.tcp_flags);
870 field = ulp_rte_parser_fld_copy(field,
871 &tcp_spec->hdr.tcp_flags,
873 size = sizeof(tcp_spec->hdr.rx_win);
874 field = ulp_rte_parser_fld_copy(field,
875 &tcp_spec->hdr.rx_win,
877 size = sizeof(tcp_spec->hdr.cksum);
878 field = ulp_rte_parser_fld_copy(field,
879 &tcp_spec->hdr.cksum,
881 size = sizeof(tcp_spec->hdr.tcp_urp);
882 field = ulp_rte_parser_fld_copy(field,
883 &tcp_spec->hdr.tcp_urp,
886 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
890 ulp_rte_prsr_mask_copy(params, &idx,
891 &tcp_mask->hdr.src_port,
892 sizeof(tcp_mask->hdr.src_port));
893 ulp_rte_prsr_mask_copy(params, &idx,
894 &tcp_mask->hdr.dst_port,
895 sizeof(tcp_mask->hdr.dst_port));
896 ulp_rte_prsr_mask_copy(params, &idx,
897 &tcp_mask->hdr.sent_seq,
898 sizeof(tcp_mask->hdr.sent_seq));
899 ulp_rte_prsr_mask_copy(params, &idx,
900 &tcp_mask->hdr.recv_ack,
901 sizeof(tcp_mask->hdr.recv_ack));
902 ulp_rte_prsr_mask_copy(params, &idx,
903 &tcp_mask->hdr.data_off,
904 sizeof(tcp_mask->hdr.data_off));
905 ulp_rte_prsr_mask_copy(params, &idx,
906 &tcp_mask->hdr.tcp_flags,
907 sizeof(tcp_mask->hdr.tcp_flags));
908 ulp_rte_prsr_mask_copy(params, &idx,
909 &tcp_mask->hdr.rx_win,
910 sizeof(tcp_mask->hdr.rx_win));
911 ulp_rte_prsr_mask_copy(params, &idx,
912 &tcp_mask->hdr.cksum,
913 sizeof(tcp_mask->hdr.cksum));
914 ulp_rte_prsr_mask_copy(params, &idx,
915 &tcp_mask->hdr.tcp_urp,
916 sizeof(tcp_mask->hdr.tcp_urp));
918 /* add number of TCP header elements */
919 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
921 /* Set the udp header bitmap and computed l4 header bitmaps */
922 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
924 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
925 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
926 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
927 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
929 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
930 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
932 return BNXT_TF_RC_SUCCESS;
935 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * VXLAN flow item: copy flags, rsvd0, vni and rsvd1 specs plus masks
 * into hdr_field, advance field_idx, and set the tunnel bit
 * BNXT_ULP_HDR_BIT_T_VXLAN.
 * NOTE(review): NULL guards, the 'size' declaration and the buffer
 * argument lines of several copy calls are elided. '¶ms' is
 * mojibake for '&params'.
 */
937 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
938 struct ulp_rte_parser_params *params)
940 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
941 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
942 struct ulp_rte_hdr_field *field;
943 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
944 uint32_t idx = params->field_idx;
948 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
952 size = sizeof(vxlan_spec->flags);
953 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
956 size = sizeof(vxlan_spec->rsvd0);
957 field = ulp_rte_parser_fld_copy(field,
960 size = sizeof(vxlan_spec->vni);
961 field = ulp_rte_parser_fld_copy(field,
964 size = sizeof(vxlan_spec->rsvd1);
965 field = ulp_rte_parser_fld_copy(field,
970 ulp_rte_prsr_mask_copy(params, &idx,
972 sizeof(vxlan_mask->flags));
973 ulp_rte_prsr_mask_copy(params, &idx,
975 sizeof(vxlan_mask->rsvd0));
976 ulp_rte_prsr_mask_copy(params, &idx,
978 sizeof(vxlan_mask->vni));
979 ulp_rte_prsr_mask_copy(params, &idx,
981 sizeof(vxlan_mask->rsvd1));
983 /* Add number of vxlan header elements */
984 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
986 /* Update the hdr_bitmap with vxlan */
987 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
988 return BNXT_TF_RC_SUCCESS;
991 /* Function to handle the parsing of RTE Flow item void Header */
/* No-op handler for RTE_FLOW_ITEM_TYPE_VOID: always succeeds. */
993 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
994 struct ulp_rte_parser_params *params __rte_unused)
996 return BNXT_TF_RC_SUCCESS;
999 /* Function to handle the parsing of RTE Flow action void Header. */
/* No-op handler for RTE_FLOW_ACTION_TYPE_VOID: always succeeds. */
1001 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1002 struct ulp_rte_parser_params *params __rte_unused)
1004 return BNXT_TF_RC_SUCCESS;
1007 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * MARK action: store the mark id (big-endian) into the MARK action
 * property and set BNXT_ULP_ACTION_BIT_MARK; a missing conf is a
 * parse error. NOTE(review): the NULL-guard 'if (mark)' line and
 * the mark_id declaration are elided; the "with vxlan" comment on
 * original line "1022" is stale copy/paste (this sets the MARK
 * bit). '¶m' is mojibake for '&param'.
 */
1009 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1010 struct ulp_rte_parser_params *param)
1012 const struct rte_flow_action_mark *mark;
1013 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1016 mark = action_item->conf;
1018 mark_id = tfp_cpu_to_be_32(mark->id);
1019 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1020 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1022 /* Update the hdr_bitmap with vxlan */
1023 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1024 return BNXT_TF_RC_SUCCESS;
1026 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1027 return BNXT_TF_RC_ERROR;
1030 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * RSS action: set BNXT_ULP_ACTION_BIT_RSS when conf is present,
 * error otherwise. NOTE(review): the 'if (rss)' guard line is
 * elided; the "with vxlan" comment on original line "1038" is
 * stale copy/paste (this sets the RSS bit).
 */
1032 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1033 struct ulp_rte_parser_params *param)
1035 const struct rte_flow_action_rss *rss = action_item->conf;
1038 /* Update the hdr_bitmap with vxlan */
1039 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1040 return BNXT_TF_RC_SUCCESS;
1042 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1043 return BNXT_TF_RC_ERROR;
1046 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1048 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1049 struct ulp_rte_parser_params *params)
1051 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1052 const struct rte_flow_item *item;
1053 const struct rte_flow_item_eth *eth_spec;
1054 const struct rte_flow_item_ipv4 *ipv4_spec;
1055 const struct rte_flow_item_ipv6 *ipv6_spec;
1056 struct rte_flow_item_vxlan vxlan_spec;
1057 uint32_t vlan_num = 0, vlan_size = 0;
1058 uint32_t ip_size = 0, ip_type = 0;
1059 uint32_t vxlan_size = 0;
1061 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1062 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1064 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1065 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1067 vxlan_encap = action_item->conf;
1069 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1070 return BNXT_TF_RC_ERROR;
1073 item = vxlan_encap->definition;
1075 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1076 return BNXT_TF_RC_ERROR;
1079 if (!ulp_rte_item_skip_void(&item, 0))
1080 return BNXT_TF_RC_ERROR;
1082 /* must have ethernet header */
1083 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1084 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1085 return BNXT_TF_RC_ERROR;
1087 eth_spec = item->spec;
1088 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1089 ulp_encap_buffer_copy(buff,
1090 eth_spec->dst.addr_bytes,
1091 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1093 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1094 ulp_encap_buffer_copy(buff,
1095 eth_spec->src.addr_bytes,
1096 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1098 /* Goto the next item */
1099 if (!ulp_rte_item_skip_void(&item, 1))
1100 return BNXT_TF_RC_ERROR;
1102 /* May have vlan header */
1103 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1105 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1106 ulp_encap_buffer_copy(buff,
1108 sizeof(struct rte_flow_item_vlan));
1110 if (!ulp_rte_item_skip_void(&item, 1))
1111 return BNXT_TF_RC_ERROR;
1114 /* may have two vlan headers */
1115 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1117 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1118 sizeof(struct rte_flow_item_vlan)],
1120 sizeof(struct rte_flow_item_vlan));
1121 if (!ulp_rte_item_skip_void(&item, 1))
1122 return BNXT_TF_RC_ERROR;
1124 /* Update the vlan count and size of more than one */
1126 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1127 vlan_num = tfp_cpu_to_be_32(vlan_num);
1128 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1131 vlan_size = tfp_cpu_to_be_32(vlan_size);
1132 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1137 /* L3 must be IPv4, IPv6 */
1138 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1139 ipv4_spec = item->spec;
1140 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1142 /* copy the ipv4 details */
1143 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1144 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1145 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1146 ulp_encap_buffer_copy(buff,
1148 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1149 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1151 const uint8_t *tmp_buff;
1153 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1154 ulp_encap_buffer_copy(buff,
1155 &ipv4_spec->hdr.version_ihl,
1156 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1157 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1158 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1159 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1160 ulp_encap_buffer_copy(buff,
1162 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1164 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1165 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1166 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1167 ulp_encap_buffer_copy(buff,
1168 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1169 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1171 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1172 ulp_encap_buffer_copy(buff,
1173 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1174 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1176 /* Update the ip size details */
1177 ip_size = tfp_cpu_to_be_32(ip_size);
1178 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1179 &ip_size, sizeof(uint32_t));
1181 /* update the ip type */
1182 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1183 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1184 &ip_type, sizeof(uint32_t));
1186 /* update the computed field to notify it is ipv4 header */
1187 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1190 if (!ulp_rte_item_skip_void(&item, 1))
1191 return BNXT_TF_RC_ERROR;
1192 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1193 ipv6_spec = item->spec;
1194 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1196 /* copy the ipv4 details */
1197 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1198 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1200 /* Update the ip size details */
1201 ip_size = tfp_cpu_to_be_32(ip_size);
1202 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1203 &ip_size, sizeof(uint32_t));
1205 /* update the ip type */
1206 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1207 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1208 &ip_type, sizeof(uint32_t));
1210 /* update the computed field to notify it is ipv6 header */
1211 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1214 if (!ulp_rte_item_skip_void(&item, 1))
1215 return BNXT_TF_RC_ERROR;
1217 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1218 return BNXT_TF_RC_ERROR;
1222 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1223 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1224 return BNXT_TF_RC_ERROR;
1226 /* copy the udp details */
1227 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1228 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1230 if (!ulp_rte_item_skip_void(&item, 1))
1231 return BNXT_TF_RC_ERROR;
1234 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1235 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1236 return BNXT_TF_RC_ERROR;
1238 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1239 /* copy the vxlan details */
1240 memcpy(&vxlan_spec, item->spec, vxlan_size);
1241 vxlan_spec.flags = 0x08;
1242 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1243 (const uint8_t *)&vxlan_spec,
1245 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1246 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1247 &vxlan_size, sizeof(uint32_t));
1249 /*update the hdr_bitmap with vxlan */
1250 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1251 return BNXT_TF_RC_SUCCESS;
1254 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1256 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1258 struct ulp_rte_parser_params *params)
1260 /* update the hdr_bitmap with vxlan */
1261 ULP_BITMAP_SET(params->act_bitmap.bits,
1262 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1263 return BNXT_TF_RC_SUCCESS;
1266 /* Function to handle the parsing of RTE Flow action drop Header. */
1268 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1269 struct ulp_rte_parser_params *params)
1271 /* Update the hdr_bitmap with drop */
1272 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1273 return BNXT_TF_RC_SUCCESS;
1276 /* Function to handle the parsing of RTE Flow action count. */
1278 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1279 struct ulp_rte_parser_params *params)
1282 const struct rte_flow_action_count *act_count;
1283 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1285 act_count = action_item->conf;
1287 if (act_count->shared) {
1289 "Parse Error:Shared count not supported\n");
1290 return BNXT_TF_RC_PARSE_ERR;
1292 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1294 BNXT_ULP_ACT_PROP_SZ_COUNT);
1297 /* Update the hdr_bitmap with count */
1298 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1299 return BNXT_TF_RC_SUCCESS;
1302 /* Function to handle the parsing of RTE Flow action PF. */
1304 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1305 struct ulp_rte_parser_params *params)
1309 /* Update the hdr_bitmap with vnic bit */
1310 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1312 /* copy the PF of the current device into VNIC Property */
1313 svif = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1314 svif = bnxt_get_vnic_id(svif, BNXT_ULP_INTF_TYPE_INVALID);
1315 svif = rte_cpu_to_be_32(svif);
1316 memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1317 &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1319 return BNXT_TF_RC_SUCCESS;
1322 /* Function to handle the parsing of RTE Flow action VF. */
1324 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1325 struct ulp_rte_parser_params *param)
1327 const struct rte_flow_action_vf *vf_action;
1330 vf_action = action_item->conf;
1332 if (vf_action->original) {
1334 "Parse Error:VF Original not supported\n");
1335 return BNXT_TF_RC_PARSE_ERR;
1337 /* TBD: Update the computed VNIC using VF conversion */
1338 pid = bnxt_get_vnic_id(vf_action->id,
1339 BNXT_ULP_INTF_TYPE_INVALID);
1340 pid = rte_cpu_to_be_32(pid);
1341 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1342 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1345 /* Update the hdr_bitmap with count */
1346 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1347 return BNXT_TF_RC_SUCCESS;
/* Function to handle the parsing of RTE Flow action port_id.
 * Resolves the destination port: egress flows get a vport, ingress
 * flows get the default vnic; either is written to the action
 * property table in network byte order.
 */
ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
			    struct ulp_rte_parser_params *param)
	const struct rte_flow_action_port_id *port_id;
	struct ulp_rte_act_prop *act;
	/* conf carries the destination port id; a null conf is an error */
	port_id = act_item->conf;
		    "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	/* only the dpdk port-id form is supported, not "original" */
	if (port_id->original) {
		    "ParseErr:Portid Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	/* Get the port db ifindex */
	rc = ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx,
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	act = &param->act_prop;
	/* egress resolves to a vport, ingress to the default func vnic */
	if (param->dir == ULP_DIR_EGRESS) {
		rc = ulp_port_db_vport_get(param->ulp_ctx,
			return BNXT_TF_RC_ERROR;
		/* store the vport in network byte order */
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
		rc = ulp_port_db_default_vnic_get(param->ulp_ctx,
						  BNXT_ULP_DRV_FUNC_VNIC,
			return BNXT_TF_RC_ERROR;
		/* store the vnic in network byte order */
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	/* Update the act_bitmap with vnic */
	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
	return BNXT_TF_RC_SUCCESS;
/* Function to handle the parsing of RTE Flow action phy_port.
 * Looks up the vport for the given physical port index and stores it
 * in the action property table; only valid for egress flows.
 */
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_parser_params *prm)
	const struct rte_flow_action_phy_port *phy_port;
	/* conf carries the physical port index; a null conf is an error */
	phy_port = action_item->conf;
		    "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	/* only the index form is supported, not "original" */
	if (phy_port->original) {
		    "Parse Err:Port Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	/* phy_port is only supported on egress flows, per this check */
	if (prm->dir != ULP_DIR_EGRESS) {
		    "Parse Err:Phy ports are valid only for egress\n");
		return BNXT_TF_RC_PARSE_ERR;
	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
		BNXT_TF_DBG(DEBUG, "Failed to get port details\n");
	/* store the vport in network byte order */
	pid = rte_cpu_to_be_32(pid);
	memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
	       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
	/* update the act_bitmap with vport */
	ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
	return BNXT_TF_RC_SUCCESS;
1458 /* Function to handle the parsing of RTE Flow action pop vlan. */
1460 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1461 struct ulp_rte_parser_params *params)
1463 /* Update the act_bitmap with pop */
1464 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1465 return BNXT_TF_RC_SUCCESS;
/* Function to handle the parsing of RTE Flow action push vlan.
 * Records the tag ethertype in the action property table; only the
 * standard 0x8100 VLAN ethertype is accepted.
 */
ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
	const struct rte_flow_action_of_push_vlan *push_vlan;
	struct ulp_rte_act_prop *act = &params->act_prop;
	push_vlan = action_item->conf;
	ethertype = push_vlan->ethertype;
	/* NOTE(review): rte_flow declares this ethertype as rte_be16_t
	 * (already big endian); byte-swapping it again with
	 * tfp_cpu_to_be_16() before comparing against the host-order
	 * RTE_ETHER_TYPE_VLAN looks suspicious on little-endian hosts -
	 * confirm the intended byte order here.
	 */
	if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
		    "Parse Err: Ethertype not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	/* stash the tag ethertype for the template engine */
	memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
	       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
	/* Update the act_bitmap with push vlan */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACTION_BIT_PUSH_VLAN);
	return BNXT_TF_RC_SUCCESS;
	/* null conf argument */
	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
	return BNXT_TF_RC_ERROR;
1496 /* Function to handle the parsing of RTE Flow action set vlan id. */
1498 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1499 struct ulp_rte_parser_params *params)
1501 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1503 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1505 vlan_vid = action_item->conf;
1506 if (vlan_vid && vlan_vid->vlan_vid) {
1507 vid = vlan_vid->vlan_vid;
1508 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1509 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1510 /* Update the hdr_bitmap with vlan vid */
1511 ULP_BITMAP_SET(params->act_bitmap.bits,
1512 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1513 return BNXT_TF_RC_SUCCESS;
1515 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1516 return BNXT_TF_RC_ERROR;
1519 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1521 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1522 struct ulp_rte_parser_params *params)
1524 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1526 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1528 vlan_pcp = action_item->conf;
1530 pcp = vlan_pcp->vlan_pcp;
1531 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1532 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1533 /* Update the hdr_bitmap with vlan vid */
1534 ULP_BITMAP_SET(params->act_bitmap.bits,
1535 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1536 return BNXT_TF_RC_SUCCESS;
1538 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1539 return BNXT_TF_RC_ERROR;