1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Utility function to skip the void items. */
/*
 * NOTE(review): presumably advances *item by 'increment' and then walks past
 * any consecutive RTE_FLOW_ITEM_TYPE_VOID placeholders.  The return type,
 * loop body and closing brace are elided from this view -- confirm against
 * the full source before relying on these comments.
 */
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
/* Keep skipping while the current item exists and is a VOID placeholder. */
23 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] as participating in the match when its mask is
 * non-zero; a partial (not all-ones) mask additionally flags the flow as a
 * wildcard match.  A zero mask clears the field's bit instead.
 * NOTE(review): the prototype tail, braces and an 'else' line are elided
 * from this view.  '¶ms' below is mojibake for '&params' -- the file's
 * encoding needs repair before it will compile.
 */
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
35 struct ulp_rte_hdr_field *field;
37 field = ¶ms->hdr_field[idx];
38 if (ulp_bitmap_notzero(field->mask, field->size)) {
39 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* Partial mask => wildcard-match table is required for this flow. */
41 if (!ulp_bitmap_is_ones(field->mask, field->size))
42 ULP_BITMAP_SET(params->fld_bitmap.bits,
43 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
/* All-zero mask: this field does not take part in the match. */
45 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
49 /* Utility function to copy field spec items */
50 static struct ulp_rte_hdr_field *
/*
 * Copies spec bytes from 'buffer' into field->spec (field->size bytes).
 * NOTE(review): the size assignment and return statement are elided from
 * this view; callers chain the returned pointer, so it presumably returns
 * the next hdr_field -- confirm in the full source.
 */
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
56 memcpy(field->spec, buffer, field->size);
61 /* Utility function to copy field masks items */
/*
 * Copies 'size' bytes of mask data into hdr_field[*idx].mask and refreshes
 * the field bitmap for that index.
 * NOTE(review): the increment of *idx (callers pass &idx and chain calls)
 * is elided from this view -- confirm.  '¶ms' is mojibake for '&params'.
 */
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
68 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
70 memcpy(field->mask, buffer, size);
71 ulp_rte_parser_field_bitmap_update(params, *idx);
76 * Function to handle the parsing of RTE Flows and placing
77 * the RTE flow items into the ulp structures.
/*
 * Walks the pattern[] array until RTE_FLOW_ITEM_TYPE_END, dispatching each
 * item to its registered proto_hdr_func from the ulp_hdr_info table, then
 * applies the implied SVIF.  Returns BNXT_TF_RC_SUCCESS, or a parse/error
 * code on an unsupported item or a failing handler.
 * NOTE(review): several lines (braces, BNXT_TF_DBG call heads) are elided
 * from this view.
 */
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81 struct ulp_rte_parser_params *params)
83 const struct rte_flow_item *item = pattern;
84 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Field writing starts after the reserved SVIF slots. */
86 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87 if (params->dir == ULP_DIR_EGRESS)
88 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
91 /* Set the computed flags for no vlan tags before parsing */
92 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
93 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
95 /* Parse all the items in the pattern */
96 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
97 /* get the header information from the flow_hdr_info table */
98 hdr_info = &ulp_hdr_info[item->type];
99 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
101 "Truflow parser does not support type %d\n",
103 return BNXT_TF_RC_PARSE_ERR;
104 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
105 /* call the registered callback handler */
106 if (hdr_info->proto_hdr_func) {
107 if (hdr_info->proto_hdr_func(item, params) !=
108 BNXT_TF_RC_SUCCESS) {
109 return BNXT_TF_RC_ERROR;
/* Derive the source interface when no explicit port item was given. */
115 /* update the implied SVIF */
116 (void)ulp_rte_parser_svif_process(params);
117 return BNXT_TF_RC_SUCCESS;
121 * Function to handle the parsing of RTE Flows and placing
122 * the RTE flow actions into the ulp structures.
/*
 * Mirrors bnxt_ulp_rte_parser_hdr_parse() for the action list: dispatches
 * each action to its proto_act_func from ulp_act_info[] until
 * RTE_FLOW_ACTION_TYPE_END, then fills in the implied destination port.
 * NOTE(review): braces and debug-print heads are elided from this view.
 */
125 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
126 struct ulp_rte_parser_params *params)
128 const struct rte_flow_action *action_item = actions;
129 struct bnxt_ulp_rte_act_info *hdr_info;
131 if (params->dir == ULP_DIR_EGRESS)
132 ULP_BITMAP_SET(params->act_bitmap.bits,
133 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
135 /* Parse all the items in the pattern */
136 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
137 /* get the header information from the flow_hdr_info table */
138 hdr_info = &ulp_act_info[action_item->type];
139 if (hdr_info->act_type ==
140 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
142 "Truflow parser does not support act %u\n",
144 return BNXT_TF_RC_ERROR;
145 } else if (hdr_info->act_type ==
146 BNXT_ULP_ACT_TYPE_SUPPORTED) {
147 /* call the registered callback handler */
148 if (hdr_info->proto_act_func) {
149 if (hdr_info->proto_act_func(action_item,
151 BNXT_TF_RC_SUCCESS) {
152 return BNXT_TF_RC_ERROR;
158 /* update the implied port details */
159 ulp_rte_parser_implied_act_port_process(params);
160 return BNXT_TF_RC_SUCCESS;
163 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Resolves the source virtual interface (SVIF) for the flow and stores it
 * (big-endian) plus its mask into the reserved SVIF hdr_field slot, then
 * records it in the SVIF_FLAG computed field.  Rejects a second source item
 * once an SVIF has already been set.  For PORT_ID items the DPDK port id is
 * first translated through the port database; the SVIF type depends on
 * direction and interface kind (VF-rep vs. driver function).
 * NOTE(review): the svif/mask parameter lines, 'rc'/'dir'/'ifindex'
 * declarations, several braces and an 'else' are elided from this view.
 * '¶ms' is mojibake for '&params'.
 */
165 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
166 enum rte_flow_item_type proto,
170 uint16_t port_id = svif;
172 struct ulp_rte_hdr_field *hdr_field;
173 enum bnxt_ulp_svif_type svif_type;
174 enum bnxt_ulp_intf_type if_type;
/* Only a single source (PF/VF/port_id/phy_port) item is allowed. */
178 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
179 BNXT_ULP_INVALID_SVIF_VAL) {
181 "SVIF already set,multiple source not support'd\n");
182 return BNXT_TF_RC_ERROR;
185 if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
186 dir = ULP_COMP_FLD_IDX_RD(params,
187 BNXT_ULP_CF_IDX_DIRECTION);
188 /* perform the conversion from dpdk port to bnxt svif */
189 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
193 "Invalid port id\n");
194 return BNXT_TF_RC_ERROR;
/* Ingress matches on the physical port SVIF; egress on the function SVIF. */
197 if (dir == ULP_DIR_INGRESS) {
198 svif_type = BNXT_ULP_PHY_PORT_SVIF;
200 if_type = bnxt_get_interface_type(port_id);
201 if (if_type == BNXT_ULP_INTF_TYPE_VF_REP)
202 svif_type = BNXT_ULP_VF_FUNC_SVIF;
204 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
206 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* Hardware expects the SVIF in network byte order. */
208 svif = rte_cpu_to_be_16(svif);
210 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
211 memcpy(hdr_field->spec, &svif, sizeof(svif));
212 memcpy(hdr_field->mask, &mask, sizeof(mask));
213 hdr_field->size = sizeof(svif);
/* Remember the CPU-order SVIF so later items can detect a duplicate. */
214 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
215 rte_be_to_cpu_16(svif));
216 return BNXT_TF_RC_SUCCESS;
219 /* Function to handle the parsing of the RTE port id */
/*
 * Fallback invoked after pattern parsing: if no source item set the SVIF,
 * derive it from the incoming interface computed field and set it with a
 * full 0xFFFF mask via ulp_rte_parser_svif_set().
 * NOTE(review): the trailing arguments of the svif_set call are elided from
 * this view (presumably port_id and svif_mask).
 */
221 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
223 uint16_t port_id = 0;
224 uint16_t svif_mask = 0xFFFF;
/* Nothing to do if an explicit source item already provided the SVIF. */
226 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
227 BNXT_ULP_INVALID_SVIF_VAL)
228 return BNXT_TF_RC_SUCCESS;
230 /* SVIF not set. So get the port id */
231 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
233 /* Update the SVIF details */
234 return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
238 /* Function to handle the implicit action port id */
/*
 * If the actions did not set an explicit destination port, synthesize a
 * port_id action from the incoming interface and run it through the normal
 * port-id action handler, then clear the ACT_PORT_IS_SET flag (the port was
 * implied, not user-specified).
 */
240 ulp_rte_parser_implied_act_port_process(struct ulp_rte_parser_params *params)
242 struct rte_flow_action action_item = {0};
243 struct rte_flow_action_port_id port_id = {0};
245 /* Read the action port set bit */
246 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
247 /* Already set, so just exit */
248 return BNXT_TF_RC_SUCCESS;
/* Build a synthetic port_id action pointing at the incoming interface. */
250 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
251 action_item.conf = &port_id;
253 /* Update the action port based on incoming port */
254 ulp_rte_port_id_act_handler(&action_item, params);
256 /* Reset the action port set bit */
257 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
258 return BNXT_TF_RC_SUCCESS;
261 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item handler: the PF item carries no spec, so the SVIF is derived from
 * the incoming interface with a full mask.
 * NOTE(review): the tail arguments of the svif_set call (item type, port_id,
 * svif_mask) are elided from this view.
 */
263 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
264 struct ulp_rte_parser_params *params)
266 uint16_t port_id = 0;
267 uint16_t svif_mask = 0xFFFF;
269 /* Get the port id */
270 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
272 /* Update the SVIF details */
273 return ulp_rte_parser_svif_set(params,
278 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF item handler: takes the VF id from the item spec/mask (when present --
 * the null checks are elided from this view) and records it as the SVIF.
 */
280 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
281 struct ulp_rte_parser_params *params)
283 const struct rte_flow_item_vf *vf_spec = item->spec;
284 const struct rte_flow_item_vf *vf_mask = item->mask;
285 uint16_t svif = 0, mask = 0;
287 /* Get VF rte_flow_item for Port details */
289 svif = (uint16_t)vf_spec->id;
291 mask = (uint16_t)vf_mask->id;
293 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
296 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * port_id item handler: validates the DPDK port id against RTE_MAX_ETHPORTS
 * and passes it (with the item mask) to ulp_rte_parser_svif_set(), which
 * converts it to a hardware SVIF.
 * NOTE(review): the spec/mask null-guard lines are elided from this view.
 */
298 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
299 struct ulp_rte_parser_params *params)
301 const struct rte_flow_item_port_id *port_spec = item->spec;
302 const struct rte_flow_item_port_id *port_mask = item->mask;
303 uint16_t svif = 0, mask = 0;
306 * Copy the rte_flow_item for Port into hdr_field using port id
310 svif = (uint16_t)port_spec->id;
311 if (svif >= RTE_MAX_ETHPORTS) {
312 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
313 return BNXT_TF_RC_PARSE_ERR;
317 mask = (uint16_t)port_mask->id;
319 /* Update the SVIF details */
320 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
323 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * phy_port item handler: reads the physical port index from the item,
 * bounds-checks it against the device's num_phy_ports (looked up through the
 * device-params table for this device id), then records it as the SVIF.
 * NOTE(review): 'dev_id' declaration, null guards and the dparms NULL check
 * are elided from this view.
 */
325 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
326 struct ulp_rte_parser_params *params)
328 const struct rte_flow_item_phy_port *port_spec = item->spec;
329 const struct rte_flow_item_phy_port *port_mask = item->mask;
330 uint32_t svif = 0, mask = 0;
331 struct bnxt_ulp_device_params *dparms;
334 /* Copy the rte_flow_item for phy port into hdr_field */
336 svif = port_spec->index;
338 mask = port_mask->index;
340 if (bnxt_ulp_cntxt_dev_id_get(params->ulp_ctx, &dev_id)) {
341 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
345 dparms = bnxt_ulp_device_params_get(dev_id);
347 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
/* Reject an index beyond what this device actually exposes. */
351 if (svif > dparms->num_phy_ports) {
352 BNXT_TF_DBG(ERR, "ParseErr:Phy Port is not valid\n");
353 return BNXT_TF_RC_PARSE_ERR;
356 /* Update the SVIF details */
357 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
360 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Ethernet item handler: copies dst MAC, src MAC and ethertype spec/mask
 * into consecutive hdr_field slots, reserves room for the (possible) VLAN
 * fields that may follow, and sets the outer- or inner-ETH header bit --
 * inner if an outer ETH was already seen.
 * NOTE(review): 'size' declaration, spec/mask null guards, several size
 * arguments and braces are elided from this view.  '¶ms' / 'ð' are
 * mojibake for '&params' / '&eth'.
 */
362 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
363 struct ulp_rte_parser_params *params)
365 const struct rte_flow_item_eth *eth_spec = item->spec;
366 const struct rte_flow_item_eth *eth_mask = item->mask;
367 struct ulp_rte_hdr_field *field;
368 uint32_t idx = params->field_idx;
369 uint64_t set_flag = 0;
373 * Copy the rte_flow_item for eth into hdr_field using ethernet
377 size = sizeof(eth_spec->dst.addr_bytes);
378 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
379 eth_spec->dst.addr_bytes,
381 size = sizeof(eth_spec->src.addr_bytes);
382 field = ulp_rte_parser_fld_copy(field,
383 eth_spec->src.addr_bytes,
385 field = ulp_rte_parser_fld_copy(field,
387 sizeof(eth_spec->type));
390 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
391 sizeof(eth_mask->dst.addr_bytes));
392 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
393 sizeof(eth_mask->src.addr_bytes));
394 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
395 sizeof(eth_mask->type));
397 /* Add number of vlan header elements */
398 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
/* Reserve slots now so VLAN items (if any) land right after this ETH. */
399 params->vlan_idx = params->field_idx;
400 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
402 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
/* Second ETH in the pattern means this one is the inner (tunneled) header. */
403 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
404 BNXT_ULP_HDR_BIT_O_ETH);
406 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
408 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
409 BNXT_ULP_HDR_BIT_I_ETH);
411 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
412 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
414 return BNXT_TF_RC_SUCCESS;
417 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * VLAN item handler: splits the TCI into priority (top 3 bits) and VLAN id,
 * copies priority/tag/inner_type spec and mask into the VLAN hdr_field
 * slots reserved by the ETH handler, then classifies the tag as outer-outer,
 * outer-inner, inner-outer or inner-inner based on which ETH headers have
 * been seen and the current vtag counts, updating the computed vtag fields
 * accordingly.  A VLAN with no preceding ETH is a parse error.
 * NOTE(review): spec/mask null guards, field-advance lines and the vtag_num
 * increment lines are elided from this view.  '¶ms' is mojibake for
 * '&params'.
 */
419 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
420 struct ulp_rte_parser_params *params)
422 const struct rte_flow_item_vlan *vlan_spec = item->spec;
423 const struct rte_flow_item_vlan *vlan_mask = item->mask;
424 struct ulp_rte_hdr_field *field;
425 struct ulp_rte_hdr_bitmap *hdr_bit;
426 uint32_t idx = params->vlan_idx;
427 uint16_t vlan_tag, priority;
428 uint32_t outer_vtag_num;
429 uint32_t inner_vtag_num;
432 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* TCI layout: PCP(3) | DEI(1) | VID(12); priority is the top 3 bits. */
436 vlan_tag = ntohs(vlan_spec->tci);
437 priority = htons(vlan_tag >> 13);
439 vlan_tag = htons(vlan_tag);
441 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
444 field = ulp_rte_parser_fld_copy(field,
447 field = ulp_rte_parser_fld_copy(field,
448 &vlan_spec->inner_type,
449 sizeof(vlan_spec->inner_type));
/* Same split for the mask. */
453 vlan_tag = ntohs(vlan_mask->tci);
454 priority = htons(vlan_tag >> 13);
456 vlan_tag = htons(vlan_tag);
458 field = ¶ms->hdr_field[idx];
459 memcpy(field->mask, &priority, field->size);
461 memcpy(field->mask, &vlan_tag, field->size);
463 memcpy(field->mask, &vlan_mask->inner_type, field->size);
465 /* Set the vlan index to new incremented value */
466 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
468 /* Get the outer tag and inner tag counts */
469 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
470 BNXT_ULP_CF_IDX_O_VTAG_NUM);
471 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
472 BNXT_ULP_CF_IDX_I_VTAG_NUM);
474 /* Update the hdr_bitmap of the vlans */
475 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first tag on the outer ETH header. */
476 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
477 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
479 /* Update the vlan tag num */
481 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
483 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
484 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
485 ULP_BITMAP_SET(params->hdr_bitmap.bits,
486 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Case 2: second tag on the outer ETH header (QinQ). */
487 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
488 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
489 outer_vtag_num == 1) {
490 /* update the vlan tag num */
492 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
494 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
495 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
496 ULP_BITMAP_SET(params->hdr_bitmap.bits,
497 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Case 3: first tag on the inner (tunneled) ETH header. */
498 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
499 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
501 /* update the vlan tag num */
503 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
505 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
506 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
507 ULP_BITMAP_SET(params->hdr_bitmap.bits,
508 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Case 4: second tag on the inner ETH header. */
509 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
510 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
511 inner_vtag_num == 1) {
512 /* update the vlan tag num */
514 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
516 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
517 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
518 ULP_BITMAP_SET(params->hdr_bitmap.bits,
519 BNXT_ULP_HDR_BIT_II_VLAN);
521 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
522 return BNXT_TF_RC_ERROR;
524 return BNXT_TF_RC_SUCCESS;
527 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * IPv4 item handler: rejects a third L3 header, copies each rte_ipv4_hdr
 * field's spec and mask into consecutive hdr_field slots, advances
 * field_idx, and sets the outer- or inner-IPV4 header bit plus the O_L3 /
 * I_L3 computed counters (inner if an outer L3 was already seen).
 * NOTE(review): 'size' declaration, spec/mask null guards, size arguments
 * and the inner/outer counter increments are elided from this view.
 * '¶ms' is mojibake for '&params'.
 */
529 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
530 struct ulp_rte_parser_params *params)
532 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
533 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
534 struct ulp_rte_hdr_field *field;
535 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
536 uint32_t idx = params->field_idx;
538 uint32_t inner_l3, outer_l3;
/* Only outer + inner L3 are supported; a third one is rejected. */
540 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
542 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
543 return BNXT_TF_RC_ERROR;
547 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
/* Spec copy: one hdr_field slot per rte_ipv4_hdr member, in wire order. */
551 size = sizeof(ipv4_spec->hdr.version_ihl);
552 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
553 &ipv4_spec->hdr.version_ihl,
555 size = sizeof(ipv4_spec->hdr.type_of_service);
556 field = ulp_rte_parser_fld_copy(field,
557 &ipv4_spec->hdr.type_of_service,
559 size = sizeof(ipv4_spec->hdr.total_length);
560 field = ulp_rte_parser_fld_copy(field,
561 &ipv4_spec->hdr.total_length,
563 size = sizeof(ipv4_spec->hdr.packet_id);
564 field = ulp_rte_parser_fld_copy(field,
565 &ipv4_spec->hdr.packet_id,
567 size = sizeof(ipv4_spec->hdr.fragment_offset);
568 field = ulp_rte_parser_fld_copy(field,
569 &ipv4_spec->hdr.fragment_offset,
571 size = sizeof(ipv4_spec->hdr.time_to_live);
572 field = ulp_rte_parser_fld_copy(field,
573 &ipv4_spec->hdr.time_to_live,
575 size = sizeof(ipv4_spec->hdr.next_proto_id);
576 field = ulp_rte_parser_fld_copy(field,
577 &ipv4_spec->hdr.next_proto_id,
579 size = sizeof(ipv4_spec->hdr.hdr_checksum);
580 field = ulp_rte_parser_fld_copy(field,
581 &ipv4_spec->hdr.hdr_checksum,
583 size = sizeof(ipv4_spec->hdr.src_addr);
584 field = ulp_rte_parser_fld_copy(field,
585 &ipv4_spec->hdr.src_addr,
587 size = sizeof(ipv4_spec->hdr.dst_addr);
588 field = ulp_rte_parser_fld_copy(field,
589 &ipv4_spec->hdr.dst_addr,
/* Mask copy: same members, same order, advancing idx per field. */
593 ulp_rte_prsr_mask_copy(params, &idx,
594 &ipv4_mask->hdr.version_ihl,
595 sizeof(ipv4_mask->hdr.version_ihl));
596 ulp_rte_prsr_mask_copy(params, &idx,
597 &ipv4_mask->hdr.type_of_service,
598 sizeof(ipv4_mask->hdr.type_of_service));
599 ulp_rte_prsr_mask_copy(params, &idx,
600 &ipv4_mask->hdr.total_length,
601 sizeof(ipv4_mask->hdr.total_length));
602 ulp_rte_prsr_mask_copy(params, &idx,
603 &ipv4_mask->hdr.packet_id,
604 sizeof(ipv4_mask->hdr.packet_id));
605 ulp_rte_prsr_mask_copy(params, &idx,
606 &ipv4_mask->hdr.fragment_offset,
607 sizeof(ipv4_mask->hdr.fragment_offset));
608 ulp_rte_prsr_mask_copy(params, &idx,
609 &ipv4_mask->hdr.time_to_live,
610 sizeof(ipv4_mask->hdr.time_to_live));
611 ulp_rte_prsr_mask_copy(params, &idx,
612 &ipv4_mask->hdr.next_proto_id,
613 sizeof(ipv4_mask->hdr.next_proto_id));
614 ulp_rte_prsr_mask_copy(params, &idx,
615 &ipv4_mask->hdr.hdr_checksum,
616 sizeof(ipv4_mask->hdr.hdr_checksum));
617 ulp_rte_prsr_mask_copy(params, &idx,
618 &ipv4_mask->hdr.src_addr,
619 sizeof(ipv4_mask->hdr.src_addr));
620 ulp_rte_prsr_mask_copy(params, &idx,
621 &ipv4_mask->hdr.dst_addr,
622 sizeof(ipv4_mask->hdr.dst_addr));
624 /* Add the number of ipv4 header elements */
625 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
627 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
628 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
630 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
631 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
632 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
634 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
636 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
638 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
640 return BNXT_TF_RC_SUCCESS;
643 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPv6 item handler: rejects a third L3 header, decomposes vtc_flow into
 * version / traffic-class / flow-label sub-fields (via the BNXT_ULP_GET_*
 * macros) and copies them plus the remaining rte_ipv6_hdr members' spec and
 * mask into hdr_field slots, then sets the outer- or inner-IPV6 header bit
 * and the corresponding L3 computed field.
 * NOTE(review): 'size' declarations, null guards, several copy arguments and
 * braces are elided from this view.  '¶ms' is mojibake for '&params'.
 */
645 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
646 struct ulp_rte_parser_params *params)
648 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
649 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
650 struct ulp_rte_hdr_field *field;
651 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
652 uint32_t idx = params->field_idx;
654 uint32_t inner_l3, outer_l3;
655 uint32_t vtcf, vtcf_mask;
657 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
659 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
660 return BNXT_TF_RC_ERROR;
664 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
/* vtc_flow is split into three separate match fields: ver, tc, flow label. */
668 size = sizeof(ipv6_spec->hdr.vtc_flow);
670 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
671 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
675 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
676 field = ulp_rte_parser_fld_copy(field,
680 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
681 field = ulp_rte_parser_fld_copy(field,
685 size = sizeof(ipv6_spec->hdr.payload_len);
686 field = ulp_rte_parser_fld_copy(field,
687 &ipv6_spec->hdr.payload_len,
689 size = sizeof(ipv6_spec->hdr.proto);
690 field = ulp_rte_parser_fld_copy(field,
691 &ipv6_spec->hdr.proto,
693 size = sizeof(ipv6_spec->hdr.hop_limits);
694 field = ulp_rte_parser_fld_copy(field,
695 &ipv6_spec->hdr.hop_limits,
697 size = sizeof(ipv6_spec->hdr.src_addr);
698 field = ulp_rte_parser_fld_copy(field,
699 &ipv6_spec->hdr.src_addr,
701 size = sizeof(ipv6_spec->hdr.dst_addr);
702 field = ulp_rte_parser_fld_copy(field,
703 &ipv6_spec->hdr.dst_addr,
/* Mask side mirrors the spec decomposition. */
707 size = sizeof(ipv6_mask->hdr.vtc_flow);
709 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
710 ulp_rte_prsr_mask_copy(params, &idx,
714 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
715 ulp_rte_prsr_mask_copy(params, &idx,
720 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
721 ulp_rte_prsr_mask_copy(params, &idx,
725 ulp_rte_prsr_mask_copy(params, &idx,
726 &ipv6_mask->hdr.payload_len,
727 sizeof(ipv6_mask->hdr.payload_len));
728 ulp_rte_prsr_mask_copy(params, &idx,
729 &ipv6_mask->hdr.proto,
730 sizeof(ipv6_mask->hdr.proto));
731 ulp_rte_prsr_mask_copy(params, &idx,
732 &ipv6_mask->hdr.hop_limits,
733 sizeof(ipv6_mask->hdr.hop_limits));
734 ulp_rte_prsr_mask_copy(params, &idx,
735 &ipv6_mask->hdr.src_addr,
736 sizeof(ipv6_mask->hdr.src_addr));
737 ulp_rte_prsr_mask_copy(params, &idx,
738 &ipv6_mask->hdr.dst_addr,
739 sizeof(ipv6_mask->hdr.dst_addr));
741 /* add number of ipv6 header elements */
742 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
744 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
745 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
747 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
748 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
749 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
750 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
752 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
753 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
755 return BNXT_TF_RC_SUCCESS;
758 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * UDP item handler: rejects a third L4 header, copies src/dst port, length
 * and checksum spec/mask into hdr_field slots, then sets the outer- or
 * inner-UDP header bit and the O_L4 / I_L4 computed fields (inner when an
 * outer L4 -- UDP or TCP -- was already seen).
 * NOTE(review): 'size' declaration, null guards, size arguments and braces
 * are elided from this view; the "for ipv4" comment below is a copy/paste
 * slip in the original -- it copies UDP fields.  '¶ms' is mojibake for
 * '&params'.
 */
760 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
761 struct ulp_rte_parser_params *params)
763 const struct rte_flow_item_udp *udp_spec = item->spec;
764 const struct rte_flow_item_udp *udp_mask = item->mask;
765 struct ulp_rte_hdr_field *field;
766 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
767 uint32_t idx = params->field_idx;
769 uint32_t inner_l4, outer_l4;
771 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
773 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
774 return BNXT_TF_RC_ERROR;
778 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
782 size = sizeof(udp_spec->hdr.src_port);
783 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
784 &udp_spec->hdr.src_port,
786 size = sizeof(udp_spec->hdr.dst_port);
787 field = ulp_rte_parser_fld_copy(field,
788 &udp_spec->hdr.dst_port,
790 size = sizeof(udp_spec->hdr.dgram_len);
791 field = ulp_rte_parser_fld_copy(field,
792 &udp_spec->hdr.dgram_len,
794 size = sizeof(udp_spec->hdr.dgram_cksum);
795 field = ulp_rte_parser_fld_copy(field,
796 &udp_spec->hdr.dgram_cksum,
800 ulp_rte_prsr_mask_copy(params, &idx,
801 &udp_mask->hdr.src_port,
802 sizeof(udp_mask->hdr.src_port));
803 ulp_rte_prsr_mask_copy(params, &idx,
804 &udp_mask->hdr.dst_port,
805 sizeof(udp_mask->hdr.dst_port));
806 ulp_rte_prsr_mask_copy(params, &idx,
807 &udp_mask->hdr.dgram_len,
808 sizeof(udp_mask->hdr.dgram_len));
809 ulp_rte_prsr_mask_copy(params, &idx,
810 &udp_mask->hdr.dgram_cksum,
811 sizeof(udp_mask->hdr.dgram_cksum));
814 /* Add number of UDP header elements */
815 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
817 /* Set the udp header bitmap and computed l4 header bitmaps */
818 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
820 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
821 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
822 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
823 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
825 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
826 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
828 return BNXT_TF_RC_SUCCESS;
831 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * TCP item handler: rejects a third L4 header, copies every rte_tcp_hdr
 * member's spec and mask into hdr_field slots, then sets the outer- or
 * inner-TCP header bit and the O_L4 / I_L4 computed fields.
 * NOTE(review): the bare 'idx += BNXT_ULP_PROTO_HDR_TCP_NUM;' before the
 * mask copies looks like it belongs to an elided '!tcp_spec' branch (skip
 * the spec slots when no spec was given) -- verify against the full source;
 * as shown it would double-advance idx.  '¶ms' is mojibake for '&params'.
 */
833 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
834 struct ulp_rte_parser_params *params)
836 const struct rte_flow_item_tcp *tcp_spec = item->spec;
837 const struct rte_flow_item_tcp *tcp_mask = item->mask;
838 struct ulp_rte_hdr_field *field;
839 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
840 uint32_t idx = params->field_idx;
842 uint32_t inner_l4, outer_l4;
844 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
846 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
847 return BNXT_TF_RC_ERROR;
851 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
855 size = sizeof(tcp_spec->hdr.src_port);
856 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
857 &tcp_spec->hdr.src_port,
859 size = sizeof(tcp_spec->hdr.dst_port);
860 field = ulp_rte_parser_fld_copy(field,
861 &tcp_spec->hdr.dst_port,
863 size = sizeof(tcp_spec->hdr.sent_seq);
864 field = ulp_rte_parser_fld_copy(field,
865 &tcp_spec->hdr.sent_seq,
867 size = sizeof(tcp_spec->hdr.recv_ack);
868 field = ulp_rte_parser_fld_copy(field,
869 &tcp_spec->hdr.recv_ack,
871 size = sizeof(tcp_spec->hdr.data_off);
872 field = ulp_rte_parser_fld_copy(field,
873 &tcp_spec->hdr.data_off,
875 size = sizeof(tcp_spec->hdr.tcp_flags);
876 field = ulp_rte_parser_fld_copy(field,
877 &tcp_spec->hdr.tcp_flags,
879 size = sizeof(tcp_spec->hdr.rx_win);
880 field = ulp_rte_parser_fld_copy(field,
881 &tcp_spec->hdr.rx_win,
883 size = sizeof(tcp_spec->hdr.cksum);
884 field = ulp_rte_parser_fld_copy(field,
885 &tcp_spec->hdr.cksum,
887 size = sizeof(tcp_spec->hdr.tcp_urp);
888 field = ulp_rte_parser_fld_copy(field,
889 &tcp_spec->hdr.tcp_urp,
/* See NOTE(review) above: likely the '!tcp_spec' skip path. */
892 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
896 ulp_rte_prsr_mask_copy(params, &idx,
897 &tcp_mask->hdr.src_port,
898 sizeof(tcp_mask->hdr.src_port));
899 ulp_rte_prsr_mask_copy(params, &idx,
900 &tcp_mask->hdr.dst_port,
901 sizeof(tcp_mask->hdr.dst_port));
902 ulp_rte_prsr_mask_copy(params, &idx,
903 &tcp_mask->hdr.sent_seq,
904 sizeof(tcp_mask->hdr.sent_seq));
905 ulp_rte_prsr_mask_copy(params, &idx,
906 &tcp_mask->hdr.recv_ack,
907 sizeof(tcp_mask->hdr.recv_ack));
908 ulp_rte_prsr_mask_copy(params, &idx,
909 &tcp_mask->hdr.data_off,
910 sizeof(tcp_mask->hdr.data_off));
911 ulp_rte_prsr_mask_copy(params, &idx,
912 &tcp_mask->hdr.tcp_flags,
913 sizeof(tcp_mask->hdr.tcp_flags));
914 ulp_rte_prsr_mask_copy(params, &idx,
915 &tcp_mask->hdr.rx_win,
916 sizeof(tcp_mask->hdr.rx_win));
917 ulp_rte_prsr_mask_copy(params, &idx,
918 &tcp_mask->hdr.cksum,
919 sizeof(tcp_mask->hdr.cksum));
920 ulp_rte_prsr_mask_copy(params, &idx,
921 &tcp_mask->hdr.tcp_urp,
922 sizeof(tcp_mask->hdr.tcp_urp));
924 /* add number of TCP header elements */
925 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
927 /* Set the udp header bitmap and computed l4 header bitmaps */
928 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
930 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
931 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
932 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
933 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
935 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
936 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
938 return BNXT_TF_RC_SUCCESS;
941 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * VXLAN item handler: copies flags, rsvd0, VNI and rsvd1 spec/mask into
 * hdr_field slots, advances field_idx, and sets the tunnel VXLAN bit.
 * NOTE(review): 'size' declaration, null guards and several buffer
 * arguments are elided from this view.  '¶ms' is mojibake for '&params'.
 */
943 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
944 struct ulp_rte_parser_params *params)
946 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
947 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
948 struct ulp_rte_hdr_field *field;
949 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
950 uint32_t idx = params->field_idx;
954 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
958 size = sizeof(vxlan_spec->flags);
959 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
962 size = sizeof(vxlan_spec->rsvd0);
963 field = ulp_rte_parser_fld_copy(field,
966 size = sizeof(vxlan_spec->vni);
967 field = ulp_rte_parser_fld_copy(field,
970 size = sizeof(vxlan_spec->rsvd1);
971 field = ulp_rte_parser_fld_copy(field,
976 ulp_rte_prsr_mask_copy(params, &idx,
978 sizeof(vxlan_mask->flags));
979 ulp_rte_prsr_mask_copy(params, &idx,
981 sizeof(vxlan_mask->rsvd0));
982 ulp_rte_prsr_mask_copy(params, &idx,
984 sizeof(vxlan_mask->vni));
985 ulp_rte_prsr_mask_copy(params, &idx,
987 sizeof(vxlan_mask->rsvd1));
989 /* Add number of vxlan header elements */
990 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
992 /* Update the hdr_bitmap with vxlan */
993 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
994 return BNXT_TF_RC_SUCCESS;
997 /* Function to handle the parsing of RTE Flow item void Header */
/* No-op handler: VOID items carry no match data, so nothing is recorded. */
999 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1000 struct ulp_rte_parser_params *params __rte_unused)
1002 return BNXT_TF_RC_SUCCESS;
1005 /* Function to handle the parsing of RTE Flow action void Header. */
/* No-op handler: VOID actions have no effect, so nothing is recorded. */
1007 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1008 struct ulp_rte_parser_params *params __rte_unused)
1010 return BNXT_TF_RC_SUCCESS;
1013 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * MARK action handler: stores the mark id (big-endian) into the MARK action
 * property and sets the MARK action bit; a missing conf is a parse error.
 * NOTE(review): the 'mark_id' declaration and the 'if (mark)' guard are
 * elided from this view; the "with vxlan" comment is a copy/paste slip in
 * the original.  '¶m' is mojibake for '&param'.
 */
1015 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1016 struct ulp_rte_parser_params *param)
1018 const struct rte_flow_action_mark *mark;
1019 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1022 mark = action_item->conf;
1024 mark_id = tfp_cpu_to_be_32(mark->id);
1025 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1026 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1028 /* Update the hdr_bitmap with vxlan */
1029 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1030 return BNXT_TF_RC_SUCCESS;
1032 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1033 return BNXT_TF_RC_ERROR;
1036 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * RSS action handler: only records the RSS action bit (the rss conf itself
 * is not translated here); a missing conf is a parse error.
 * NOTE(review): the 'if (rss)' guard is elided from this view; the
 * "with vxlan" comment is a copy/paste slip in the original.
 */
1038 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1039 struct ulp_rte_parser_params *param)
1041 const struct rte_flow_action_rss *rss = action_item->conf;
1044 /* Update the hdr_bitmap with vxlan */
1045 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1046 return BNXT_TF_RC_SUCCESS;
1048 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1049 return BNXT_TF_RC_ERROR;
1052 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1054 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1055 struct ulp_rte_parser_params *params)
1057 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1058 const struct rte_flow_item *item;
1059 const struct rte_flow_item_eth *eth_spec;
1060 const struct rte_flow_item_ipv4 *ipv4_spec;
1061 const struct rte_flow_item_ipv6 *ipv6_spec;
1062 struct rte_flow_item_vxlan vxlan_spec;
1063 uint32_t vlan_num = 0, vlan_size = 0;
1064 uint32_t ip_size = 0, ip_type = 0;
1065 uint32_t vxlan_size = 0;
1067 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1068 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1070 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1071 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1073 vxlan_encap = action_item->conf;
1075 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1076 return BNXT_TF_RC_ERROR;
1079 item = vxlan_encap->definition;
1081 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1082 return BNXT_TF_RC_ERROR;
1085 if (!ulp_rte_item_skip_void(&item, 0))
1086 return BNXT_TF_RC_ERROR;
1088 /* must have ethernet header */
1089 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1090 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1091 return BNXT_TF_RC_ERROR;
1093 eth_spec = item->spec;
1094 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1095 ulp_encap_buffer_copy(buff,
1096 eth_spec->dst.addr_bytes,
1097 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1099 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1100 ulp_encap_buffer_copy(buff,
1101 eth_spec->src.addr_bytes,
1102 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1104 /* Goto the next item */
1105 if (!ulp_rte_item_skip_void(&item, 1))
1106 return BNXT_TF_RC_ERROR;
1108 /* May have vlan header */
1109 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1111 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1112 ulp_encap_buffer_copy(buff,
1114 sizeof(struct rte_flow_item_vlan));
1116 if (!ulp_rte_item_skip_void(&item, 1))
1117 return BNXT_TF_RC_ERROR;
1120 /* may have two vlan headers */
1121 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1123 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1124 sizeof(struct rte_flow_item_vlan)],
1126 sizeof(struct rte_flow_item_vlan));
1127 if (!ulp_rte_item_skip_void(&item, 1))
1128 return BNXT_TF_RC_ERROR;
1130 /* Update the vlan count and size of more than one */
1132 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1133 vlan_num = tfp_cpu_to_be_32(vlan_num);
1134 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1137 vlan_size = tfp_cpu_to_be_32(vlan_size);
1138 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1143 /* L3 must be IPv4, IPv6 */
1144 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1145 ipv4_spec = item->spec;
1146 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1148 /* copy the ipv4 details */
1149 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1150 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1151 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1152 ulp_encap_buffer_copy(buff,
1154 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1155 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1157 const uint8_t *tmp_buff;
1159 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1160 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1161 ulp_encap_buffer_copy(buff,
1163 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1164 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1165 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1166 ulp_encap_buffer_copy(buff,
1167 &ipv4_spec->hdr.version_ihl,
1168 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1170 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1171 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1172 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1173 ulp_encap_buffer_copy(buff,
1174 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1175 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1177 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1178 ulp_encap_buffer_copy(buff,
1179 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1180 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1182 /* Update the ip size details */
1183 ip_size = tfp_cpu_to_be_32(ip_size);
1184 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1185 &ip_size, sizeof(uint32_t));
1187 /* update the ip type */
1188 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1189 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1190 &ip_type, sizeof(uint32_t));
1192 /* update the computed field to notify it is ipv4 header */
1193 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1196 if (!ulp_rte_item_skip_void(&item, 1))
1197 return BNXT_TF_RC_ERROR;
1198 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1199 ipv6_spec = item->spec;
1200 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1202 /* copy the ipv4 details */
1203 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1204 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1206 /* Update the ip size details */
1207 ip_size = tfp_cpu_to_be_32(ip_size);
1208 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1209 &ip_size, sizeof(uint32_t));
1211 /* update the ip type */
1212 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1213 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1214 &ip_type, sizeof(uint32_t));
1216 /* update the computed field to notify it is ipv6 header */
1217 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1220 if (!ulp_rte_item_skip_void(&item, 1))
1221 return BNXT_TF_RC_ERROR;
1223 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1224 return BNXT_TF_RC_ERROR;
1228 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1229 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1230 return BNXT_TF_RC_ERROR;
1232 /* copy the udp details */
1233 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1234 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1236 if (!ulp_rte_item_skip_void(&item, 1))
1237 return BNXT_TF_RC_ERROR;
1240 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1241 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1242 return BNXT_TF_RC_ERROR;
1244 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1245 /* copy the vxlan details */
1246 memcpy(&vxlan_spec, item->spec, vxlan_size);
1247 vxlan_spec.flags = 0x08;
1248 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1249 (const uint8_t *)&vxlan_spec,
1251 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1252 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1253 &vxlan_size, sizeof(uint32_t));
1255 /*update the hdr_bitmap with vxlan */
1256 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1257 return BNXT_TF_RC_SUCCESS;
1260 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1262 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1264 struct ulp_rte_parser_params *params)
1266 /* update the hdr_bitmap with vxlan */
1267 ULP_BITMAP_SET(params->act_bitmap.bits,
1268 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1269 return BNXT_TF_RC_SUCCESS;
1272 /* Function to handle the parsing of RTE Flow action drop Header. */
1274 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1275 struct ulp_rte_parser_params *params)
1277 /* Update the hdr_bitmap with drop */
1278 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1279 return BNXT_TF_RC_SUCCESS;
1282 /* Function to handle the parsing of RTE Flow action count. */
1284 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1285 struct ulp_rte_parser_params *params)
1288 const struct rte_flow_action_count *act_count;
1289 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1291 act_count = action_item->conf;
1293 if (act_count->shared) {
1295 "Parse Error:Shared count not supported\n");
1296 return BNXT_TF_RC_PARSE_ERR;
1298 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1300 BNXT_ULP_ACT_PROP_SZ_COUNT);
1303 /* Update the hdr_bitmap with count */
1304 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1305 return BNXT_TF_RC_SUCCESS;
1308 /* Function to handle the parsing of RTE Flow action PF. */
1310 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1311 struct ulp_rte_parser_params *params)
1313 uint32_t port_id, pid;
1316 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1318 /* Get the port id of the current device */
1319 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1321 /* Get the port db ifindex */
1322 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1324 BNXT_TF_DBG(ERR, "Invalid port id\n");
1325 return BNXT_TF_RC_ERROR;
1328 /* Check the port is PF port */
1329 if (ulp_port_db_port_type_get(params->ulp_ctx,
1330 ifindex) != BNXT_ULP_INTF_TYPE_PF) {
1331 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1332 return BNXT_TF_RC_ERROR;
1335 if (params->dir == ULP_DIR_EGRESS) {
1336 /* For egress direction, fill vport */
1337 if (ulp_port_db_vport_get(params->ulp_ctx, ifindex, &pid_s))
1338 return BNXT_TF_RC_ERROR;
1340 pid = rte_cpu_to_be_32(pid);
1341 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1342 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1344 /* For ingress direction, fill vnic */
1345 if (ulp_port_db_default_vnic_get(params->ulp_ctx, ifindex,
1346 BNXT_ULP_DRV_FUNC_VNIC,
1348 return BNXT_TF_RC_ERROR;
1350 pid = rte_cpu_to_be_32(pid);
1351 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1352 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1355 /*Update the action port set bit */
1356 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1357 return BNXT_TF_RC_SUCCESS;
1360 /* Function to handle the parsing of RTE Flow action VF. */
1362 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1363 struct ulp_rte_parser_params *params)
1365 const struct rte_flow_action_vf *vf_action;
1369 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1370 enum bnxt_ulp_intf_type intf_type;
1372 vf_action = action_item->conf;
1374 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1375 return BNXT_TF_RC_PARSE_ERR;
1378 if (vf_action->original) {
1379 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1380 return BNXT_TF_RC_PARSE_ERR;
1383 /* Check the port is VF port */
1384 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1386 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1387 return BNXT_TF_RC_ERROR;
1389 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1390 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1391 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1392 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1393 return BNXT_TF_RC_ERROR;
1396 if (params->dir == ULP_DIR_EGRESS) {
1397 /* For egress direction, fill vport */
1398 if (ulp_port_db_vport_get(params->ulp_ctx, ifindex, &pid_s))
1399 return BNXT_TF_RC_ERROR;
1401 pid = rte_cpu_to_be_32(pid);
1402 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1403 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1405 /* For ingress direction, fill vnic */
1406 if (ulp_port_db_default_vnic_get(params->ulp_ctx, ifindex,
1407 BNXT_ULP_DRV_FUNC_VNIC,
1409 return BNXT_TF_RC_ERROR;
1411 pid = rte_cpu_to_be_32(pid);
1412 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1413 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1416 /*Update the action port set bit */
1417 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1418 return BNXT_TF_RC_SUCCESS;
1421 /* Function to handle the parsing of RTE Flow action port_id. */
1423 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1424 struct ulp_rte_parser_params *param)
1426 const struct rte_flow_action_port_id *port_id;
1427 struct ulp_rte_act_prop *act;
1433 port_id = act_item->conf;
1436 "ParseErr: Invalid Argument\n");
1437 return BNXT_TF_RC_PARSE_ERR;
1439 if (port_id->original) {
1441 "ParseErr:Portid Original not supported\n");
1442 return BNXT_TF_RC_PARSE_ERR;
1445 /* Get the port db ifindex */
1446 rc = ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx,
1450 BNXT_TF_DBG(ERR, "Invalid port id\n");
1451 return BNXT_TF_RC_ERROR;
1454 act = ¶m->act_prop;
1455 if (param->dir == ULP_DIR_EGRESS) {
1456 rc = ulp_port_db_vport_get(param->ulp_ctx,
1459 return BNXT_TF_RC_ERROR;
1462 pid = rte_cpu_to_be_32(pid);
1463 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1464 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1466 rc = ulp_port_db_default_vnic_get(param->ulp_ctx,
1468 BNXT_ULP_DRV_FUNC_VNIC,
1471 return BNXT_TF_RC_ERROR;
1474 pid = rte_cpu_to_be_32(pid);
1475 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1476 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1479 /*Update the action port set bit */
1480 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1481 return BNXT_TF_RC_SUCCESS;
1484 /* Function to handle the parsing of RTE Flow action phy_port. */
1486 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1487 struct ulp_rte_parser_params *prm)
1489 const struct rte_flow_action_phy_port *phy_port;
1494 phy_port = action_item->conf;
1497 "ParseErr: Invalid Argument\n");
1498 return BNXT_TF_RC_PARSE_ERR;
1501 if (phy_port->original) {
1503 "Parse Err:Port Original not supported\n");
1504 return BNXT_TF_RC_PARSE_ERR;
1506 if (prm->dir != ULP_DIR_EGRESS) {
1508 "Parse Err:Phy ports are valid only for egress\n");
1509 return BNXT_TF_RC_PARSE_ERR;
1511 /* Get the physical port details from port db */
1512 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1515 BNXT_TF_DBG(DEBUG, "Failed to get port details\n");
1520 pid = rte_cpu_to_be_32(pid);
1521 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1522 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1524 /*Update the action port set bit */
1525 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1526 return BNXT_TF_RC_SUCCESS;
1529 /* Function to handle the parsing of RTE Flow action pop vlan. */
1531 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1532 struct ulp_rte_parser_params *params)
1534 /* Update the act_bitmap with pop */
1535 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1536 return BNXT_TF_RC_SUCCESS;
1539 /* Function to handle the parsing of RTE Flow action push vlan. */
1541 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1542 struct ulp_rte_parser_params *params)
1544 const struct rte_flow_action_of_push_vlan *push_vlan;
1546 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1548 push_vlan = action_item->conf;
1550 ethertype = push_vlan->ethertype;
1551 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1553 "Parse Err: Ethertype not supported\n");
1554 return BNXT_TF_RC_PARSE_ERR;
1556 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1557 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1558 /* Update the hdr_bitmap with push vlan */
1559 ULP_BITMAP_SET(params->act_bitmap.bits,
1560 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1561 return BNXT_TF_RC_SUCCESS;
1563 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1564 return BNXT_TF_RC_ERROR;
1567 /* Function to handle the parsing of RTE Flow action set vlan id. */
1569 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1570 struct ulp_rte_parser_params *params)
1572 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1574 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1576 vlan_vid = action_item->conf;
1577 if (vlan_vid && vlan_vid->vlan_vid) {
1578 vid = vlan_vid->vlan_vid;
1579 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1580 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1581 /* Update the hdr_bitmap with vlan vid */
1582 ULP_BITMAP_SET(params->act_bitmap.bits,
1583 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1584 return BNXT_TF_RC_SUCCESS;
1586 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1587 return BNXT_TF_RC_ERROR;
1590 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1592 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1593 struct ulp_rte_parser_params *params)
1595 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1597 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1599 vlan_pcp = action_item->conf;
1601 pcp = vlan_pcp->vlan_pcp;
1602 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1603 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1604 /* Update the hdr_bitmap with vlan vid */
1605 ULP_BITMAP_SET(params->act_bitmap.bits,
1606 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1607 return BNXT_TF_RC_SUCCESS;
1609 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1610 return BNXT_TF_RC_ERROR;