1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
* (NOTE(review): the closing "*" + "/" of this header was lost in extraction; restored.)
*/
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
13 #include "ulp_port_db.h"
15 /* Utility function to skip the void items. */
/*
 * NOTE(review): the embedded source numbering jumps (15, 17, 23), so the
 * return type, opening brace, loop body (the pointer/increment step) and
 * closing brace are elided from this view; only comments were edited here.
 */
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
/* Advance *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries so the
 * caller lands on the next real pattern item; 'increment' presumably
 * controls whether the first step is taken unconditionally -- TODO confirm. */
23 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 /* Utility function to update the field_bitmap */
/*
 * Marks hdr_field[idx] as "in use" in fld_bitmap when its mask has any set
 * bit; if the mask is not all-ones the match is a wildcard match, so the
 * BITMASK_WM flag is also raised.  Otherwise the index bit is cleared.
 * NOTE(review): "¶ms" below is mojibake for "&params" (HTML entity
 * &para;) -- left byte-identical here, must be repaired at the source.
 * Elided lines (braces, else) are hidden by the numbering gaps.
 */
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
35 struct ulp_rte_hdr_field *field;
37 field = ¶ms->hdr_field[idx];
38 if (ulp_bitmap_notzero(field->mask, field->size)) {
39 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
/* A partially-set mask means wildcard match rather than exact match. */
41 if (!ulp_bitmap_is_ones(field->mask, field->size))
42 ULP_BITMAP_SET(params->fld_bitmap.bits,
43 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
/* (else branch -- mask all zero: field is not part of the match) */
45 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
49 /* Utility function to copy field spec items */
/*
 * Copies 'buffer' into field->spec (field->size bytes) and -- per the
 * elided tail of this function (numbering jumps 51->56) -- presumably
 * records the size and returns a pointer to the next hdr_field slot,
 * which is how the chained "field = ulp_rte_parser_fld_copy(field, ...)"
 * call sites below use it.  TODO confirm against upstream.
 */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
56 memcpy(field->spec, buffer, field->size);
61 /* Utility function to copy field masks items */
/*
 * Copies 'size' bytes of 'buffer' into hdr_field[*idx].mask and refreshes
 * the field bitmap for that index.  The elided tail presumably increments
 * *idx for the caller -- TODO confirm.
 * NOTE(review): "¶ms" is mojibake for "&params".
 */
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
68 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
70 memcpy(field->mask, buffer, size);
71 ulp_rte_parser_field_bitmap_update(params, *idx);
76 * Function to handle the parsing of RTE Flows and placing
77 * the RTE flow items into the ulp structures.
/*
 * Walks the rte_flow pattern array until RTE_FLOW_ITEM_TYPE_END and
 * dispatches each item to its registered handler from ulp_hdr_info[].
 * Returns BNXT_TF_RC_PARSE_ERR for unsupported item types,
 * BNXT_TF_RC_ERROR on handler failure, BNXT_TF_RC_SUCCESS otherwise.
 * NOTE(review): numbering gaps hide the return type, braces, the item
 * advance step, and the BNXT_TF_DBG call sites -- comments only here.
 */
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81 struct ulp_rte_parser_params *params)
83 const struct rte_flow_item *item = pattern;
84 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Reserve the leading hdr_field slots for the implicit SVIF fields. */
86 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87 if (params->dir == ULP_DIR_EGRESS)
88 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
91 /* Set the computed flags for no vlan tags before parsing */
92 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
93 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
95 /* Parse all the items in the pattern */
96 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
97 /* get the header information from the flow_hdr_info table */
98 hdr_info = &ulp_hdr_info[item->type];
99 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
101 "Truflow parser does not support type %d\n",
103 return BNXT_TF_RC_PARSE_ERR;
104 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
105 /* call the registered callback handler */
106 if (hdr_info->proto_hdr_func) {
107 if (hdr_info->proto_hdr_func(item, params) !=
108 BNXT_TF_RC_SUCCESS) {
109 return BNXT_TF_RC_ERROR;
115 /* update the implied SVIF */
/* Best-effort: the SVIF may already have been set by an explicit item. */
116 (void)ulp_rte_parser_svif_process(params);
117 return BNXT_TF_RC_SUCCESS;
121 * Function to handle the parsing of RTE Flows and placing
122 * the RTE flow actions into the ulp structures.
/*
 * Action-side twin of bnxt_ulp_rte_parser_hdr_parse(): walks the action
 * array until RTE_FLOW_ACTION_TYPE_END and dispatches each action via
 * ulp_act_info[].  Returns BNXT_TF_RC_ERROR for unsupported actions or
 * handler failure, BNXT_TF_RC_SUCCESS otherwise.
 * NOTE(review): elided lines hide braces, the action advance step, and
 * the BNXT_TF_DBG call sites; comments only here.
 */
125 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
126 struct ulp_rte_parser_params *params)
128 const struct rte_flow_action *action_item = actions;
/* Despite the name, this indexes the ACTION info table below. */
129 struct bnxt_ulp_rte_act_info *hdr_info;
131 if (params->dir == ULP_DIR_EGRESS)
132 ULP_BITMAP_SET(params->act_bitmap.bits,
133 BNXT_ULP_FLOW_DIR_BITMASK_EGR);
135 /* Parse all the items in the pattern */
136 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
137 /* get the header information from the flow_hdr_info table */
138 hdr_info = &ulp_act_info[action_item->type];
139 if (hdr_info->act_type ==
140 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
142 "Truflow parser does not support act %u\n",
144 return BNXT_TF_RC_ERROR;
145 } else if (hdr_info->act_type ==
146 BNXT_ULP_ACT_TYPE_SUPPORTED) {
147 /* call the registered callback handler */
148 if (hdr_info->proto_act_func) {
149 if (hdr_info->proto_act_func(action_item,
151 BNXT_TF_RC_SUCCESS) {
152 return BNXT_TF_RC_ERROR;
158 /* update the implied VNIC */
159 ulp_rte_parser_vnic_process(params);
160 return BNXT_TF_RC_SUCCESS;
163 /* Utility: resolve and record the source interface (SVIF) spec/mask. */
/*
 * (NOTE(review): the original comment above said "PF Header" -- a
 * copy-paste left-over; this is the shared SVIF helper used by the PF,
 * VF, port-id and phy-port item handlers.)
 * For PORT_ID items the DPDK port id is first converted to a ulp ifindex
 * and the SVIF is looked up per direction/interface type.  The resolved
 * big-endian SVIF and its mask are written into the reserved
 * hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX] slot, and SVIF_FLAG is
 * set so a second source item is rejected.
 * NOTE(review): elided lines hide svif/mask parameters, 'rc'/'ifindex'/
 * 'dir' declarations, braces, and the error-path BNXT_TF_DBG heads.
 * "¶ms" is mojibake for "&params".
 */
165 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
166 enum rte_flow_item_type proto,
170 uint16_t port_id = svif;
172 struct ulp_rte_hdr_field *hdr_field;
173 enum bnxt_ulp_svif_type svif_type;
174 enum bnxt_ulp_intf_type if_type;
/* Only one source-of-traffic item is allowed per flow. */
178 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
179 BNXT_ULP_INVALID_SVIF_VAL) {
181 "SVIF already set,multiple source not support'd\n");
182 return BNXT_TF_RC_ERROR;
185 if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
186 dir = ULP_COMP_FLD_IDX_RD(params,
187 BNXT_ULP_CF_IDX_DIRECTION);
188 /* perform the conversion from dpdk port to bnxt svif */
189 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
193 "Invalid port id\n");
194 return BNXT_TF_RC_ERROR;
/* Ingress matches on the physical port SVIF; egress on the function SVIF
 * (VF-rep ports use the VF's function SVIF). */
197 if (dir == ULP_DIR_INGRESS) {
198 svif_type = BNXT_ULP_PHY_PORT_SVIF;
200 if_type = bnxt_get_interface_type(port_id);
201 if (if_type == BNXT_ULP_INTF_TYPE_VF_REP)
202 svif_type = BNXT_ULP_VF_FUNC_SVIF;
204 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
206 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
/* Hardware expects the SVIF in network byte order. */
208 svif = rte_cpu_to_be_16(svif);
210 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
211 memcpy(hdr_field->spec, &svif, sizeof(svif));
212 memcpy(hdr_field->mask, &mask, sizeof(mask));
213 hdr_field->size = sizeof(svif);
/* Record the CPU-order SVIF as the "already set" marker. */
214 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
215 rte_be_to_cpu_16(svif));
216 return BNXT_TF_RC_SUCCESS;
219 /* Function to handle the parsing of the RTE port id */
/*
 * Post-parse fixup: if no pattern item set the SVIF, derive it from the
 * incoming interface computed field with a full 0xFFFF mask.
 * NOTE(review): the trailing argument line of the svif_set call and the
 * closing brace are elided (numbering jumps past 235).
 */
221 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
223 uint16_t port_id = 0;
224 uint16_t svif_mask = 0xFFFF;
/* Nothing to do when an explicit PF/VF/port item already set the SVIF. */
226 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
227 BNXT_ULP_INVALID_SVIF_VAL)
228 return BNXT_TF_RC_SUCCESS;
230 /* SVIF not set. So get the port id */
231 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
233 /* Update the SVIF details */
234 return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
238 /* Function to handle the implicit VNIC RTE port id */
/*
 * Post-parse fixup for the action side: if an explicit VNIC/VPORT action
 * exists, both bits are simply cleared (not needed for matching).
 * Otherwise the PF action handler is invoked to fill in the default VNIC,
 * after which the VNIC action bit is cleared again.
 * NOTE(review): "¶ms" is mojibake for "&params"; braces and blank
 * lines are elided per the numbering gaps.
 */
240 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
242 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
244 if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
245 ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT)) {
247 * Reset the vnic/vport action bitmaps
248 * it is not required for match
250 ULP_BITMAP_RESET(params->act_bitmap.bits,
251 BNXT_ULP_ACTION_BIT_VNIC);
252 ULP_BITMAP_RESET(params->act_bitmap.bits,
253 BNXT_ULP_ACTION_BIT_VPORT);
254 return BNXT_TF_RC_SUCCESS;
257 /* Update the vnic details */
/* NULL action item: the PF handler derives the VNIC from params alone. */
258 ulp_rte_pf_act_handler(NULL, params);
259 /* Reset the hdr_bitmap with vnic bit */
260 ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
262 return BNXT_TF_RC_SUCCESS;
265 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item carries no spec; the SVIF is taken from the incoming interface
 * computed field with a full mask.  The unused 'item' parameter keeps the
 * common handler signature.
 * NOTE(review): the continuation lines of the svif_set call (item type,
 * port_id, svif_mask) and the closing brace are elided.
 */
267 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
268 struct ulp_rte_parser_params *params)
270 uint16_t port_id = 0;
271 uint16_t svif_mask = 0xFFFF;
273 /* Get the port id */
274 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
276 /* Update the SVIF details */
277 return ulp_rte_parser_svif_set(params,
282 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * Extracts the VF id from the item spec/mask (truncated to 16 bits) and
 * records it as the SVIF.  The elided lines (292, 294) are presumably the
 * NULL-checks guarding vf_spec/vf_mask dereferences -- TODO confirm.
 */
284 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
285 struct ulp_rte_parser_params *params)
287 const struct rte_flow_item_vf *vf_spec = item->spec;
288 const struct rte_flow_item_vf *vf_mask = item->mask;
289 uint16_t svif = 0, mask = 0;
291 /* Get VF rte_flow_item for Port details */
293 svif = (uint16_t)vf_spec->id;
295 mask = (uint16_t)vf_mask->id;
297 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
300 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * Validates the DPDK port id from the item spec (must be below
 * RTE_MAX_ETHPORTS) and hands it to ulp_rte_parser_svif_set(), which
 * converts it to the hardware SVIF.  Elided lines are presumably the
 * NULL-checks for port_spec/port_mask and closing braces.
 */
302 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
303 struct ulp_rte_parser_params *params)
305 const struct rte_flow_item_port_id *port_spec = item->spec;
306 const struct rte_flow_item_port_id *port_mask = item->mask;
307 uint16_t svif = 0, mask = 0;
310 * Copy the rte_flow_item for Port into hdr_field using port id
314 svif = (uint16_t)port_spec->id;
315 if (svif >= RTE_MAX_ETHPORTS) {
316 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
317 return BNXT_TF_RC_PARSE_ERR;
321 mask = (uint16_t)port_mask->id;
323 /* Update the SVIF details */
324 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
327 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * Matches on a physical port index.  The index is range-checked against
 * the device parameters (num_phy_ports) before being recorded as SVIF.
 * NOTE(review): elided lines hide the 'dev_id' declaration, the error
 * returns after the two DEBUG logs, and the dparms NULL-check head --
 * the visible code would fall through on those failures, so consult
 * upstream before editing.
 */
329 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
330 struct ulp_rte_parser_params *params)
332 const struct rte_flow_item_phy_port *port_spec = item->spec;
333 const struct rte_flow_item_phy_port *port_mask = item->mask;
334 uint32_t svif = 0, mask = 0;
335 struct bnxt_ulp_device_params *dparms;
338 /* Copy the rte_flow_item for phy port into hdr_field */
340 svif = port_spec->index;
342 mask = port_mask->index;
344 if (bnxt_ulp_cntxt_dev_id_get(params->ulp_ctx, &dev_id)) {
345 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
349 dparms = bnxt_ulp_device_params_get(dev_id);
351 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
355 if (svif > dparms->num_phy_ports) {
356 BNXT_TF_DBG(ERR, "ParseErr:Phy Port is not valid\n");
357 return BNXT_TF_RC_PARSE_ERR;
360 /* Update the SVIF details */
361 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
364 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Copies dst MAC, src MAC and ether type spec/mask into consecutive
 * hdr_field slots, reserves the VLAN field slots that may follow, and
 * sets the inner- or outer-ETH header bit depending on whether an outer
 * ETH was already seen.
 * NOTE(review): "¶ms" is mojibake for "&params" and "ð_mask"
 * for "&eth_mask" (HTML entities) -- left byte-identical, fix at source.
 * The spec/mask NULL-check heads and 'size' declaration are elided.
 */
366 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
367 struct ulp_rte_parser_params *params)
369 const struct rte_flow_item_eth *eth_spec = item->spec;
370 const struct rte_flow_item_eth *eth_mask = item->mask;
371 struct ulp_rte_hdr_field *field;
372 uint32_t idx = params->field_idx;
373 uint64_t set_flag = 0;
377 * Copy the rte_flow_item for eth into hdr_field using ethernet
381 size = sizeof(eth_spec->dst.addr_bytes);
382 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
383 eth_spec->dst.addr_bytes,
385 size = sizeof(eth_spec->src.addr_bytes);
386 field = ulp_rte_parser_fld_copy(field,
387 eth_spec->src.addr_bytes,
389 field = ulp_rte_parser_fld_copy(field,
391 sizeof(eth_spec->type));
394 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
395 sizeof(eth_mask->dst.addr_bytes));
396 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
397 sizeof(eth_mask->src.addr_bytes));
398 ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type,
399 sizeof(eth_mask->type));
401 /* Add number of vlan header elements */
/* Reserve VLAN slots immediately after the ETH fields so a following
 * VLAN item can index them via params->vlan_idx. */
402 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
403 params->vlan_idx = params->field_idx;
404 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
406 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
/* Second ETH item => this one is the inner header. */
407 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
408 BNXT_ULP_HDR_BIT_O_ETH);
410 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
412 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
413 BNXT_ULP_HDR_BIT_I_ETH);
415 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
416 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
418 return BNXT_TF_RC_SUCCESS;
421 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Splits the VLAN TCI into priority (top 3 bits) and tag (low 12 bits,
 * per the elided mask at line 442), stores priority/tag/inner-type
 * spec and mask into the pre-reserved vlan_idx slots, then classifies
 * the tag as outer-outer / outer-inner / inner-outer / inner-inner based
 * on which ETH headers have been seen and the current tag counts,
 * updating the computed VTAG fields accordingly.
 * NOTE(review): numbering gaps hide braces, "else" heads, the field
 * advance between the three mask memcpy calls, and the vtag_num
 * increments; "¶ms" is mojibake for "&params".
 */
423 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
424 struct ulp_rte_parser_params *params)
426 const struct rte_flow_item_vlan *vlan_spec = item->spec;
427 const struct rte_flow_item_vlan *vlan_mask = item->mask;
428 struct ulp_rte_hdr_field *field;
429 struct ulp_rte_hdr_bitmap *hdr_bit;
430 uint32_t idx = params->vlan_idx;
431 uint16_t vlan_tag, priority;
432 uint32_t outer_vtag_num;
433 uint32_t inner_vtag_num;
436 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* TCI arrives big-endian; extract PCP then convert the tag back. */
440 vlan_tag = ntohs(vlan_spec->tci);
441 priority = htons(vlan_tag >> 13);
443 vlan_tag = htons(vlan_tag);
445 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
448 field = ulp_rte_parser_fld_copy(field,
451 field = ulp_rte_parser_fld_copy(field,
452 &vlan_spec->inner_type,
453 sizeof(vlan_spec->inner_type));
/* Same decomposition for the mask. */
457 vlan_tag = ntohs(vlan_mask->tci);
458 priority = htons(vlan_tag >> 13);
460 vlan_tag = htons(vlan_tag);
462 field = ¶ms->hdr_field[idx];
463 memcpy(field->mask, &priority, field->size);
465 memcpy(field->mask, &vlan_tag, field->size);
467 memcpy(field->mask, &vlan_mask->inner_type, field->size);
469 /* Set the vlan index to new incremented value */
470 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
472 /* Get the outer tag and inner tag counts */
473 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
474 BNXT_ULP_CF_IDX_O_VTAG_NUM);
475 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
476 BNXT_ULP_CF_IDX_I_VTAG_NUM);
478 /* Update the hdr_bitmap of the vlans */
479 hdr_bit = ¶ms->hdr_bitmap;
/* Case 1: first tag after the outer ETH header (OO_VLAN). */
480 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
481 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
483 /* Update the vlan tag num */
485 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
487 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
488 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
489 ULP_BITMAP_SET(params->hdr_bitmap.bits,
490 BNXT_ULP_HDR_BIT_OO_VLAN);
/* Case 2: second tag on the outer header (OI_VLAN / QinQ). */
491 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
492 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
493 outer_vtag_num == 1) {
494 /* update the vlan tag num */
496 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
498 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
499 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
500 ULP_BITMAP_SET(params->hdr_bitmap.bits,
501 BNXT_ULP_HDR_BIT_OI_VLAN);
/* Case 3: first tag after the inner ETH header (IO_VLAN). */
502 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
503 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
505 /* update the vlan tag num */
507 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
509 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
510 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
511 ULP_BITMAP_SET(params->hdr_bitmap.bits,
512 BNXT_ULP_HDR_BIT_IO_VLAN);
/* Case 4: second tag on the inner header (II_VLAN). */
513 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
514 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
515 inner_vtag_num == 1) {
516 /* update the vlan tag num */
518 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
520 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
521 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
522 ULP_BITMAP_SET(params->hdr_bitmap.bits,
523 BNXT_ULP_HDR_BIT_II_VLAN);
/* (else branch: VLAN item without a preceding ETH item is invalid.) */
525 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
526 return BNXT_TF_RC_ERROR;
528 return BNXT_TF_RC_SUCCESS;
531 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * Copies the ten IPv4 header fields (spec and mask) into consecutive
 * hdr_field slots, rejects a third L3 header, then sets the inner- or
 * outer-IPv4 header bit and bumps the corresponding computed L3 count.
 * NOTE(review): the 'size' declaration, NULL-check heads for
 * ipv4_spec/ipv4_mask, the inner_l3/outer_l3 increments and closing
 * braces are elided; "¶ms" is mojibake for "&params".
 */
533 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
534 struct ulp_rte_parser_params *params)
536 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
537 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
538 struct ulp_rte_hdr_field *field;
539 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
540 uint32_t idx = params->field_idx;
542 uint32_t inner_l3, outer_l3;
/* Hardware supports at most outer + inner L3; a third is an error. */
544 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
546 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
547 return BNXT_TF_RC_ERROR;
551 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
555 size = sizeof(ipv4_spec->hdr.version_ihl);
556 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
557 &ipv4_spec->hdr.version_ihl,
559 size = sizeof(ipv4_spec->hdr.type_of_service);
560 field = ulp_rte_parser_fld_copy(field,
561 &ipv4_spec->hdr.type_of_service,
563 size = sizeof(ipv4_spec->hdr.total_length);
564 field = ulp_rte_parser_fld_copy(field,
565 &ipv4_spec->hdr.total_length,
567 size = sizeof(ipv4_spec->hdr.packet_id);
568 field = ulp_rte_parser_fld_copy(field,
569 &ipv4_spec->hdr.packet_id,
571 size = sizeof(ipv4_spec->hdr.fragment_offset);
572 field = ulp_rte_parser_fld_copy(field,
573 &ipv4_spec->hdr.fragment_offset,
575 size = sizeof(ipv4_spec->hdr.time_to_live);
576 field = ulp_rte_parser_fld_copy(field,
577 &ipv4_spec->hdr.time_to_live,
579 size = sizeof(ipv4_spec->hdr.next_proto_id);
580 field = ulp_rte_parser_fld_copy(field,
581 &ipv4_spec->hdr.next_proto_id,
583 size = sizeof(ipv4_spec->hdr.hdr_checksum);
584 field = ulp_rte_parser_fld_copy(field,
585 &ipv4_spec->hdr.hdr_checksum,
587 size = sizeof(ipv4_spec->hdr.src_addr);
588 field = ulp_rte_parser_fld_copy(field,
589 &ipv4_spec->hdr.src_addr,
591 size = sizeof(ipv4_spec->hdr.dst_addr);
592 field = ulp_rte_parser_fld_copy(field,
593 &ipv4_spec->hdr.dst_addr,
/* Mask copies advance 'idx' in lock-step with the spec copies above. */
597 ulp_rte_prsr_mask_copy(params, &idx,
598 &ipv4_mask->hdr.version_ihl,
599 sizeof(ipv4_mask->hdr.version_ihl));
600 ulp_rte_prsr_mask_copy(params, &idx,
601 &ipv4_mask->hdr.type_of_service,
602 sizeof(ipv4_mask->hdr.type_of_service));
603 ulp_rte_prsr_mask_copy(params, &idx,
604 &ipv4_mask->hdr.total_length,
605 sizeof(ipv4_mask->hdr.total_length));
606 ulp_rte_prsr_mask_copy(params, &idx,
607 &ipv4_mask->hdr.packet_id,
608 sizeof(ipv4_mask->hdr.packet_id));
609 ulp_rte_prsr_mask_copy(params, &idx,
610 &ipv4_mask->hdr.fragment_offset,
611 sizeof(ipv4_mask->hdr.fragment_offset));
612 ulp_rte_prsr_mask_copy(params, &idx,
613 &ipv4_mask->hdr.time_to_live,
614 sizeof(ipv4_mask->hdr.time_to_live));
615 ulp_rte_prsr_mask_copy(params, &idx,
616 &ipv4_mask->hdr.next_proto_id,
617 sizeof(ipv4_mask->hdr.next_proto_id));
618 ulp_rte_prsr_mask_copy(params, &idx,
619 &ipv4_mask->hdr.hdr_checksum,
620 sizeof(ipv4_mask->hdr.hdr_checksum));
621 ulp_rte_prsr_mask_copy(params, &idx,
622 &ipv4_mask->hdr.src_addr,
623 sizeof(ipv4_mask->hdr.src_addr));
624 ulp_rte_prsr_mask_copy(params, &idx,
625 &ipv4_mask->hdr.dst_addr,
626 sizeof(ipv4_mask->hdr.dst_addr));
628 /* Add the number of ipv4 header elements */
629 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
631 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
/* A second L3 after any outer L3 header is the inner header. */
632 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
634 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
635 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
636 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
638 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
640 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
642 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
644 return BNXT_TF_RC_SUCCESS;
647 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * IPv6 twin of the IPv4 handler.  vtc_flow is decomposed into version,
 * traffic class and flow label via the BNXT_ULP_GET_IPV6_* macros before
 * being stored; remaining fields copy straight through.  A third L3
 * header is rejected; inner/outer IPv6 bits and L3 computed fields set.
 * NOTE(review): the 'size' declaration, spec/mask NULL-check heads, the
 * &vtcf/&vtcf_mask argument lines of the decomposed copies, and closing
 * braces are elided; "¶ms" is mojibake for "&params".
 */
649 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
650 struct ulp_rte_parser_params *params)
652 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
653 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
654 struct ulp_rte_hdr_field *field;
655 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
656 uint32_t idx = params->field_idx;
658 uint32_t inner_l3, outer_l3;
659 uint32_t vtcf, vtcf_mask;
661 inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
663 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
664 return BNXT_TF_RC_ERROR;
668 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
672 size = sizeof(ipv6_spec->hdr.vtc_flow);
/* vtc_flow is split into three separate match fields: ver, tc, label. */
674 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
675 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
679 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
680 field = ulp_rte_parser_fld_copy(field,
684 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
685 field = ulp_rte_parser_fld_copy(field,
689 size = sizeof(ipv6_spec->hdr.payload_len);
690 field = ulp_rte_parser_fld_copy(field,
691 &ipv6_spec->hdr.payload_len,
693 size = sizeof(ipv6_spec->hdr.proto);
694 field = ulp_rte_parser_fld_copy(field,
695 &ipv6_spec->hdr.proto,
697 size = sizeof(ipv6_spec->hdr.hop_limits);
698 field = ulp_rte_parser_fld_copy(field,
699 &ipv6_spec->hdr.hop_limits,
701 size = sizeof(ipv6_spec->hdr.src_addr);
702 field = ulp_rte_parser_fld_copy(field,
703 &ipv6_spec->hdr.src_addr,
705 size = sizeof(ipv6_spec->hdr.dst_addr);
706 field = ulp_rte_parser_fld_copy(field,
707 &ipv6_spec->hdr.dst_addr,
/* Same three-way vtc_flow decomposition for the mask. */
711 size = sizeof(ipv6_mask->hdr.vtc_flow);
713 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
714 ulp_rte_prsr_mask_copy(params, &idx,
718 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
719 ulp_rte_prsr_mask_copy(params, &idx,
724 BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
725 ulp_rte_prsr_mask_copy(params, &idx,
729 ulp_rte_prsr_mask_copy(params, &idx,
730 &ipv6_mask->hdr.payload_len,
731 sizeof(ipv6_mask->hdr.payload_len));
732 ulp_rte_prsr_mask_copy(params, &idx,
733 &ipv6_mask->hdr.proto,
734 sizeof(ipv6_mask->hdr.proto));
735 ulp_rte_prsr_mask_copy(params, &idx,
736 &ipv6_mask->hdr.hop_limits,
737 sizeof(ipv6_mask->hdr.hop_limits));
738 ulp_rte_prsr_mask_copy(params, &idx,
739 &ipv6_mask->hdr.src_addr,
740 sizeof(ipv6_mask->hdr.src_addr));
741 ulp_rte_prsr_mask_copy(params, &idx,
742 &ipv6_mask->hdr.dst_addr,
743 sizeof(ipv6_mask->hdr.dst_addr));
745 /* add number of ipv6 header elements */
746 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
748 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
749 outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
751 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
752 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
753 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
754 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
756 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
757 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
759 return BNXT_TF_RC_SUCCESS;
762 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies the four UDP header fields (spec and mask) into hdr_field
 * slots, rejects a third L4 header, and sets inner/outer UDP bits plus
 * the computed L4 fields.
 * NOTE(review): the 'size' declaration, spec/mask NULL-check heads and
 * closing braces are elided; "¶ms" is mojibake for "&params".
 */
764 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
765 struct ulp_rte_parser_params *params)
767 const struct rte_flow_item_udp *udp_spec = item->spec;
768 const struct rte_flow_item_udp *udp_mask = item->mask;
769 struct ulp_rte_hdr_field *field;
770 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
771 uint32_t idx = params->field_idx;
773 uint32_t inner_l4, outer_l4;
775 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
777 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
778 return BNXT_TF_RC_ERROR;
/* (Stale comment in the source: this copies UDP, not ipv4.) */
782 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
786 size = sizeof(udp_spec->hdr.src_port);
787 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
788 &udp_spec->hdr.src_port,
790 size = sizeof(udp_spec->hdr.dst_port);
791 field = ulp_rte_parser_fld_copy(field,
792 &udp_spec->hdr.dst_port,
794 size = sizeof(udp_spec->hdr.dgram_len);
795 field = ulp_rte_parser_fld_copy(field,
796 &udp_spec->hdr.dgram_len,
798 size = sizeof(udp_spec->hdr.dgram_cksum);
799 field = ulp_rte_parser_fld_copy(field,
800 &udp_spec->hdr.dgram_cksum,
804 ulp_rte_prsr_mask_copy(params, &idx,
805 &udp_mask->hdr.src_port,
806 sizeof(udp_mask->hdr.src_port));
807 ulp_rte_prsr_mask_copy(params, &idx,
808 &udp_mask->hdr.dst_port,
809 sizeof(udp_mask->hdr.dst_port));
810 ulp_rte_prsr_mask_copy(params, &idx,
811 &udp_mask->hdr.dgram_len,
812 sizeof(udp_mask->hdr.dgram_len));
813 ulp_rte_prsr_mask_copy(params, &idx,
814 &udp_mask->hdr.dgram_cksum,
815 sizeof(udp_mask->hdr.dgram_cksum));
818 /* Add number of UDP header elements */
819 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
821 /* Set the udp header bitmap and computed l4 header bitmaps */
/* A second L4 after any outer L4 header is the inner header. */
822 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
824 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
825 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
826 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
827 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
829 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
830 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
832 return BNXT_TF_RC_SUCCESS;
835 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies the nine TCP header fields (spec and mask) into hdr_field
 * slots, rejects a third L4 header, and sets inner/outer TCP bits plus
 * the computed L4 fields.  Note line 896: when the spec is absent the
 * index is advanced wholesale (the elided else-branch context suggests
 * this skips the unused spec slots before the mask copies -- TODO
 * confirm against upstream).
 * NOTE(review): 'size', NULL-check heads and braces are elided;
 * "¶ms" is mojibake for "&params".
 */
837 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
838 struct ulp_rte_parser_params *params)
840 const struct rte_flow_item_tcp *tcp_spec = item->spec;
841 const struct rte_flow_item_tcp *tcp_mask = item->mask;
842 struct ulp_rte_hdr_field *field;
843 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
844 uint32_t idx = params->field_idx;
846 uint32_t inner_l4, outer_l4;
848 inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
850 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
851 return BNXT_TF_RC_ERROR;
/* (Stale comment in the source: this copies TCP, not ipv4.) */
855 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
859 size = sizeof(tcp_spec->hdr.src_port);
860 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
861 &tcp_spec->hdr.src_port,
863 size = sizeof(tcp_spec->hdr.dst_port);
864 field = ulp_rte_parser_fld_copy(field,
865 &tcp_spec->hdr.dst_port,
867 size = sizeof(tcp_spec->hdr.sent_seq);
868 field = ulp_rte_parser_fld_copy(field,
869 &tcp_spec->hdr.sent_seq,
871 size = sizeof(tcp_spec->hdr.recv_ack);
872 field = ulp_rte_parser_fld_copy(field,
873 &tcp_spec->hdr.recv_ack,
875 size = sizeof(tcp_spec->hdr.data_off);
876 field = ulp_rte_parser_fld_copy(field,
877 &tcp_spec->hdr.data_off,
879 size = sizeof(tcp_spec->hdr.tcp_flags);
880 field = ulp_rte_parser_fld_copy(field,
881 &tcp_spec->hdr.tcp_flags,
883 size = sizeof(tcp_spec->hdr.rx_win);
884 field = ulp_rte_parser_fld_copy(field,
885 &tcp_spec->hdr.rx_win,
887 size = sizeof(tcp_spec->hdr.cksum);
888 field = ulp_rte_parser_fld_copy(field,
889 &tcp_spec->hdr.cksum,
891 size = sizeof(tcp_spec->hdr.tcp_urp);
892 field = ulp_rte_parser_fld_copy(field,
893 &tcp_spec->hdr.tcp_urp,
896 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
900 ulp_rte_prsr_mask_copy(params, &idx,
901 &tcp_mask->hdr.src_port,
902 sizeof(tcp_mask->hdr.src_port));
903 ulp_rte_prsr_mask_copy(params, &idx,
904 &tcp_mask->hdr.dst_port,
905 sizeof(tcp_mask->hdr.dst_port));
906 ulp_rte_prsr_mask_copy(params, &idx,
907 &tcp_mask->hdr.sent_seq,
908 sizeof(tcp_mask->hdr.sent_seq));
909 ulp_rte_prsr_mask_copy(params, &idx,
910 &tcp_mask->hdr.recv_ack,
911 sizeof(tcp_mask->hdr.recv_ack));
912 ulp_rte_prsr_mask_copy(params, &idx,
913 &tcp_mask->hdr.data_off,
914 sizeof(tcp_mask->hdr.data_off));
915 ulp_rte_prsr_mask_copy(params, &idx,
916 &tcp_mask->hdr.tcp_flags,
917 sizeof(tcp_mask->hdr.tcp_flags));
918 ulp_rte_prsr_mask_copy(params, &idx,
919 &tcp_mask->hdr.rx_win,
920 sizeof(tcp_mask->hdr.rx_win));
921 ulp_rte_prsr_mask_copy(params, &idx,
922 &tcp_mask->hdr.cksum,
923 sizeof(tcp_mask->hdr.cksum));
924 ulp_rte_prsr_mask_copy(params, &idx,
925 &tcp_mask->hdr.tcp_urp,
926 sizeof(tcp_mask->hdr.tcp_urp));
928 /* add number of TCP header elements */
929 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
/* (Stale comment in the source: sets TCP bits, not udp.) */
931 /* Set the udp header bitmap and computed l4 header bitmaps */
932 outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
934 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
935 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
936 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
937 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
939 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
940 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
942 return BNXT_TF_RC_SUCCESS;
945 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies the VXLAN flags, rsvd0, vni and rsvd1 fields (spec and mask)
 * into hdr_field slots and raises the tunnel-VXLAN header bit.
 * NOTE(review): 'size' declaration, the &vxlan_spec->... / &vxlan_mask->...
 * argument lines of each copy call, NULL-check heads and braces are
 * elided; "¶ms" is mojibake for "&params".
 */
947 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
948 struct ulp_rte_parser_params *params)
950 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
951 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
952 struct ulp_rte_hdr_field *field;
953 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
954 uint32_t idx = params->field_idx;
958 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
962 size = sizeof(vxlan_spec->flags);
963 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
966 size = sizeof(vxlan_spec->rsvd0);
967 field = ulp_rte_parser_fld_copy(field,
970 size = sizeof(vxlan_spec->vni);
971 field = ulp_rte_parser_fld_copy(field,
974 size = sizeof(vxlan_spec->rsvd1);
975 field = ulp_rte_parser_fld_copy(field,
980 ulp_rte_prsr_mask_copy(params, &idx,
982 sizeof(vxlan_mask->flags));
983 ulp_rte_prsr_mask_copy(params, &idx,
985 sizeof(vxlan_mask->rsvd0));
986 ulp_rte_prsr_mask_copy(params, &idx,
988 sizeof(vxlan_mask->vni));
989 ulp_rte_prsr_mask_copy(params, &idx,
991 sizeof(vxlan_mask->rsvd1));
993 /* Add number of vxlan header elements */
994 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
996 /* Update the hdr_bitmap with vxlan */
997 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
998 return BNXT_TF_RC_SUCCESS;
1001 /* Function to handle the parsing of RTE Flow item void Header */
/* No-op: VOID items carry no match data; always succeeds. */
1003 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1004 struct ulp_rte_parser_params *params __rte_unused)
1006 return BNXT_TF_RC_SUCCESS;
1009 /* Function to handle the parsing of RTE Flow action void Header. */
/* No-op: VOID actions carry no data; always succeeds. */
1011 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1012 struct ulp_rte_parser_params *params __rte_unused)
1014 return BNXT_TF_RC_SUCCESS;
1017 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the big-endian mark id into the action-properties blob and sets
 * the MARK action bit; a missing/invalid conf is an error.
 * NOTE(review): the 'mark_id' declaration, the NULL-check head guarding
 * 'mark', braces, and the BNXT_TF_DBG head before line 1036 are elided;
 * "¶m" is mojibake for "&param".
 */
1019 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1020 struct ulp_rte_parser_params *param)
1022 const struct rte_flow_action_mark *mark;
1023 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1026 mark = action_item->conf;
/* Hardware consumes the mark id in network byte order. */
1028 mark_id = tfp_cpu_to_be_32(mark->id);
1029 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1030 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1032 /* Mark the MARK action as present in the action bitmap */
1033 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1034 return BNXT_TF_RC_SUCCESS;
1036 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1037 return BNXT_TF_RC_ERROR;
1040 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Only records that an RSS action is present (no RSS parameters are
 * propagated here); a missing conf is an error.  The NULL-check head
 * guarding 'rss' (line ~1047) and braces are elided from this view.
 */
1042 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1043 struct ulp_rte_parser_params *param)
1045 const struct rte_flow_action_rss *rss = action_item->conf;
1048 /* Mark the RSS action as present in the action bitmap */
1049 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1050 return BNXT_TF_RC_SUCCESS;
1052 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1053 return BNXT_TF_RC_ERROR;
1056 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1058 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1059 struct ulp_rte_parser_params *params)
1061 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1062 const struct rte_flow_item *item;
1063 const struct rte_flow_item_eth *eth_spec;
1064 const struct rte_flow_item_ipv4 *ipv4_spec;
1065 const struct rte_flow_item_ipv6 *ipv6_spec;
1066 struct rte_flow_item_vxlan vxlan_spec;
1067 uint32_t vlan_num = 0, vlan_size = 0;
1068 uint32_t ip_size = 0, ip_type = 0;
1069 uint32_t vxlan_size = 0;
1071 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1072 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1074 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1075 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1077 vxlan_encap = action_item->conf;
1079 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1080 return BNXT_TF_RC_ERROR;
1083 item = vxlan_encap->definition;
1085 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1086 return BNXT_TF_RC_ERROR;
1089 if (!ulp_rte_item_skip_void(&item, 0))
1090 return BNXT_TF_RC_ERROR;
1092 /* must have ethernet header */
1093 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1094 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1095 return BNXT_TF_RC_ERROR;
1097 eth_spec = item->spec;
1098 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1099 ulp_encap_buffer_copy(buff,
1100 eth_spec->dst.addr_bytes,
1101 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1103 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1104 ulp_encap_buffer_copy(buff,
1105 eth_spec->src.addr_bytes,
1106 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1108 /* Goto the next item */
1109 if (!ulp_rte_item_skip_void(&item, 1))
1110 return BNXT_TF_RC_ERROR;
1112 /* May have vlan header */
1113 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1115 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1116 ulp_encap_buffer_copy(buff,
1118 sizeof(struct rte_flow_item_vlan));
1120 if (!ulp_rte_item_skip_void(&item, 1))
1121 return BNXT_TF_RC_ERROR;
1124 /* may have two vlan headers */
1125 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1127 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1128 sizeof(struct rte_flow_item_vlan)],
1130 sizeof(struct rte_flow_item_vlan));
1131 if (!ulp_rte_item_skip_void(&item, 1))
1132 return BNXT_TF_RC_ERROR;
1134 /* Update the vlan count and size of more than one */
1136 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1137 vlan_num = tfp_cpu_to_be_32(vlan_num);
1138 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1141 vlan_size = tfp_cpu_to_be_32(vlan_size);
1142 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1147 /* L3 must be IPv4, IPv6 */
1148 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1149 ipv4_spec = item->spec;
1150 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1152 /* copy the ipv4 details */
1153 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1154 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1155 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1156 ulp_encap_buffer_copy(buff,
1158 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1159 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1161 const uint8_t *tmp_buff;
1163 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1164 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1165 ulp_encap_buffer_copy(buff,
1167 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1168 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1169 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1170 ulp_encap_buffer_copy(buff,
1171 &ipv4_spec->hdr.version_ihl,
1172 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1174 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1175 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1176 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1177 ulp_encap_buffer_copy(buff,
1178 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1179 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1181 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1182 ulp_encap_buffer_copy(buff,
1183 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1184 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1186 /* Update the ip size details */
1187 ip_size = tfp_cpu_to_be_32(ip_size);
1188 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1189 &ip_size, sizeof(uint32_t));
1191 /* update the ip type */
1192 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1193 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1194 &ip_type, sizeof(uint32_t));
1196 /* update the computed field to notify it is ipv4 header */
1197 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1200 if (!ulp_rte_item_skip_void(&item, 1))
1201 return BNXT_TF_RC_ERROR;
1202 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1203 ipv6_spec = item->spec;
1204 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1206 /* copy the ipv4 details */
1207 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1208 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1210 /* Update the ip size details */
1211 ip_size = tfp_cpu_to_be_32(ip_size);
1212 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1213 &ip_size, sizeof(uint32_t));
1215 /* update the ip type */
1216 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1217 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1218 &ip_type, sizeof(uint32_t));
1220 /* update the computed field to notify it is ipv6 header */
1221 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1224 if (!ulp_rte_item_skip_void(&item, 1))
1225 return BNXT_TF_RC_ERROR;
1227 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1228 return BNXT_TF_RC_ERROR;
1232 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1233 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1234 return BNXT_TF_RC_ERROR;
1236 /* copy the udp details */
1237 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1238 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1240 if (!ulp_rte_item_skip_void(&item, 1))
1241 return BNXT_TF_RC_ERROR;
1244 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1245 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1246 return BNXT_TF_RC_ERROR;
1248 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1249 /* copy the vxlan details */
1250 memcpy(&vxlan_spec, item->spec, vxlan_size);
1251 vxlan_spec.flags = 0x08;
1252 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1253 (const uint8_t *)&vxlan_spec,
1255 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1256 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1257 &vxlan_size, sizeof(uint32_t));
1259 /*update the hdr_bitmap with vxlan */
1260 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1261 return BNXT_TF_RC_SUCCESS;
1264 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1266 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1268 struct ulp_rte_parser_params *params)
1270 /* update the hdr_bitmap with vxlan */
1271 ULP_BITMAP_SET(params->act_bitmap.bits,
1272 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1273 return BNXT_TF_RC_SUCCESS;
1276 /* Function to handle the parsing of RTE Flow action drop Header. */
1278 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1279 struct ulp_rte_parser_params *params)
1281 /* Update the hdr_bitmap with drop */
1282 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1283 return BNXT_TF_RC_SUCCESS;
1286 /* Function to handle the parsing of RTE Flow action count. */
1288 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1289 struct ulp_rte_parser_params *params)
1292 const struct rte_flow_action_count *act_count;
1293 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1295 act_count = action_item->conf;
1297 if (act_count->shared) {
1299 "Parse Error:Shared count not supported\n");
1300 return BNXT_TF_RC_PARSE_ERR;
1302 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1304 BNXT_ULP_ACT_PROP_SZ_COUNT);
1307 /* Update the hdr_bitmap with count */
1308 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1309 return BNXT_TF_RC_SUCCESS;
1312 /* Function to handle the parsing of RTE Flow action PF. */
1314 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1315 struct ulp_rte_parser_params *params)
1319 /* Update the hdr_bitmap with vnic bit */
1320 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1322 /* copy the PF of the current device into VNIC Property */
1323 svif = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1324 svif = bnxt_get_vnic_id(svif, BNXT_ULP_INTF_TYPE_INVALID);
1325 svif = rte_cpu_to_be_32(svif);
1326 memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1327 &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1329 return BNXT_TF_RC_SUCCESS;
1332 /* Function to handle the parsing of RTE Flow action VF. */
1334 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1335 struct ulp_rte_parser_params *param)
1337 const struct rte_flow_action_vf *vf_action;
1340 vf_action = action_item->conf;
1342 if (vf_action->original) {
1344 "Parse Error:VF Original not supported\n");
1345 return BNXT_TF_RC_PARSE_ERR;
1347 /* TBD: Update the computed VNIC using VF conversion */
1348 pid = bnxt_get_vnic_id(vf_action->id,
1349 BNXT_ULP_INTF_TYPE_INVALID);
1350 pid = rte_cpu_to_be_32(pid);
1351 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1352 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1355 /* Update the hdr_bitmap with count */
1356 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1357 return BNXT_TF_RC_SUCCESS;
1360 /* Function to handle the parsing of RTE Flow action port_id. */
1362 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1363 struct ulp_rte_parser_params *param)
1365 const struct rte_flow_action_port_id *port_id;
1366 struct ulp_rte_act_prop *act;
1372 port_id = act_item->conf;
1375 "ParseErr: Invalid Argument\n");
1376 return BNXT_TF_RC_PARSE_ERR;
1378 if (port_id->original) {
1380 "ParseErr:Portid Original not supported\n");
1381 return BNXT_TF_RC_PARSE_ERR;
1384 /* Get the port db ifindex */
1385 rc = ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx,
1389 BNXT_TF_DBG(ERR, "Invalid port id\n");
1390 return BNXT_TF_RC_ERROR;
1393 act = ¶m->act_prop;
1394 if (param->dir == ULP_DIR_EGRESS) {
1395 rc = ulp_port_db_vport_get(param->ulp_ctx,
1398 return BNXT_TF_RC_ERROR;
1401 pid = rte_cpu_to_be_32(pid);
1402 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1403 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1405 rc = ulp_port_db_default_vnic_get(param->ulp_ctx,
1407 BNXT_ULP_DRV_FUNC_VNIC,
1410 return BNXT_TF_RC_ERROR;
1413 pid = rte_cpu_to_be_32(pid);
1414 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1415 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1418 /*Update the hdr_bitmap with vnic */
1419 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1420 return BNXT_TF_RC_SUCCESS;
1423 /* Function to handle the parsing of RTE Flow action phy_port. */
1425 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1426 struct ulp_rte_parser_params *prm)
1428 const struct rte_flow_action_phy_port *phy_port;
1433 phy_port = action_item->conf;
1436 "ParseErr: Invalid Argument\n");
1437 return BNXT_TF_RC_PARSE_ERR;
1440 if (phy_port->original) {
1442 "Parse Err:Port Original not supported\n");
1443 return BNXT_TF_RC_PARSE_ERR;
1445 if (prm->dir != ULP_DIR_EGRESS) {
1447 "Parse Err:Phy ports are valid only for egress\n");
1448 return BNXT_TF_RC_PARSE_ERR;
1450 /* Get the physical port details from port db */
1451 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1454 BNXT_TF_DBG(DEBUG, "Failed to get port details\n");
1459 pid = rte_cpu_to_be_32(pid);
1460 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1461 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1463 /* update the hdr_bitmap with vport */
1464 ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1465 return BNXT_TF_RC_SUCCESS;
1468 /* Function to handle the parsing of RTE Flow action pop vlan. */
1470 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1471 struct ulp_rte_parser_params *params)
1473 /* Update the act_bitmap with pop */
1474 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1475 return BNXT_TF_RC_SUCCESS;
1478 /* Function to handle the parsing of RTE Flow action push vlan. */
1480 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1481 struct ulp_rte_parser_params *params)
1483 const struct rte_flow_action_of_push_vlan *push_vlan;
1485 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1487 push_vlan = action_item->conf;
1489 ethertype = push_vlan->ethertype;
1490 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1492 "Parse Err: Ethertype not supported\n");
1493 return BNXT_TF_RC_PARSE_ERR;
1495 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1496 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1497 /* Update the hdr_bitmap with push vlan */
1498 ULP_BITMAP_SET(params->act_bitmap.bits,
1499 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1500 return BNXT_TF_RC_SUCCESS;
1502 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1503 return BNXT_TF_RC_ERROR;
1506 /* Function to handle the parsing of RTE Flow action set vlan id. */
1508 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1509 struct ulp_rte_parser_params *params)
1511 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1513 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1515 vlan_vid = action_item->conf;
1516 if (vlan_vid && vlan_vid->vlan_vid) {
1517 vid = vlan_vid->vlan_vid;
1518 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1519 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1520 /* Update the hdr_bitmap with vlan vid */
1521 ULP_BITMAP_SET(params->act_bitmap.bits,
1522 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1523 return BNXT_TF_RC_SUCCESS;
1525 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1526 return BNXT_TF_RC_ERROR;
1529 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1531 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1532 struct ulp_rte_parser_params *params)
1534 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1536 struct ulp_rte_act_prop *act = ¶ms->act_prop;
1538 vlan_pcp = action_item->conf;
1540 pcp = vlan_pcp->vlan_pcp;
1541 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1542 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1543 /* Update the hdr_bitmap with vlan vid */
1544 ULP_BITMAP_SET(params->act_bitmap.bits,
1545 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1546 return BNXT_TF_RC_SUCCESS;
1548 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1549 return BNXT_TF_RC_ERROR;