1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
14 /* Inline Func to read integer that is stored in big endian format */
/*
 * Reads a 32-bit big-endian value from 'buffer' and stores the host-order
 * result through the out pointer 'val'.
 * NOTE(review): this listing has gaps (embedded line numbers jump); the tail
 * of the parameter list, the temp_val declaration and the closing brace are
 * not visible here.
 */
15 static inline void ulp_util_field_int_read(uint8_t *buffer,
/* memcpy avoids a misaligned 32-bit load from the byte buffer. */
20 memcpy(&temp_val, buffer, sizeof(uint32_t));
21 *val = rte_be_to_cpu_32(temp_val);
24 /* Inline Func to write integer that is stored in big endian format */
/*
 * Writes 'val' into 'buffer' in big-endian (network) byte order.
 * NOTE(review): parameter-list tail and closing brace are missing from this
 * listing (embedded line numbers jump).
 */
25 static inline void ulp_util_field_int_write(uint8_t *buffer,
28 uint32_t temp_val = rte_cpu_to_be_32(val);
/* memcpy avoids a misaligned 32-bit store into the byte buffer. */
30 memcpy(buffer, &temp_val, sizeof(uint32_t));
33 /* Utility function to skip the void items. */
/*
 * Advances *item past consecutive RTE_FLOW_ITEM_TYPE_VOID entries.
 * NOTE(review): 'increment' presumably steps the cursor before scanning —
 * TODO confirm; the return type, loop body and return statement are not
 * visible in this listing.
 */
35 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
41 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
48 /* Utility function to copy field spec items */
/*
 * Copies 'field->size' bytes from 'buffer' into field->spec.
 * NOTE(review): callers chain the return value, so this presumably returns
 * the next hdr_field slot — TODO confirm; the size assignment and return
 * statement are not visible in this listing.
 */
49 static struct ulp_rte_hdr_field *
50 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
55 memcpy(field->spec, buffer, field->size);
60 /* Utility function to copy field masks items */
/*
 * Copies 'size' bytes from 'buffer' into the mask of hdr_field[*idx].
 * FIX(review): "&params" had been mangled into the HTML entity form
 * "¶ms" (&para; + "ms"); restored below.
 * NOTE(review): the parameter-list tail and any *idx increment are missing
 * from this listing (embedded line numbers jump).
 */
62 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
67 struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
69 memcpy(field->mask, buffer, size);
74 * Function to handle the parsing of RTE Flows and placing
75 * the RTE flow items into the ulp structures.
/*
 * Walks the pattern array until RTE_FLOW_ITEM_TYPE_END, looks each item up
 * in the ulp_hdr_info table, rejects unsupported types, and dispatches to
 * the per-protocol callback.  Finishes by deriving the implied SVIF.
 * NOTE(review): listing has gaps — the item-advance step and some closing
 * braces are not visible here.
 */
78 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
79 struct ulp_rte_parser_params *params)
81 const struct rte_flow_item *item = pattern;
82 struct bnxt_ulp_rte_hdr_info *hdr_info;
/* Field slots below BNXT_ULP_HDR_FIELD_LAST are reserved for fixed fields. */
84 params->field_idx = BNXT_ULP_HDR_FIELD_LAST;
85 /* Parse all the items in the pattern */
86 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
87 /* get the header information from the flow_hdr_info table */
88 hdr_info = &ulp_hdr_info[item->type];
89 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
91 "Truflow parser does not support type %d\n",
93 return BNXT_TF_RC_PARSE_ERR;
94 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
95 /* call the registered callback handler */
96 if (hdr_info->proto_hdr_func) {
97 if (hdr_info->proto_hdr_func(item, params) !=
99 return BNXT_TF_RC_ERROR;
105 /* update the implied SVIF */
/* Best effort: result deliberately ignored (cast to void). */
106 (void)ulp_rte_parser_svif_process(params);
107 return BNXT_TF_RC_SUCCESS;
111 * Function to handle the parsing of RTE Flows and placing
112 * the RTE flow actions into the ulp structures.
/*
 * Walks the actions array until RTE_FLOW_ACTION_TYPE_END, looks each action
 * up in the ulp_act_info table, rejects unsupported actions, and dispatches
 * to the per-action callback.
 * NOTE(review): listing has gaps — the action_item advance step and some
 * closing braces are not visible here.
 */
115 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
116 struct ulp_rte_parser_params *params)
118 const struct rte_flow_action *action_item = actions;
119 struct bnxt_ulp_rte_act_info *hdr_info;
121 /* Parse all the items in the pattern */
122 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
123 /* get the header information from the flow_hdr_info table */
124 hdr_info = &ulp_act_info[action_item->type];
125 if (hdr_info->act_type ==
126 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
128 "Truflow parser does not support act %u\n",
130 return BNXT_TF_RC_ERROR;
131 } else if (hdr_info->act_type ==
132 BNXT_ULP_ACT_TYPE_SUPPORTED) {
133 /* call the registered callback handler */
134 if (hdr_info->proto_act_func) {
135 if (hdr_info->proto_act_func(action_item,
137 BNXT_TF_RC_SUCCESS) {
138 return BNXT_TF_RC_ERROR;
144 return BNXT_TF_RC_SUCCESS;
147 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * Records the source interface (SVIF) spec/mask into the fixed SVIF header
 * field slot.  Rejects a second source item (only one SVIF allowed per
 * flow).  For PORT_ID items the DPDK port id is translated to an SVIF via
 * bnxt_get_svif(), direction-dependent.
 * NOTE(review): listing has gaps — 'svif'/'mask'/'dir' parameter lines and
 * some braces are not visible here.
 */
149 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
150 enum rte_flow_item_type proto,
154 uint16_t port_id = svif;
157 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF)) {
160 " multiple sources not supported\n");
161 return BNXT_TF_RC_ERROR;
164 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF. */
165 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF);
167 if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
168 /* perform the conversion from dpdk port to svif */
170 if (dir == ULP_DIR_EGRESS)
171 svif = bnxt_get_svif(port_id, true);
173 svif = bnxt_get_svif(port_id, false);
176 memcpy(params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec,
177 &svif, sizeof(svif));
178 memcpy(params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask,
179 &mask, sizeof(mask));
180 params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif);
181 return BNXT_TF_RC_SUCCESS;
184 /* Function to handle the parsing of the RTE port id
/*
 * If no explicit source item set the SVIF, derives it from the port id that
 * was pre-seeded into the SVIF field slot, clears that slot, and delegates
 * to ulp_rte_parser_svif_set() as if a PORT_ID item had been seen.
 * NOTE(review): listing has gaps — the trailing arguments of the
 * svif_set call are not visible here.
 */
187 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
189 uint16_t port_id = 0;
191 uint16_t svif_mask = 0xFFFF;
/* Explicit SVIF already present: nothing to derive. */
193 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF))
194 return BNXT_TF_RC_SUCCESS;
196 /* SVIF not set. So get the port id and direction */
197 buffer = params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec;
198 memcpy(&port_id, buffer, sizeof(port_id));
199 memset(buffer, 0, RTE_PARSER_FLOW_HDR_FIELD_SIZE);
201 return ulp_rte_parser_svif_set(params,
202 RTE_FLOW_ITEM_TYPE_PORT_ID,
206 /* Function to handle the parsing of RTE Flow item PF Header. */
/*
 * PF item handler: reads the port id pre-seeded into the SVIF field slot,
 * clears the slot, and records the SVIF via ulp_rte_parser_svif_set().
 * NOTE(review): listing has gaps — the trailing arguments of the
 * svif_set call are not visible here.
 */
208 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
209 struct ulp_rte_parser_params *params)
211 uint16_t port_id = 0;
213 uint16_t svif_mask = 0xFFFF;
215 buffer = params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec;
216 memcpy(&port_id, buffer, sizeof(port_id));
217 memset(buffer, 0, RTE_PARSER_FLOW_HDR_FIELD_SIZE);
219 return ulp_rte_parser_svif_set(params,
224 /* Function to handle the parsing of RTE Flow item VF Header. */
/*
 * VF item handler: uses the VF id from spec/mask (truncated to 16 bits) as
 * the SVIF and records it via ulp_rte_parser_svif_set().
 * NOTE(review): listing has gaps — the NULL checks guarding vf_spec/vf_mask
 * dereferences are presumably on the missing lines (TODO confirm).
 */
226 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
227 struct ulp_rte_parser_params *params)
229 const struct rte_flow_item_vf *vf_spec = item->spec;
230 const struct rte_flow_item_vf *vf_mask = item->mask;
231 uint16_t svif = 0, mask = 0;
233 /* Get VF rte_flow_item for Port details */
235 svif = (uint16_t)vf_spec->id;
237 mask = (uint16_t)vf_mask->id;
239 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
242 /* Function to handle the parsing of RTE Flow item port id Header. */
/*
 * PORT_ID item handler: passes the DPDK port id from spec/mask (truncated
 * to 16 bits) to ulp_rte_parser_svif_set(), which converts it to an SVIF.
 * NOTE(review): listing has gaps — the NULL checks guarding
 * port_spec/port_mask dereferences are presumably on the missing lines.
 */
244 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
245 struct ulp_rte_parser_params *params)
247 const struct rte_flow_item_port_id *port_spec = item->spec;
248 const struct rte_flow_item_port_id *port_mask = item->mask;
249 uint16_t svif = 0, mask = 0;
252 * Copy the rte_flow_item for Port into hdr_field using port id
256 svif = (uint16_t)port_spec->id;
258 mask = (uint16_t)port_mask->id;
260 /* Update the SVIF details */
261 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
264 /* Function to handle the parsing of RTE Flow item phy port Header. */
/*
 * PHY_PORT item handler: uses the physical port index from spec/mask as the
 * SVIF and records it via ulp_rte_parser_svif_set().
 * NOTE(review): listing has gaps — the NULL checks guarding
 * port_spec/port_mask dereferences are presumably on the missing lines.
 */
266 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
267 struct ulp_rte_parser_params *params)
269 const struct rte_flow_item_phy_port *port_spec = item->spec;
270 const struct rte_flow_item_phy_port *port_mask = item->mask;
271 uint32_t svif = 0, mask = 0;
273 /* Copy the rte_flow_item for phy port into hdr_field */
275 svif = port_spec->index;
277 mask = port_mask->index;
279 /* Update the SVIF details */
280 return ulp_rte_parser_svif_set(params, item->type, svif, mask);
283 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Copies the eth spec (dst MAC, src MAC, ether type) and mask into
 * consecutive hdr_field slots, reserves slots for possible VLAN headers,
 * and sets the outer/inner ethernet bits in the header bitmap (inner if an
 * outer eth was already seen).
 * FIX(review): HTML-entity mojibake restored — "¶ms" -> "&params" and
 * "ð_mask" -> "&eth_mask" (&para;/&eth; had swallowed the '&' text).
 * NOTE(review): listing has gaps — spec/mask NULL guards and some argument
 * lines are on missing lines.
 */
285 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
286 struct ulp_rte_parser_params *params)
288 const struct rte_flow_item_eth *eth_spec = item->spec;
289 const struct rte_flow_item_eth *eth_mask = item->mask;
290 struct ulp_rte_hdr_field *field;
291 uint32_t idx = params->field_idx;
292 uint64_t set_flag = 0;
296 * Copy the rte_flow_item for eth into hdr_field using ethernet
300 size = sizeof(eth_spec->dst.addr_bytes);
301 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
302 eth_spec->dst.addr_bytes,
304 size = sizeof(eth_spec->src.addr_bytes);
305 field = ulp_rte_parser_fld_copy(field,
306 eth_spec->src.addr_bytes,
308 field = ulp_rte_parser_fld_copy(field,
310 sizeof(eth_spec->type));
313 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
314 sizeof(eth_mask->dst.addr_bytes));
315 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
316 sizeof(eth_mask->src.addr_bytes));
317 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
318 sizeof(eth_mask->type));
320 /* Add number of vlan header elements */
321 params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
/* Remember where VLAN fields start; pre-reserve their slots. */
322 params->vlan_idx = params->field_idx;
323 params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
325 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
326 set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
327 BNXT_ULP_HDR_BIT_O_ETH);
329 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
331 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
332 BNXT_ULP_HDR_BIT_I_ETH);
334 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
335 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
337 return BNXT_TF_RC_SUCCESS;
340 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Splits the VLAN TCI into priority (top 3 bits) and tag, stores
 * priority/tag/inner-type spec and mask into the pre-reserved vlan field
 * slots, then classifies the tag as OO/OI/IO/II VLAN based on which eth and
 * vlan bits are already set, updating the outer/inner vtag counters kept in
 * big-endian form in the O_VTAG_NUM/I_VTAG_NUM field slots.
 * FIX(review): HTML-entity mojibake restored ("¶ms" -> "&params", three
 * occurrences) and error-string typo fixed ("withtout" -> "without").
 * NOTE(review): listing has gaps — spec/mask NULL guards, counter
 * increments and size constants are on missing lines.
 */
342 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
343 struct ulp_rte_parser_params *params)
345 const struct rte_flow_item_vlan *vlan_spec = item->spec;
346 const struct rte_flow_item_vlan *vlan_mask = item->mask;
347 struct ulp_rte_hdr_field *field;
348 struct ulp_rte_hdr_bitmap *hdr_bitmap;
349 uint32_t idx = params->vlan_idx;
350 uint16_t vlan_tag, priority;
351 uint32_t outer_vtag_num = 0, inner_vtag_num = 0;
352 uint8_t *outer_tag_buff;
353 uint8_t *inner_tag_buff;
355 outer_tag_buff = params->hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].spec;
356 inner_tag_buff = params->hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].spec;
359 * Copy the rte_flow_item for vlan into hdr_field using Vlan
/* TCI: bits 15..13 = PCP (priority), low bits = VLAN id. */
363 vlan_tag = ntohs(vlan_spec->tci);
364 priority = htons(vlan_tag >> 13);
366 vlan_tag = htons(vlan_tag);
368 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
371 field = ulp_rte_parser_fld_copy(field,
374 field = ulp_rte_parser_fld_copy(field,
375 &vlan_spec->inner_type,
376 sizeof(vlan_spec->inner_type));
380 vlan_tag = ntohs(vlan_mask->tci);
381 priority = htons(vlan_tag >> 13);
383 vlan_tag = htons(vlan_tag);
385 field = &params->hdr_field[idx];
386 memcpy(field->mask, &priority, field->size);
388 memcpy(field->mask, &vlan_tag, field->size);
390 memcpy(field->mask, &vlan_mask->inner_type, field->size);
392 /* Set the vlan index to new incremented value */
393 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
395 /* Get the outer tag and inner tag counts */
396 ulp_util_field_int_read(outer_tag_buff, &outer_vtag_num);
397 ulp_util_field_int_read(inner_tag_buff, &inner_vtag_num);
399 /* Update the hdr_bitmap of the vlans */
400 hdr_bitmap = &params->hdr_bitmap;
401 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
402 !ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) {
403 /* Set the outer vlan bit and update the vlan tag num */
404 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN);
406 ulp_util_field_int_write(outer_tag_buff, outer_vtag_num);
407 params->hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
409 } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
410 ULP_BITMAP_ISSET(hdr_bitmap->bits,
411 BNXT_ULP_HDR_BIT_OO_VLAN) &&
412 !ULP_BITMAP_ISSET(hdr_bitmap->bits,
413 BNXT_ULP_HDR_BIT_OI_VLAN)) {
414 /* Set the outer vlan bit and update the vlan tag num */
415 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OI_VLAN);
417 ulp_util_field_int_write(outer_tag_buff, outer_vtag_num);
418 params->hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
420 } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
421 BNXT_ULP_HDR_BIT_O_ETH) &&
422 ULP_BITMAP_ISSET(hdr_bitmap->bits,
423 BNXT_ULP_HDR_BIT_OO_VLAN) &&
424 ULP_BITMAP_ISSET(hdr_bitmap->bits,
425 BNXT_ULP_HDR_BIT_OI_VLAN) &&
426 ULP_BITMAP_ISSET(hdr_bitmap->bits,
427 BNXT_ULP_HDR_BIT_I_ETH) &&
428 !ULP_BITMAP_ISSET(hdr_bitmap->bits,
429 BNXT_ULP_HDR_BIT_IO_VLAN)) {
430 /* Set the inner vlan bit and update the vlan tag num */
431 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_IO_VLAN);
433 ulp_util_field_int_write(inner_tag_buff, inner_vtag_num);
434 params->hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
436 } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
437 BNXT_ULP_HDR_BIT_O_ETH) &&
438 ULP_BITMAP_ISSET(hdr_bitmap->bits,
439 BNXT_ULP_HDR_BIT_OO_VLAN) &&
440 ULP_BITMAP_ISSET(hdr_bitmap->bits,
441 BNXT_ULP_HDR_BIT_OI_VLAN) &&
442 ULP_BITMAP_ISSET(hdr_bitmap->bits,
443 BNXT_ULP_HDR_BIT_I_ETH) &&
444 ULP_BITMAP_ISSET(hdr_bitmap->bits,
445 BNXT_ULP_HDR_BIT_IO_VLAN) &&
446 !ULP_BITMAP_ISSET(hdr_bitmap->bits,
447 BNXT_ULP_HDR_BIT_II_VLAN)) {
448 /* Set the inner vlan bit and update the vlan tag num */
449 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_II_VLAN);
451 ulp_util_field_int_write(inner_tag_buff, inner_vtag_num);
452 params->hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
455 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
456 return BNXT_TF_RC_ERROR;
458 return BNXT_TF_RC_SUCCESS;
461 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
/*
 * Copies every IPv4 header field (version/ihl .. dst_addr) spec and mask
 * into consecutive hdr_field slots and sets the outer or inner IPv4 + L3
 * bits — inner if an outer L3 was already recorded.  A third L3 header is
 * rejected.
 * FIX(review): HTML-entity mojibake restored ("¶ms" -> "&params", two
 * occurrences).
 * NOTE(review): listing has gaps — spec/mask NULL guards and per-field
 * 'size' arguments are on missing lines.
 */
463 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
464 struct ulp_rte_parser_params *params)
466 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
467 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
468 struct ulp_rte_hdr_field *field;
469 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
470 uint32_t idx = params->field_idx;
473 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
474 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
475 return BNXT_TF_RC_ERROR;
479 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
483 size = sizeof(ipv4_spec->hdr.version_ihl);
484 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
485 &ipv4_spec->hdr.version_ihl,
487 size = sizeof(ipv4_spec->hdr.type_of_service);
488 field = ulp_rte_parser_fld_copy(field,
489 &ipv4_spec->hdr.type_of_service,
491 size = sizeof(ipv4_spec->hdr.total_length);
492 field = ulp_rte_parser_fld_copy(field,
493 &ipv4_spec->hdr.total_length,
495 size = sizeof(ipv4_spec->hdr.packet_id);
496 field = ulp_rte_parser_fld_copy(field,
497 &ipv4_spec->hdr.packet_id,
499 size = sizeof(ipv4_spec->hdr.fragment_offset);
500 field = ulp_rte_parser_fld_copy(field,
501 &ipv4_spec->hdr.fragment_offset,
503 size = sizeof(ipv4_spec->hdr.time_to_live);
504 field = ulp_rte_parser_fld_copy(field,
505 &ipv4_spec->hdr.time_to_live,
507 size = sizeof(ipv4_spec->hdr.next_proto_id);
508 field = ulp_rte_parser_fld_copy(field,
509 &ipv4_spec->hdr.next_proto_id,
511 size = sizeof(ipv4_spec->hdr.hdr_checksum);
512 field = ulp_rte_parser_fld_copy(field,
513 &ipv4_spec->hdr.hdr_checksum,
515 size = sizeof(ipv4_spec->hdr.src_addr);
516 field = ulp_rte_parser_fld_copy(field,
517 &ipv4_spec->hdr.src_addr,
519 size = sizeof(ipv4_spec->hdr.dst_addr);
520 field = ulp_rte_parser_fld_copy(field,
521 &ipv4_spec->hdr.dst_addr,
525 ulp_rte_prsr_mask_copy(params, &idx,
526 &ipv4_mask->hdr.version_ihl,
527 sizeof(ipv4_mask->hdr.version_ihl));
528 ulp_rte_prsr_mask_copy(params, &idx,
529 &ipv4_mask->hdr.type_of_service,
530 sizeof(ipv4_mask->hdr.type_of_service));
531 ulp_rte_prsr_mask_copy(params, &idx,
532 &ipv4_mask->hdr.total_length,
533 sizeof(ipv4_mask->hdr.total_length));
534 ulp_rte_prsr_mask_copy(params, &idx,
535 &ipv4_mask->hdr.packet_id,
536 sizeof(ipv4_mask->hdr.packet_id));
537 ulp_rte_prsr_mask_copy(params, &idx,
538 &ipv4_mask->hdr.fragment_offset,
539 sizeof(ipv4_mask->hdr.fragment_offset));
540 ulp_rte_prsr_mask_copy(params, &idx,
541 &ipv4_mask->hdr.time_to_live,
542 sizeof(ipv4_mask->hdr.time_to_live));
543 ulp_rte_prsr_mask_copy(params, &idx,
544 &ipv4_mask->hdr.next_proto_id,
545 sizeof(ipv4_mask->hdr.next_proto_id));
546 ulp_rte_prsr_mask_copy(params, &idx,
547 &ipv4_mask->hdr.hdr_checksum,
548 sizeof(ipv4_mask->hdr.hdr_checksum));
549 ulp_rte_prsr_mask_copy(params, &idx,
550 &ipv4_mask->hdr.src_addr,
551 sizeof(ipv4_mask->hdr.src_addr));
552 ulp_rte_prsr_mask_copy(params, &idx,
553 &ipv4_mask->hdr.dst_addr,
554 sizeof(ipv4_mask->hdr.dst_addr));
556 /* Add the number of ipv4 header elements */
557 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
559 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
560 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
561 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
562 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
563 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
564 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
566 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
567 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
569 return BNXT_TF_RC_SUCCESS;
572 /* Function to handle the parsing of RTE Flow item IPV6 Header */
/*
 * Copies every IPv6 header field (vtc_flow .. dst_addr) spec and mask into
 * consecutive hdr_field slots and sets the outer or inner IPv6 + L3 bits —
 * inner if an outer L3 was already recorded.  A third L3 header is
 * rejected.
 * FIX(review): HTML-entity mojibake restored ("¶ms" -> "&params", two
 * occurrences); stale copy-paste comment corrected from "ipv4" to "ipv6".
 * NOTE(review): listing has gaps — spec/mask NULL guards and per-field
 * 'size' arguments are on missing lines.
 */
574 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
575 struct ulp_rte_parser_params *params)
577 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
578 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
579 struct ulp_rte_hdr_field *field;
580 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
581 uint32_t idx = params->field_idx;
584 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
585 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
586 return BNXT_TF_RC_ERROR;
590 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
594 size = sizeof(ipv6_spec->hdr.vtc_flow);
595 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
596 &ipv6_spec->hdr.vtc_flow,
598 size = sizeof(ipv6_spec->hdr.payload_len);
599 field = ulp_rte_parser_fld_copy(field,
600 &ipv6_spec->hdr.payload_len,
602 size = sizeof(ipv6_spec->hdr.proto);
603 field = ulp_rte_parser_fld_copy(field,
604 &ipv6_spec->hdr.proto,
606 size = sizeof(ipv6_spec->hdr.hop_limits);
607 field = ulp_rte_parser_fld_copy(field,
608 &ipv6_spec->hdr.hop_limits,
610 size = sizeof(ipv6_spec->hdr.src_addr);
611 field = ulp_rte_parser_fld_copy(field,
612 &ipv6_spec->hdr.src_addr,
614 size = sizeof(ipv6_spec->hdr.dst_addr);
615 field = ulp_rte_parser_fld_copy(field,
616 &ipv6_spec->hdr.dst_addr,
620 ulp_rte_prsr_mask_copy(params, &idx,
621 &ipv6_mask->hdr.vtc_flow,
622 sizeof(ipv6_mask->hdr.vtc_flow));
623 ulp_rte_prsr_mask_copy(params, &idx,
624 &ipv6_mask->hdr.payload_len,
625 sizeof(ipv6_mask->hdr.payload_len));
626 ulp_rte_prsr_mask_copy(params, &idx,
627 &ipv6_mask->hdr.proto,
628 sizeof(ipv6_mask->hdr.proto));
629 ulp_rte_prsr_mask_copy(params, &idx,
630 &ipv6_mask->hdr.hop_limits,
631 sizeof(ipv6_mask->hdr.hop_limits));
632 ulp_rte_prsr_mask_copy(params, &idx,
633 &ipv6_mask->hdr.src_addr,
634 sizeof(ipv6_mask->hdr.src_addr));
635 ulp_rte_prsr_mask_copy(params, &idx,
636 &ipv6_mask->hdr.dst_addr,
637 sizeof(ipv6_mask->hdr.dst_addr));
639 /* add number of ipv6 header elements */
640 params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
642 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
643 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
644 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
645 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
646 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
647 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
649 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
650 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
652 return BNXT_TF_RC_SUCCESS;
655 /* Function to handle the parsing of RTE Flow item UDP Header. */
/*
 * Copies the UDP header fields (src/dst port, length, checksum) spec and
 * mask into consecutive hdr_field slots and sets the outer or inner UDP +
 * L4 bits — inner if an outer L4 was already recorded.  A third L4 header
 * is rejected.
 * FIX(review): HTML-entity mojibake restored ("¶ms" -> "&params", two
 * occurrences); stale copy-paste comment corrected from "ipv4" to "udp".
 * NOTE(review): listing has gaps — spec/mask NULL guards and per-field
 * 'size' arguments are on missing lines.
 */
657 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
658 struct ulp_rte_parser_params *params)
660 const struct rte_flow_item_udp *udp_spec = item->spec;
661 const struct rte_flow_item_udp *udp_mask = item->mask;
662 struct ulp_rte_hdr_field *field;
663 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
664 uint32_t idx = params->field_idx;
667 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
668 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
669 return BNXT_TF_RC_ERROR;
673 * Copy the rte_flow_item for udp into hdr_field using udp
677 size = sizeof(udp_spec->hdr.src_port);
678 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
679 &udp_spec->hdr.src_port,
681 size = sizeof(udp_spec->hdr.dst_port);
682 field = ulp_rte_parser_fld_copy(field,
683 &udp_spec->hdr.dst_port,
685 size = sizeof(udp_spec->hdr.dgram_len);
686 field = ulp_rte_parser_fld_copy(field,
687 &udp_spec->hdr.dgram_len,
689 size = sizeof(udp_spec->hdr.dgram_cksum);
690 field = ulp_rte_parser_fld_copy(field,
691 &udp_spec->hdr.dgram_cksum,
695 ulp_rte_prsr_mask_copy(params, &idx,
696 &udp_mask->hdr.src_port,
697 sizeof(udp_mask->hdr.src_port));
698 ulp_rte_prsr_mask_copy(params, &idx,
699 &udp_mask->hdr.dst_port,
700 sizeof(udp_mask->hdr.dst_port));
701 ulp_rte_prsr_mask_copy(params, &idx,
702 &udp_mask->hdr.dgram_len,
703 sizeof(udp_mask->hdr.dgram_len));
704 ulp_rte_prsr_mask_copy(params, &idx,
705 &udp_mask->hdr.dgram_cksum,
706 sizeof(udp_mask->hdr.dgram_cksum));
709 /* Add number of UDP header elements */
710 params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
712 /* Set the udp header bitmap and computed l4 header bitmaps */
713 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
714 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
715 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
716 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
717 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
719 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
720 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
722 return BNXT_TF_RC_SUCCESS;
725 /* Function to handle the parsing of RTE Flow item TCP Header. */
/*
 * Copies the TCP header fields (ports, seq/ack, data_off, flags, window,
 * checksum, urgent ptr) spec and mask into consecutive hdr_field slots and
 * sets the outer or inner TCP + L4 bits — inner if an outer L4 was already
 * recorded.  A third L4 header is rejected.
 * FIX(review): HTML-entity mojibake restored ("¶ms" -> "&params", two
 * occurrences); stale copy-paste comments corrected ("ipv4" -> "tcp",
 * "udp header bitmap" -> "tcp header bitmap").
 * NOTE(review): listing has gaps — spec/mask NULL guards and per-field
 * 'size' arguments are on missing lines; the bare "idx +=" below is
 * presumably the no-spec skip path (TODO confirm against upstream).
 */
727 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
728 struct ulp_rte_parser_params *params)
730 const struct rte_flow_item_tcp *tcp_spec = item->spec;
731 const struct rte_flow_item_tcp *tcp_mask = item->mask;
732 struct ulp_rte_hdr_field *field;
733 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
734 uint32_t idx = params->field_idx;
737 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
738 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
739 return BNXT_TF_RC_ERROR;
743 * Copy the rte_flow_item for tcp into hdr_field using tcp
747 size = sizeof(tcp_spec->hdr.src_port);
748 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
749 &tcp_spec->hdr.src_port,
751 size = sizeof(tcp_spec->hdr.dst_port);
752 field = ulp_rte_parser_fld_copy(field,
753 &tcp_spec->hdr.dst_port,
755 size = sizeof(tcp_spec->hdr.sent_seq);
756 field = ulp_rte_parser_fld_copy(field,
757 &tcp_spec->hdr.sent_seq,
759 size = sizeof(tcp_spec->hdr.recv_ack);
760 field = ulp_rte_parser_fld_copy(field,
761 &tcp_spec->hdr.recv_ack,
763 size = sizeof(tcp_spec->hdr.data_off);
764 field = ulp_rte_parser_fld_copy(field,
765 &tcp_spec->hdr.data_off,
767 size = sizeof(tcp_spec->hdr.tcp_flags);
768 field = ulp_rte_parser_fld_copy(field,
769 &tcp_spec->hdr.tcp_flags,
771 size = sizeof(tcp_spec->hdr.rx_win);
772 field = ulp_rte_parser_fld_copy(field,
773 &tcp_spec->hdr.rx_win,
775 size = sizeof(tcp_spec->hdr.cksum);
776 field = ulp_rte_parser_fld_copy(field,
777 &tcp_spec->hdr.cksum,
779 size = sizeof(tcp_spec->hdr.tcp_urp);
780 field = ulp_rte_parser_fld_copy(field,
781 &tcp_spec->hdr.tcp_urp,
784 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
788 ulp_rte_prsr_mask_copy(params, &idx,
789 &tcp_mask->hdr.src_port,
790 sizeof(tcp_mask->hdr.src_port));
791 ulp_rte_prsr_mask_copy(params, &idx,
792 &tcp_mask->hdr.dst_port,
793 sizeof(tcp_mask->hdr.dst_port));
794 ulp_rte_prsr_mask_copy(params, &idx,
795 &tcp_mask->hdr.sent_seq,
796 sizeof(tcp_mask->hdr.sent_seq));
797 ulp_rte_prsr_mask_copy(params, &idx,
798 &tcp_mask->hdr.recv_ack,
799 sizeof(tcp_mask->hdr.recv_ack));
800 ulp_rte_prsr_mask_copy(params, &idx,
801 &tcp_mask->hdr.data_off,
802 sizeof(tcp_mask->hdr.data_off));
803 ulp_rte_prsr_mask_copy(params, &idx,
804 &tcp_mask->hdr.tcp_flags,
805 sizeof(tcp_mask->hdr.tcp_flags));
806 ulp_rte_prsr_mask_copy(params, &idx,
807 &tcp_mask->hdr.rx_win,
808 sizeof(tcp_mask->hdr.rx_win));
809 ulp_rte_prsr_mask_copy(params, &idx,
810 &tcp_mask->hdr.cksum,
811 sizeof(tcp_mask->hdr.cksum));
812 ulp_rte_prsr_mask_copy(params, &idx,
813 &tcp_mask->hdr.tcp_urp,
814 sizeof(tcp_mask->hdr.tcp_urp));
816 /* add number of TCP header elements */
817 params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
819 /* Set the tcp header bitmap and computed l4 header bitmaps */
820 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
821 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
822 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
823 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
824 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
826 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
827 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
829 return BNXT_TF_RC_SUCCESS;
832 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
/*
 * Copies the VXLAN header fields (flags, rsvd0, vni, rsvd1) spec and mask
 * into consecutive hdr_field slots and sets the VXLAN tunnel bit in the
 * header bitmap.
 * FIX(review): HTML-entity mojibake restored ("¶ms" -> "&params", two
 * occurrences).
 * NOTE(review): listing has gaps — spec/mask NULL guards and several
 * argument lines are on missing lines.
 */
834 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
835 struct ulp_rte_parser_params *params)
837 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
838 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
839 struct ulp_rte_hdr_field *field;
840 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
841 uint32_t idx = params->field_idx;
845 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
849 size = sizeof(vxlan_spec->flags);
850 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
853 size = sizeof(vxlan_spec->rsvd0);
854 field = ulp_rte_parser_fld_copy(field,
857 size = sizeof(vxlan_spec->vni);
858 field = ulp_rte_parser_fld_copy(field,
861 size = sizeof(vxlan_spec->rsvd1);
862 field = ulp_rte_parser_fld_copy(field,
867 ulp_rte_prsr_mask_copy(params, &idx,
869 sizeof(vxlan_mask->flags));
870 ulp_rte_prsr_mask_copy(params, &idx,
872 sizeof(vxlan_mask->rsvd0));
873 ulp_rte_prsr_mask_copy(params, &idx,
875 sizeof(vxlan_mask->vni));
876 ulp_rte_prsr_mask_copy(params, &idx,
878 sizeof(vxlan_mask->rsvd1));
880 /* Add number of vxlan header elements */
881 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
883 /* Update the hdr_bitmap with vxlan */
884 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
885 return BNXT_TF_RC_SUCCESS;
888 /* Function to handle the parsing of RTE Flow item void Header */
/* No-op handler: VOID items carry no match data, always succeeds. */
890 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
891 struct ulp_rte_parser_params *params __rte_unused)
893 return BNXT_TF_RC_SUCCESS;
896 /* Function to handle the parsing of RTE Flow action void Header. */
/* No-op handler: VOID actions carry no configuration, always succeeds. */
898 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
899 struct ulp_rte_parser_params *params __rte_unused)
901 return BNXT_TF_RC_SUCCESS;
904 /* Function to handle the parsing of RTE Flow action Mark Header. */
/*
 * Stores the mark id (big-endian) into the action-properties table and sets
 * the MARK bit in the action bitmap; errors out if the action conf is
 * missing.
 * FIX(review): HTML-entity mojibake restored ("¶m" -> "&param", two
 * occurrences); stale copy-paste comment corrected (said "vxlan", this is
 * the mark action bit).
 * NOTE(review): listing has gaps — the 'mark_id' declaration and the
 * NULL-check brace structure are on missing lines.
 */
906 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
907 struct ulp_rte_parser_params *param)
909 const struct rte_flow_action_mark *mark;
910 struct ulp_rte_act_bitmap *act = &param->act_bitmap;
913 mark = action_item->conf;
915 mark_id = tfp_cpu_to_be_32(mark->id);
916 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
917 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
919 /* Update the action bitmap with the mark action */
920 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
921 return BNXT_TF_RC_SUCCESS;
923 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
924 return BNXT_TF_RC_ERROR;
927 /* Function to handle the parsing of RTE Flow action RSS Header. */
/*
 * Sets the RSS bit in the action bitmap when the action conf is present;
 * errors out otherwise.  No RSS parameters are captured here.
 * NOTE(review): listing has gaps — the NULL-check brace structure is on
 * missing lines.
 */
929 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
930 struct ulp_rte_parser_params *param)
932 const struct rte_flow_action_rss *rss = action_item->conf;
935 /* Update the action bitmap with the RSS action */
936 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
937 return BNXT_TF_RC_SUCCESS;
939 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
940 return BNXT_TF_RC_ERROR;
943 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
/*
 * Parses the vxlan_encap action's item 'definition' list — which must be
 * ETH, optional VLAN(s), IPv4 or IPv6, UDP, then VXLAN — and serializes
 * each layer (plus sizes/counts/type, all big-endian) into the encap
 * action-properties table, then sets the VXLAN_ENCAP action bit.
 * FIX(review): HTML-entity mojibake restored ("¶ms" -> "&params", two
 * occurrences); stale "hdr_bitmap" comment corrected (this is the action
 * bitmap).
 * NOTE(review): listing has gaps — NULL-check braces, 'buff' declaration,
 * vlan_num increments and several argument lines are on missing lines.
 */
945 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
946 struct ulp_rte_parser_params *params)
948 const struct rte_flow_action_vxlan_encap *vxlan_encap;
949 const struct rte_flow_item *item;
950 const struct rte_flow_item_eth *eth_spec;
951 const struct rte_flow_item_ipv4 *ipv4_spec;
952 const struct rte_flow_item_ipv6 *ipv6_spec;
953 struct rte_flow_item_vxlan vxlan_spec;
954 uint32_t vlan_num = 0, vlan_size = 0;
955 uint32_t ip_size = 0, ip_type = 0;
956 uint32_t vxlan_size = 0;
958 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
959 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
961 struct ulp_rte_act_bitmap *act = &params->act_bitmap;
962 struct ulp_rte_act_prop *ap = &params->act_prop;
964 vxlan_encap = action_item->conf;
966 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
967 return BNXT_TF_RC_ERROR;
970 item = vxlan_encap->definition;
972 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
973 return BNXT_TF_RC_ERROR;
976 if (!ulp_rte_item_skip_void(&item, 0))
977 return BNXT_TF_RC_ERROR;
979 /* must have ethernet header */
980 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
981 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
982 return BNXT_TF_RC_ERROR;
984 eth_spec = item->spec;
985 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
986 ulp_encap_buffer_copy(buff,
987 eth_spec->dst.addr_bytes,
988 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
990 /* Goto the next item */
991 if (!ulp_rte_item_skip_void(&item, 1))
992 return BNXT_TF_RC_ERROR;
994 /* May have vlan header */
995 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
997 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
998 ulp_encap_buffer_copy(buff,
1000 sizeof(struct rte_flow_item_vlan));
1002 if (!ulp_rte_item_skip_void(&item, 1))
1003 return BNXT_TF_RC_ERROR;
1006 /* may have two vlan headers */
1007 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1009 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1010 sizeof(struct rte_flow_item_vlan)],
1012 sizeof(struct rte_flow_item_vlan));
1013 if (!ulp_rte_item_skip_void(&item, 1))
1014 return BNXT_TF_RC_ERROR;
1016 /* Update the vlan count and size of more than one */
1018 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1019 vlan_num = tfp_cpu_to_be_32(vlan_num);
1020 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1023 vlan_size = tfp_cpu_to_be_32(vlan_size);
1024 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1029 /* L3 must be IPv4, IPv6 */
1030 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1031 ipv4_spec = item->spec;
1032 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1034 /* copy the ipv4 details */
/* Empty ver/hlen+TOS in the spec -> substitute the default IPv4 header. */
1035 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1036 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1037 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1038 ulp_encap_buffer_copy(buff,
1040 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1041 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1043 const uint8_t *tmp_buff;
1045 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1046 ulp_encap_buffer_copy(buff,
1047 &ipv4_spec->hdr.version_ihl,
1048 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1049 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1050 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1051 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1052 ulp_encap_buffer_copy(buff,
1054 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1056 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1057 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1058 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1059 ulp_encap_buffer_copy(buff,
1060 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1061 BNXT_ULP_ENCAP_IPV4_DEST_IP);
1063 /* Update the ip size details */
1064 ip_size = tfp_cpu_to_be_32(ip_size);
1065 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1066 &ip_size, sizeof(uint32_t));
1068 /* update the ip type */
1069 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1070 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1071 &ip_type, sizeof(uint32_t));
1073 if (!ulp_rte_item_skip_void(&item, 1))
1074 return BNXT_TF_RC_ERROR;
1075 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1076 ipv6_spec = item->spec;
1077 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1079 /* copy the ipv6 details */
1080 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1081 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1083 /* Update the ip size details */
1084 ip_size = tfp_cpu_to_be_32(ip_size);
1085 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1086 &ip_size, sizeof(uint32_t));
1088 /* update the ip type */
1089 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1090 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1091 &ip_type, sizeof(uint32_t));
1093 if (!ulp_rte_item_skip_void(&item, 1))
1094 return BNXT_TF_RC_ERROR;
1096 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1097 return BNXT_TF_RC_ERROR;
1101 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1102 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1103 return BNXT_TF_RC_ERROR;
1105 /* copy the udp details */
1106 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1107 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1109 if (!ulp_rte_item_skip_void(&item, 1))
1110 return BNXT_TF_RC_ERROR;
1113 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1114 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1115 return BNXT_TF_RC_ERROR;
1117 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1118 /* copy the vxlan details */
1119 memcpy(&vxlan_spec, item->spec, vxlan_size);
/* Force the VNI-valid flag (I bit) in the VXLAN header. */
1120 vxlan_spec.flags = 0x08;
1121 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1122 (const uint8_t *)&vxlan_spec,
1124 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1125 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1126 &vxlan_size, sizeof(uint32_t));
1128 /* update the action bitmap with vxlan encap */
1129 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1130 return BNXT_TF_RC_SUCCESS;
1133 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1135 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1137 struct ulp_rte_parser_params *params)
1139 /* update the hdr_bitmap with vxlan */
1140 ULP_BITMAP_SET(params->act_bitmap.bits,
1141 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1142 return BNXT_TF_RC_SUCCESS;
1145 /* Function to handle the parsing of RTE Flow action drop Header. */
1147 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1148 struct ulp_rte_parser_params *params)
1150 /* Update the hdr_bitmap with drop */
1151 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1152 return BNXT_TF_RC_SUCCESS;
1155 /* Function to handle the parsing of RTE Flow action count. */
1157 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1158 struct ulp_rte_parser_params *params)
1161 const struct rte_flow_action_count *act_count;
1162 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1164 act_count = action_item->conf;
1166 if (act_count->shared) {
1168 "Parse Error:Shared count not supported\n");
1169 return BNXT_TF_RC_PARSE_ERR;
1171 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1173 BNXT_ULP_ACT_PROP_SZ_COUNT);
1176 /* Update the hdr_bitmap with count */
1177 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1178 return BNXT_TF_RC_SUCCESS;
1181 /* Function to handle the parsing of RTE Flow action PF. */
1183 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1184 struct ulp_rte_parser_params *param)
1187 uint8_t *vnic_buffer;
1190 /* Update the hdr_bitmap with vnic bit */
1191 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1193 /* copy the PF of the current device into VNIC Property */
1194 svif_buf = ¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
1195 ulp_util_field_int_read(svif_buf, &svif);
1196 svif = (uint32_t)bnxt_get_vnic_id(svif);
1198 vnic_buffer = ¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
1199 ulp_util_field_int_write(vnic_buffer, svif);
1201 return BNXT_TF_RC_SUCCESS;
1204 /* Function to handle the parsing of RTE Flow action VF. */
1206 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1207 struct ulp_rte_parser_params *param)
1209 const struct rte_flow_action_vf *vf_action;
1211 vf_action = action_item->conf;
1213 if (vf_action->original) {
1215 "Parse Error:VF Original not supported\n");
1216 return BNXT_TF_RC_PARSE_ERR;
1218 /* TBD: Update the computed VNIC using VF conversion */
1219 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1221 BNXT_ULP_ACT_PROP_SZ_VNIC);
1224 /* Update the hdr_bitmap with count */
1225 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1226 return BNXT_TF_RC_SUCCESS;
1229 /* Function to handle the parsing of RTE Flow action port_id. */
1231 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1232 struct ulp_rte_parser_params *param)
1234 const struct rte_flow_action_port_id *port_id;
1236 port_id = act_item->conf;
1238 if (port_id->original) {
1240 "ParseErr:Portid Original not supported\n");
1241 return BNXT_TF_RC_PARSE_ERR;
1243 /* TBD: Update the computed VNIC using port conversion */
1244 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1246 BNXT_ULP_ACT_PROP_SZ_VNIC);
1249 /* Update the hdr_bitmap with count */
1250 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1251 return BNXT_TF_RC_SUCCESS;
1254 /* Function to handle the parsing of RTE Flow action phy_port. */
1256 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1257 struct ulp_rte_parser_params *prm)
1259 const struct rte_flow_action_phy_port *phy_port;
1261 phy_port = action_item->conf;
1263 if (phy_port->original) {
1265 "Parse Err:Port Original not supported\n");
1266 return BNXT_TF_RC_PARSE_ERR;
1268 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1270 BNXT_ULP_ACT_PROP_SZ_VPORT);
1273 /* Update the hdr_bitmap with count */
1274 ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1275 return BNXT_TF_RC_SUCCESS;