/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db.h"
#include "ulp_template_struct.h"
#include "bnxt_tf_common.h"
#include "ulp_rte_parser.h"
#include "ulp_utils.h"
#include "tfp.h"
/* Inline function to read an integer stored in big-endian format */
static inline void ulp_util_field_int_read(uint8_t *buffer,
					   uint32_t *val)
{
	uint32_t temp_val;

	memcpy(&temp_val, buffer, sizeof(uint32_t));
	*val = rte_be_to_cpu_32(temp_val);
}
/* Inline function to write an integer in big-endian format */
static inline void ulp_util_field_int_write(uint8_t *buffer,
					    uint32_t val)
{
	uint32_t temp_val = rte_cpu_to_be_32(val);

	memcpy(buffer, &temp_val, sizeof(uint32_t));
}
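
/*
 * Illustrative sketch, not part of the driver: the two helpers above
 * round-trip a host-order integer through a big-endian byte buffer.
 *
 *	uint8_t buf[sizeof(uint32_t)];
 *	uint32_t out = 0;
 *
 *	ulp_util_field_int_write(buf, 0x1234);	// buf = 00 00 12 34
 *	ulp_util_field_int_read(buf, &out);	// out == 0x1234 again
 */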
/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
	if (!*item)
		return 0;
	if (increment)
		(*item)++;
	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
		(*item)++;
	if (*item)
		return 1;
	return 0;
}
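
/*
 * Illustrative sketch, not part of the driver: given a cursor into an
 * item array such as [VOID, ETH, VOID, IPV4, END], the helper above
 * advances past any run of VOID items and reports whether an item is
 * still available at the cursor.
 *
 *	const struct rte_flow_item *cur = pattern;
 *
 *	if (!ulp_rte_item_skip_void(&cur, 0))	// cur now points at ETH
 *		return BNXT_TF_RC_ERROR;
 *	if (!ulp_rte_item_skip_void(&cur, 1))	// step once, skip VOIDs
 *		return BNXT_TF_RC_ERROR;	// cur now points at IPV4
 */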
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_hdr_bitmap *hdr_bitmap,
			      struct ulp_rte_hdr_field *hdr_field)
{
	const struct rte_flow_item *item = pattern;
	uint32_t field_idx = BNXT_ULP_HDR_FIELD_LAST;
	uint32_t vlan_idx = 0;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* get the header information from the flow_hdr_info table */
		hdr_info = &ulp_hdr_info[item->type];
		if (hdr_info->hdr_type ==
		    BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			BNXT_TF_DBG(ERR,
				    "Truflow parser does not support type %d\n",
				    item->type);
			return BNXT_TF_RC_PARSE_ERR;
		} else if (hdr_info->hdr_type ==
			   BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item,
							     hdr_bitmap,
							     hdr_field,
							     &field_idx,
							     &vlan_idx) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	(void)ulp_rte_parser_svif_process(hdr_bitmap, hdr_field);
	return BNXT_TF_RC_SUCCESS;
}
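
/*
 * Illustrative sketch, not part of the driver: a caller would feed the
 * parser the pattern array received through rte_flow_create(); array
 * sizing assumes the BNXT_ULP_PROTO_HDR_MAX bound used by the
 * flow-create path.
 *
 *	struct ulp_rte_hdr_bitmap hdr_bitmap;
 *	struct ulp_rte_hdr_field hdr_field[BNXT_ULP_PROTO_HDR_MAX];
 *
 *	memset(&hdr_bitmap, 0, sizeof(hdr_bitmap));
 *	memset(hdr_field, 0, sizeof(hdr_field));
 *	if (bnxt_ulp_rte_parser_hdr_parse(pattern, &hdr_bitmap,
 *					  hdr_field) != BNXT_TF_RC_SUCCESS)
 *		return -EINVAL;
 */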
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_act_bitmap *act_bitmap,
			      struct ulp_rte_act_prop *act_prop)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the actions in the action array */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		/* get the action information from the act_info table */
		hdr_info = &ulp_act_info[action_item->type];
		if (hdr_info->act_type ==
		    BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			BNXT_TF_DBG(ERR,
				    "Truflow parser does not support act %u\n",
				    action_item->type);
			return BNXT_TF_RC_ERROR;
		} else if (hdr_info->act_type ==
			   BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
							     act_bitmap,
							     act_prop) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		action_item++;
	}
	return BNXT_TF_RC_SUCCESS;
}
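
/*
 * Illustrative sketch, not part of the driver: the action parser is
 * driven the same way, with the action array from rte_flow_create():
 *
 *	struct ulp_rte_act_bitmap act_bitmap;
 *	struct ulp_rte_act_prop act_prop;
 *
 *	memset(&act_bitmap, 0, sizeof(act_bitmap));
 *	memset(&act_prop, 0, sizeof(act_prop));
 *	if (bnxt_ulp_rte_parser_act_parse(actions, &act_bitmap,
 *					  &act_prop) != BNXT_TF_RC_SUCCESS)
 *		return -EINVAL;
 */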
/* Utility function to update the SVIF header field and bitmap. */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_hdr_bitmap *hdr_bitmap,
			struct ulp_rte_hdr_field *hdr_field,
			enum rte_flow_item_type proto,
			uint32_t dir,
			uint16_t svif,
			uint16_t mask)
{
	uint16_t port_id = svif;

	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF)) {
		BNXT_TF_DBG(ERR,
			    "SVIF already set,"
			    " multiple sources not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF. */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF);

	if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
		/* perform the conversion from dpdk port to svif */
		if (dir == ULP_DIR_EGRESS)
			svif = bnxt_get_svif(port_id, true);
		else
			svif = bnxt_get_svif(port_id, false);
	}

	memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec,
	       &svif, sizeof(svif));
	memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask,
	       &mask, sizeof(mask));
	hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the implied SVIF based on the stashed RTE port id. */
int32_t
ulp_rte_parser_svif_process(struct ulp_rte_hdr_bitmap *hdr_bitmap,
			    struct ulp_rte_hdr_field *hdr_field)
{
	uint16_t port_id = 0;
	uint16_t dir = 0;
	uint8_t *buffer;
	uint16_t svif_mask = 0xFFFF;

	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF))
		return BNXT_TF_RC_SUCCESS;

	/* SVIF not set. So get the port id and direction */
	buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec;
	memcpy(&port_id, buffer, sizeof(port_id));
	memcpy(&dir, buffer + sizeof(port_id), sizeof(dir));
	memset(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, 0,
	       RTE_PARSER_FLOW_HDR_FIELD_SIZE);

	return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
				       RTE_FLOW_ITEM_TYPE_PORT_ID,
				       dir, port_id, svif_mask);
}
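
/*
 * Illustrative sketch, not part of the driver: before any explicit
 * source item is parsed, the flow-create path stashes the DPDK port id
 * and direction back-to-back in the SVIF spec buffer, which the two
 * functions above read back out:
 *
 *	byte offset 0..1: DPDK port id (uint16_t)
 *	byte offset 2..3: direction    (uint16_t, e.g. ULP_DIR_EGRESS)
 */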
/* Function to handle the parsing of RTE Flow item PF Header. */
int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
		       struct ulp_rte_hdr_bitmap *hdr_bitmap,
		       struct ulp_rte_hdr_field *hdr_field,
		       uint32_t *field_idx __rte_unused,
		       uint32_t *vlan_idx __rte_unused)
{
	uint16_t port_id = 0;
	uint16_t dir = 0;
	uint8_t *buffer;
	uint16_t svif_mask = 0xFFFF;

	buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec;
	memcpy(&port_id, buffer, sizeof(port_id));
	memcpy(&dir, buffer + sizeof(port_id), sizeof(dir));
	memset(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, 0,
	       RTE_PARSER_FLOW_HDR_FIELD_SIZE);

	return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
				       item->type,
				       dir, port_id, svif_mask);
}
/* Function to handle the parsing of RTE Flow item VF Header. */
int32_t
ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
		       struct ulp_rte_hdr_bitmap *hdr_bitmap,
		       struct ulp_rte_hdr_field *hdr_field,
		       uint32_t *field_idx __rte_unused,
		       uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_vf *vf_spec, *vf_mask;
	uint16_t svif = 0, mask = 0;

	vf_spec = item->spec;
	vf_mask = item->mask;

	/* Copy the rte_flow_item_vf id into the SVIF header field. */
	if (vf_spec)
		svif = (uint16_t)vf_spec->id;
	if (vf_mask)
		mask = (uint16_t)vf_mask->id;

	return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
				       item->type, 0, svif, mask);
}
/* Function to handle the parsing of RTE Flow item port id Header. */
int32_t
ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
			    struct ulp_rte_hdr_bitmap *hdr_bitmap,
			    struct ulp_rte_hdr_field *hdr_field,
			    uint32_t *field_idx __rte_unused,
			    uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_port_id *port_spec, *port_mask;
	uint16_t svif = 0, mask = 0;
	uint16_t dir = 0;
	uint8_t *buffer;

	port_spec = item->spec;
	port_mask = item->mask;

	/*
	 * Copy the rte_flow_item for Port into hdr_field using port id
	 * header fields.
	 */
	if (port_spec)
		svif = (uint16_t)port_spec->id;
	if (port_mask)
		mask = (uint16_t)port_mask->id;

	buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec;
	memcpy(&dir, buffer + sizeof(uint16_t), sizeof(uint16_t));

	return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
				       item->type, dir, svif, mask);
}
/* Function to handle the parsing of RTE Flow item phy port Header. */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
			     struct ulp_rte_hdr_bitmap *hdr_bitmap,
			     struct ulp_rte_hdr_field *hdr_field,
			     uint32_t *field_idx __rte_unused,
			     uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_phy_port *port_spec, *port_mask;
	uint32_t svif = 0, mask = 0;

	port_spec = item->spec;
	port_mask = item->mask;

	/* Copy the rte_flow_item for phy port into hdr_field */
	if (port_spec)
		svif = port_spec->index;
	if (port_mask)
		mask = port_mask->index;

	return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
				       item->type, 0, svif, mask);
}
/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_hdr_bitmap *hdr_bitmap,
			struct ulp_rte_hdr_field *hdr_field,
			uint32_t *field_idx,
			uint32_t *vlan_idx)
{
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	uint32_t idx = *field_idx;
	uint32_t mdx = *field_idx;
	uint64_t set_flag = 0;

	eth_spec = item->spec;
	eth_mask = item->mask;

	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	if (eth_spec) {
		hdr_field[idx].size = sizeof(eth_spec->dst.addr_bytes);
		memcpy(hdr_field[idx++].spec, eth_spec->dst.addr_bytes,
		       sizeof(eth_spec->dst.addr_bytes));
		hdr_field[idx].size = sizeof(eth_spec->src.addr_bytes);
		memcpy(hdr_field[idx++].spec, eth_spec->src.addr_bytes,
		       sizeof(eth_spec->src.addr_bytes));
		hdr_field[idx].size = sizeof(eth_spec->type);
		memcpy(hdr_field[idx++].spec, &eth_spec->type,
		       sizeof(eth_spec->type));
	} else {
		idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
	}

	if (eth_mask) {
		memcpy(hdr_field[mdx++].mask, eth_mask->dst.addr_bytes,
		       sizeof(eth_mask->dst.addr_bytes));
		memcpy(hdr_field[mdx++].mask, eth_mask->src.addr_bytes,
		       sizeof(eth_mask->src.addr_bytes));
		memcpy(hdr_field[mdx++].mask, &eth_mask->type,
		       sizeof(eth_mask->type));
	}
	/* Add number of vlan header elements */
	*field_idx = idx + BNXT_ULP_PROTO_HDR_VLAN_NUM;
	*vlan_idx = idx;

	/* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
	set_flag = ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH);
	if (set_flag)
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH);
	else
		ULP_BITMAP_RESET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH);

	/* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH);

	return BNXT_TF_RC_SUCCESS;
}
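
/*
 * Illustrative sketch, not part of the driver: the bitmap handling
 * above makes ETH items self-ordering. The first ETH item in a pattern
 * only sets O_ETH; when a second ETH item arrives (a tunnelled inner
 * frame), O_ETH is already set, so I_ETH is set as well:
 *
 *	pattern ETH / IPV4 / UDP / VXLAN / ETH / IPV4 ...
 *	        ^ sets O_ETH               ^ sets I_ETH
 */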
/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_hdr_bitmap *hdr_bitmap,
			 struct ulp_rte_hdr_field *hdr_field,
			 uint32_t *field_idx __rte_unused,
			 uint32_t *vlan_idx)
{
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	uint32_t idx = *vlan_idx;
	uint32_t mdx = *vlan_idx;
	uint16_t vlan_tag, priority;
	uint32_t outer_vtag_num = 0, inner_vtag_num = 0;
	uint8_t *outer_tag_buffer;
	uint8_t *inner_tag_buffer;

	vlan_spec = item->spec;
	vlan_mask = item->mask;
	outer_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].spec;
	inner_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].spec;

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	if (vlan_spec) {
		vlan_tag = ntohs(vlan_spec->tci);
		priority = htons(vlan_tag >> 13);
		vlan_tag &= 0xfff;
		vlan_tag = htons(vlan_tag);

		hdr_field[idx].size = sizeof(priority);
		memcpy(hdr_field[idx++].spec, &priority, sizeof(priority));
		hdr_field[idx].size = sizeof(vlan_tag);
		memcpy(hdr_field[idx++].spec, &vlan_tag, sizeof(vlan_tag));
		hdr_field[idx].size = sizeof(vlan_spec->inner_type);
		memcpy(hdr_field[idx++].spec, &vlan_spec->inner_type,
		       sizeof(vlan_spec->inner_type));
	} else {
		idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
	}

	if (vlan_mask) {
		vlan_tag = ntohs(vlan_mask->tci);
		priority = htons(vlan_tag >> 13);
		vlan_tag &= 0xfff;
		vlan_tag = htons(vlan_tag);

		memcpy(hdr_field[mdx++].mask, &priority, sizeof(priority));
		memcpy(hdr_field[mdx++].mask, &vlan_tag, sizeof(vlan_tag));
		memcpy(hdr_field[mdx++].mask, &vlan_mask->inner_type,
		       sizeof(vlan_mask->inner_type));
	}
	/* Set the vlan index to new incremented value */
	*vlan_idx = idx;

	/* Get the outer tag and inner tag counts */
	ulp_util_field_int_read(outer_tag_buffer, &outer_vtag_num);
	ulp_util_field_int_read(inner_tag_buffer, &inner_vtag_num);

	/* Update the hdr_bitmap of the vlans */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) {
		/* Set the outer vlan bit and update the vlan tag num */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN);
		outer_vtag_num++;
		ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num);
		hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
			sizeof(uint32_t);
	} else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bitmap->bits,
				    BNXT_ULP_HDR_BIT_OO_VLAN) &&
		   !ULP_BITMAP_ISSET(hdr_bitmap->bits,
				     BNXT_ULP_HDR_BIT_OI_VLAN)) {
		/* Set the outer vlan bit and update the vlan tag num */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OI_VLAN);
		outer_vtag_num++;
		ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num);
		hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
			sizeof(uint32_t);
	} else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
				    BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bitmap->bits,
				    BNXT_ULP_HDR_BIT_OO_VLAN) &&
		   ULP_BITMAP_ISSET(hdr_bitmap->bits,
				    BNXT_ULP_HDR_BIT_OI_VLAN) &&
		   ULP_BITMAP_ISSET(hdr_bitmap->bits,
				    BNXT_ULP_HDR_BIT_I_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bitmap->bits,
				     BNXT_ULP_HDR_BIT_IO_VLAN)) {
		/* Set the inner vlan bit and update the vlan tag num */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_IO_VLAN);
		inner_vtag_num++;
		ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num);
		hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
			sizeof(uint32_t);
	} else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
				    BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bitmap->bits,
				    BNXT_ULP_HDR_BIT_OO_VLAN) &&
		   ULP_BITMAP_ISSET(hdr_bitmap->bits,
				    BNXT_ULP_HDR_BIT_OI_VLAN) &&
		   ULP_BITMAP_ISSET(hdr_bitmap->bits,
				    BNXT_ULP_HDR_BIT_I_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bitmap->bits,
				    BNXT_ULP_HDR_BIT_IO_VLAN) &&
		   !ULP_BITMAP_ISSET(hdr_bitmap->bits,
				     BNXT_ULP_HDR_BIT_II_VLAN)) {
		/* Set the inner vlan bit and update the vlan tag num */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_II_VLAN);
		inner_vtag_num++;
		ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num);
		hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
			sizeof(uint32_t);
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing: VLAN hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	return BNXT_TF_RC_SUCCESS;
}
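
/*
 * Illustrative sketch, not part of the driver: the if/else ladder above
 * accepts at most two VLAN tags per ethernet header and maps each
 * successive VLAN item to one of four slots:
 *
 *	1st VLAN after outer ETH -> OO_VLAN (outer vtag count = 1)
 *	2nd VLAN after outer ETH -> OI_VLAN (outer vtag count = 2)
 *	1st VLAN after inner ETH -> IO_VLAN (inner vtag count = 1)
 *	2nd VLAN after inner ETH -> II_VLAN (inner vtag count = 2)
 *
 * Any VLAN item that does not fit this progression is rejected.
 */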
/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_hdr_bitmap *hdr_bitmap,
			 struct ulp_rte_hdr_field *hdr_field,
			 uint32_t *field_idx,
			 uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	uint32_t idx = *field_idx;
	uint32_t mdx = *field_idx;

	ipv4_spec = item->spec;
	ipv4_mask = item->mask;

	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
		BNXT_TF_DBG(ERR, "Parse Error: Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	if (ipv4_spec) {
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.version_ihl);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.version_ihl,
		       sizeof(ipv4_spec->hdr.version_ihl));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.type_of_service);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.type_of_service,
		       sizeof(ipv4_spec->hdr.type_of_service));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.total_length);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.total_length,
		       sizeof(ipv4_spec->hdr.total_length));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.packet_id);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.packet_id,
		       sizeof(ipv4_spec->hdr.packet_id));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.fragment_offset);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.fragment_offset,
		       sizeof(ipv4_spec->hdr.fragment_offset));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.time_to_live);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.time_to_live,
		       sizeof(ipv4_spec->hdr.time_to_live));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.next_proto_id);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.next_proto_id,
		       sizeof(ipv4_spec->hdr.next_proto_id));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.hdr_checksum);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.hdr_checksum,
		       sizeof(ipv4_spec->hdr.hdr_checksum));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.src_addr);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.src_addr,
		       sizeof(ipv4_spec->hdr.src_addr));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.dst_addr);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.dst_addr,
		       sizeof(ipv4_spec->hdr.dst_addr));
	} else {
		idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
	}

	if (ipv4_mask) {
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.version_ihl,
		       sizeof(ipv4_mask->hdr.version_ihl));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.type_of_service,
		       sizeof(ipv4_mask->hdr.type_of_service));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.total_length,
		       sizeof(ipv4_mask->hdr.total_length));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.packet_id,
		       sizeof(ipv4_mask->hdr.packet_id));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.fragment_offset,
		       sizeof(ipv4_mask->hdr.fragment_offset));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.time_to_live,
		       sizeof(ipv4_mask->hdr.time_to_live));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.next_proto_id,
		       sizeof(ipv4_mask->hdr.next_proto_id));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.hdr_checksum,
		       sizeof(ipv4_mask->hdr.hdr_checksum));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.src_addr,
		       sizeof(ipv4_mask->hdr.src_addr));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.dst_addr,
		       sizeof(ipv4_mask->hdr.dst_addr));
	}
	*field_idx = idx; /* Number of ipv4 header elements */

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
	}
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item IPV6 Header */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_hdr_bitmap *hdr_bitmap,
			 struct ulp_rte_hdr_field *hdr_field,
			 uint32_t *field_idx,
			 uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	uint32_t idx = *field_idx;
	uint32_t mdx = *field_idx;

	ipv6_spec = item->spec;
	ipv6_mask = item->mask;

	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
		BNXT_TF_DBG(ERR, "Parse Error: Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		hdr_field[idx].size = sizeof(ipv6_spec->hdr.vtc_flow);
		memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.vtc_flow,
		       sizeof(ipv6_spec->hdr.vtc_flow));
		hdr_field[idx].size = sizeof(ipv6_spec->hdr.payload_len);
		memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.payload_len,
		       sizeof(ipv6_spec->hdr.payload_len));
		hdr_field[idx].size = sizeof(ipv6_spec->hdr.proto);
		memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.proto,
		       sizeof(ipv6_spec->hdr.proto));
		hdr_field[idx].size = sizeof(ipv6_spec->hdr.hop_limits);
		memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.hop_limits,
		       sizeof(ipv6_spec->hdr.hop_limits));
		hdr_field[idx].size = sizeof(ipv6_spec->hdr.src_addr);
		memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.src_addr,
		       sizeof(ipv6_spec->hdr.src_addr));
		hdr_field[idx].size = sizeof(ipv6_spec->hdr.dst_addr);
		memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.dst_addr,
		       sizeof(ipv6_spec->hdr.dst_addr));
	} else {
		idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
	}

	if (ipv6_mask) {
		memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.vtc_flow,
		       sizeof(ipv6_mask->hdr.vtc_flow));
		memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.payload_len,
		       sizeof(ipv6_mask->hdr.payload_len));
		memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.proto,
		       sizeof(ipv6_mask->hdr.proto));
		memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.hop_limits,
		       sizeof(ipv6_mask->hdr.hop_limits));
		memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.src_addr,
		       sizeof(ipv6_mask->hdr.src_addr));
		memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.dst_addr,
		       sizeof(ipv6_mask->hdr.dst_addr));
	}
	*field_idx = idx; /* add number of ipv6 header elements */

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
	}
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_hdr_bitmap *hdr_bitmap,
			struct ulp_rte_hdr_field *hdr_field,
			uint32_t *field_idx,
			uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	uint32_t idx = *field_idx;
	uint32_t mdx = *field_idx;

	udp_spec = item->spec;
	udp_mask = item->mask;

	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
		BNXT_TF_DBG(ERR, "Parse Error: Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	if (udp_spec) {
		hdr_field[idx].size = sizeof(udp_spec->hdr.src_port);
		memcpy(hdr_field[idx++].spec, &udp_spec->hdr.src_port,
		       sizeof(udp_spec->hdr.src_port));
		hdr_field[idx].size = sizeof(udp_spec->hdr.dst_port);
		memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dst_port,
		       sizeof(udp_spec->hdr.dst_port));
		hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_len);
		memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_len,
		       sizeof(udp_spec->hdr.dgram_len));
		hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_cksum);
		memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_cksum,
		       sizeof(udp_spec->hdr.dgram_cksum));
	} else {
		idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
	}

	if (udp_mask) {
		memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.src_port,
		       sizeof(udp_mask->hdr.src_port));
		memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dst_port,
		       sizeof(udp_mask->hdr.dst_port));
		memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_len,
		       sizeof(udp_mask->hdr.dgram_len));
		memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_cksum,
		       sizeof(udp_mask->hdr.dgram_cksum));
	}
	*field_idx = idx; /* Add number of UDP header elements */

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
	}
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_hdr_bitmap *hdr_bitmap,
			struct ulp_rte_hdr_field *hdr_field,
			uint32_t *field_idx,
			uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	uint32_t idx = *field_idx;
	uint32_t mdx = *field_idx;

	tcp_spec = item->spec;
	tcp_mask = item->mask;

	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
		BNXT_TF_DBG(ERR, "Parse Error: Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	if (tcp_spec) {
		hdr_field[idx].size = sizeof(tcp_spec->hdr.src_port);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.src_port,
		       sizeof(tcp_spec->hdr.src_port));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.dst_port);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.dst_port,
		       sizeof(tcp_spec->hdr.dst_port));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.sent_seq);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.sent_seq,
		       sizeof(tcp_spec->hdr.sent_seq));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.recv_ack);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.recv_ack,
		       sizeof(tcp_spec->hdr.recv_ack));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.data_off);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.data_off,
		       sizeof(tcp_spec->hdr.data_off));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_flags);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_flags,
		       sizeof(tcp_spec->hdr.tcp_flags));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.rx_win);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.rx_win,
		       sizeof(tcp_spec->hdr.rx_win));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.cksum);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.cksum,
		       sizeof(tcp_spec->hdr.cksum));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_urp);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_urp,
		       sizeof(tcp_spec->hdr.tcp_urp));
	} else {
		idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
	}

	if (tcp_mask) {
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.src_port,
		       sizeof(tcp_mask->hdr.src_port));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.dst_port,
		       sizeof(tcp_mask->hdr.dst_port));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.sent_seq,
		       sizeof(tcp_mask->hdr.sent_seq));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.recv_ack,
		       sizeof(tcp_mask->hdr.recv_ack));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.data_off,
		       sizeof(tcp_mask->hdr.data_off));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_flags,
		       sizeof(tcp_mask->hdr.tcp_flags));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.rx_win,
		       sizeof(tcp_mask->hdr.rx_win));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.cksum,
		       sizeof(tcp_mask->hdr.cksum));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_urp,
		       sizeof(tcp_mask->hdr.tcp_urp));
	}
	*field_idx = idx; /* add number of TCP header elements */

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
	}
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_hdr_bitmap *hdrbitmap,
			  struct ulp_rte_hdr_field *hdr_field,
			  uint32_t *field_idx,
			  uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	uint32_t idx = *field_idx;
	uint32_t mdx = *field_idx;

	vxlan_spec = item->spec;
	vxlan_mask = item->mask;

	/*
	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
	 * header fields
	 */
	if (vxlan_spec) {
		hdr_field[idx].size = sizeof(vxlan_spec->flags);
		memcpy(hdr_field[idx++].spec, &vxlan_spec->flags,
		       sizeof(vxlan_spec->flags));
		hdr_field[idx].size = sizeof(vxlan_spec->rsvd0);
		memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd0,
		       sizeof(vxlan_spec->rsvd0));
		hdr_field[idx].size = sizeof(vxlan_spec->vni);
		memcpy(hdr_field[idx++].spec, &vxlan_spec->vni,
		       sizeof(vxlan_spec->vni));
		hdr_field[idx].size = sizeof(vxlan_spec->rsvd1);
		memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd1,
		       sizeof(vxlan_spec->rsvd1));
	} else {
		idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
	}

	if (vxlan_mask) {
		memcpy(hdr_field[mdx++].mask, &vxlan_mask->flags,
		       sizeof(vxlan_mask->flags));
		memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd0,
		       sizeof(vxlan_mask->rsvd0));
		memcpy(hdr_field[mdx++].mask, &vxlan_mask->vni,
		       sizeof(vxlan_mask->vni));
		memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd1,
		       sizeof(vxlan_mask->rsvd1));
	}
	*field_idx = idx; /* Add number of vxlan header elements */

	/* Update the hdr_bitmap with vxlan */
	ULP_BITMAP_SET(hdrbitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_hdr_bitmap *hdr_bit __rte_unused,
			 struct ulp_rte_hdr_field *hdr_field __rte_unused,
			 uint32_t *field_idx __rte_unused,
			 uint32_t *vlan_idx __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_act_bitmap *act __rte_unused,
			 struct ulp_rte_act_prop *act_prop __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow action Mark Header. */
int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
			 struct ulp_rte_act_bitmap *act,
			 struct ulp_rte_act_prop *act_prop)
{
	const struct rte_flow_action_mark *mark;
	uint32_t mark_id = 0;

	mark = action_item->conf;
	if (mark) {
		mark_id = tfp_cpu_to_be_32(mark->id);
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);

		/* Update the act_bitmap with mark */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
/* Function to handle the parsing of RTE Flow action RSS Header. */
int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
			struct ulp_rte_act_bitmap *act,
			struct ulp_rte_act_prop *act_prop __rte_unused)
{
	const struct rte_flow_action_rss *rss;

	rss = action_item->conf;
	if (rss) {
		/* Update the act_bitmap with rss */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_RSS);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_act_bitmap *act,
				struct ulp_rte_act_prop *ap)
{
	const struct rte_flow_action_vxlan_encap *vxlan_encap;
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	struct rte_flow_item_vxlan vxlan_spec;
	uint32_t vlan_num = 0, vlan_size = 0;
	uint32_t ip_size = 0, ip_type = 0;
	uint32_t vxlan_size = 0;
	uint8_t *buff;
	/* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
	const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
					0x00, 0x40, 0x11};

	vxlan_encap = action_item->conf;
	if (!vxlan_encap) {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	item = vxlan_encap->definition;
	if (!item) {
		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ulp_rte_item_skip_void(&item, 0))
		return BNXT_TF_RC_ERROR;

	/* must have ethernet header */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		BNXT_TF_DBG(ERR, "Parse Error: vxlan encap does not have eth\n");
		return BNXT_TF_RC_ERROR;
	}
	eth_spec = item->spec;
	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->dst.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);

	/* Goto the next item */
	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* May have vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
		ulp_encap_buffer_copy(buff,
				      item->spec,
				      sizeof(struct rte_flow_item_vlan));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* may have two vlan headers */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
		       sizeof(struct rte_flow_item_vlan)],
		       item->spec,
		       sizeof(struct rte_flow_item_vlan));
		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}
	/* Update the vlan count and size if one or more headers found */
	if (vlan_num) {
		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
		vlan_num = tfp_cpu_to_be_32(vlan_num);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
		       &vlan_num, sizeof(uint32_t));

		vlan_size = tfp_cpu_to_be_32(vlan_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
		       &vlan_size, sizeof(uint32_t));
	}

	/* L3 must be IPv4, IPv6 */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		ipv4_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

		/* copy the ipv4 details */
		if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      def_ipv4_hdr,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
		} else {
			const uint8_t *tmp_buff;

			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      &ipv4_spec->hdr.version_ihl,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
						BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
			tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
		}
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					BNXT_ULP_ENCAP_IPV4_ID_PROTO];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.dst_addr,
				      BNXT_ULP_ENCAP_IPV4_DEST_IP);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		ipv6_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

		/* copy the ipv6 details */
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
		       ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
		return BNXT_TF_RC_ERROR;
	}

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
		return BNXT_TF_RC_ERROR;
	}
	/* copy the udp details */
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
			      item->spec, BNXT_ULP_ENCAP_UDP_SIZE);

	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* Last item must be vxlan */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
		return BNXT_TF_RC_ERROR;
	}
	vxlan_size = sizeof(struct rte_flow_item_vxlan);
	/* copy the vxlan details */
	memcpy(&vxlan_spec, item->spec, vxlan_size);
	vxlan_spec.flags = 0x08;
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
			      (const uint8_t *)&vxlan_spec,
			      vxlan_size);
	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
	       &vxlan_size, sizeof(uint32_t));

	/* update the act_bitmap with vxlan */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
	return BNXT_TF_RC_SUCCESS;
}
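
/*
 * Illustrative sketch, not part of the driver: the encap parser above
 * expects the action's item template in this exact order (VOID items
 * are skipped at every step):
 *
 *	ETH / [VLAN [/ VLAN]] / IPV4 or IPV6 / UDP / VXLAN / END
 *
 * An all-zero IPv4 version/ihl + TOS field selects the default header
 * bytes in def_ipv4_hdr instead of the user-supplied ones.
 */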
/* Function to handle the parsing of RTE Flow action vxlan_decap Header */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_act_bitmap *act,
				struct ulp_rte_act_prop *act_prop __rte_unused)
{
	/* update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_act_bitmap *act,
			 struct ulp_rte_act_prop *act_prop __rte_unused)
{
	/* Update the act_bitmap with drop */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow action count. */
int32_t
ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
			  struct ulp_rte_act_bitmap *act,
			  struct ulp_rte_act_prop *act_prop)
{
	const struct rte_flow_action_count *act_count;

	act_count = action_item->conf;
	if (act_count) {
		if (act_count->shared) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: Shared count not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
		       &act_count->id,
		       BNXT_ULP_ACT_PROP_SZ_COUNT);
	}

	/* Update the act_bitmap with count */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_COUNT);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow action PF. */
int32_t
ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
		       struct ulp_rte_act_bitmap *act,
		       struct ulp_rte_act_prop *act_prop)
{
	uint8_t *svif_buf;
	uint8_t *vnic_buffer;
	uint32_t svif;

	/* Update the act_bitmap with vnic bit */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);

	/* copy the PF of the current device into VNIC Property */
	svif_buf = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
	ulp_util_field_int_read(svif_buf, &svif);
	svif = (uint32_t)bnxt_get_vnic_id(svif);

	vnic_buffer = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
	ulp_util_field_int_write(vnic_buffer, svif);

	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow action VF. */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
		       struct ulp_rte_act_bitmap *act,
		       struct ulp_rte_act_prop *act_prop)
{
	const struct rte_flow_action_vf *vf_action;

	vf_action = action_item->conf;
	if (vf_action) {
		if (vf_action->original) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: VF Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		/* TBD: Update the computed VNIC using VF conversion */
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &vf_action->id,
		       BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the act_bitmap with vnic */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow action port_id. */
int32_t
ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
			    struct ulp_rte_act_bitmap *act,
			    struct ulp_rte_act_prop *act_prop)
{
	const struct rte_flow_action_port_id *port_id;

	port_id = act_item->conf;
	if (port_id) {
		if (port_id->original) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: Portid Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		/* TBD: Update the computed VNIC using port conversion */
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &port_id->id,
		       BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the act_bitmap with vnic */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow action phy_port. */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_act_bitmap *act,
			     struct ulp_rte_act_prop *act_prop)
{
	const struct rte_flow_action_phy_port *phy_port;

	phy_port = action_item->conf;
	if (phy_port) {
		if (phy_port->original) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: Port Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &phy_port->index,
		       BNXT_ULP_ACT_PROP_SZ_VPORT);
	}

	/* Update the act_bitmap with vport */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VPORT);
	return BNXT_TF_RC_SUCCESS;
}