1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
7 #include "ulp_template_db.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
/*
 * Inline helper to read a 32-bit integer stored in big endian format.
 * buffer [in]  byte buffer holding a big-endian uint32_t
 * val    [out] receives the value converted to CPU byte order
 */
static inline void ulp_util_field_int_read(uint8_t *buffer,
					   uint32_t *val)
{
	uint32_t temp_val;

	/* memcpy avoids misaligned-access UB on strict-alignment targets */
	memcpy(&temp_val, buffer, sizeof(uint32_t));
	*val = rte_be_to_cpu_32(temp_val);
}
/*
 * Inline helper to write a 32-bit integer in big endian format.
 * buffer [out] byte buffer receiving the big-endian representation
 * val    [in]  value in CPU byte order
 */
static inline void ulp_util_field_int_write(uint8_t *buffer,
					    uint32_t val)
{
	uint32_t temp_val = rte_cpu_to_be_32(val);

	/* memcpy avoids misaligned-access UB on strict-alignment targets */
	memcpy(buffer, &temp_val, sizeof(uint32_t));
}
33 /* Utility function to skip the void items. */
35 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
41 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
49 * Function to handle the parsing of RTE Flows and placing
50 * the RTE flow items into the ulp structures.
53 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
54 struct ulp_rte_hdr_bitmap *hdr_bitmap,
55 struct ulp_rte_hdr_field *hdr_field)
57 const struct rte_flow_item *item = pattern;
58 uint32_t field_idx = BNXT_ULP_HDR_FIELD_LAST;
59 uint32_t vlan_idx = 0;
60 struct bnxt_ulp_rte_hdr_info *hdr_info;
62 /* Parse all the items in the pattern */
63 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
64 /* get the header information from the flow_hdr_info table */
65 hdr_info = &ulp_hdr_info[item->type];
66 if (hdr_info->hdr_type ==
67 BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
69 "Truflow parser does not support type %d\n",
71 return BNXT_TF_RC_PARSE_ERR;
72 } else if (hdr_info->hdr_type ==
73 BNXT_ULP_HDR_TYPE_SUPPORTED) {
74 /* call the registered callback handler */
75 if (hdr_info->proto_hdr_func) {
76 if (hdr_info->proto_hdr_func(item,
82 return BNXT_TF_RC_ERROR;
88 return BNXT_TF_RC_SUCCESS;
92 * Function to handle the parsing of RTE Flows and placing
93 * the RTE flow actions into the ulp structures.
96 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
97 struct ulp_rte_act_bitmap *act_bitmap,
98 struct ulp_rte_act_prop *act_prop)
100 const struct rte_flow_action *action_item = actions;
101 struct bnxt_ulp_rte_act_info *hdr_info;
103 /* Parse all the items in the pattern */
104 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
105 /* get the header information from the flow_hdr_info table */
106 hdr_info = &ulp_act_info[action_item->type];
107 if (hdr_info->act_type ==
108 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
110 "Truflow parser does not support act %u\n",
112 return BNXT_TF_RC_ERROR;
113 } else if (hdr_info->act_type ==
114 BNXT_ULP_ACT_TYPE_SUPPORTED) {
115 /* call the registered callback handler */
116 if (hdr_info->proto_act_func) {
117 if (hdr_info->proto_act_func(action_item,
120 BNXT_TF_RC_SUCCESS) {
121 return BNXT_TF_RC_ERROR;
127 return BNXT_TF_RC_SUCCESS;
130 /* Function to handle the parsing of RTE Flow item PF Header. */
132 ulp_rte_parser_svif_set(struct ulp_rte_hdr_bitmap *hdr_bitmap,
133 struct ulp_rte_hdr_field *hdr_field,
134 enum rte_flow_item_type proto,
138 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF)) {
141 " multiple sources not supported\n");
142 return BNXT_TF_RC_ERROR;
145 /* TBD: Check for any mapping errors for svif */
146 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF. */
147 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF);
149 if (proto != RTE_FLOW_ITEM_TYPE_PF) {
150 memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec,
151 &svif, sizeof(svif));
152 memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask,
153 &mask, sizeof(mask));
154 hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif);
157 return BNXT_TF_RC_SUCCESS;
160 /* Function to handle the parsing of RTE Flow item PF Header. */
162 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
163 struct ulp_rte_hdr_bitmap *hdr_bitmap,
164 struct ulp_rte_hdr_field *hdr_field,
165 uint32_t *field_idx __rte_unused,
166 uint32_t *vlan_idx __rte_unused)
168 return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
172 /* Function to handle the parsing of RTE Flow item VF Header. */
174 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
175 struct ulp_rte_hdr_bitmap *hdr_bitmap,
176 struct ulp_rte_hdr_field *hdr_field,
177 uint32_t *field_idx __rte_unused,
178 uint32_t *vlan_idx __rte_unused)
180 const struct rte_flow_item_vf *vf_spec, *vf_mask;
181 uint32_t svif = 0, mask = 0;
183 vf_spec = item->spec;
184 vf_mask = item->mask;
187 * Copy the rte_flow_item for eth into hdr_field using ethernet
195 return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
196 item->type, svif, mask);
199 /* Function to handle the parsing of RTE Flow item port id Header. */
201 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
202 struct ulp_rte_hdr_bitmap *hdr_bitmap,
203 struct ulp_rte_hdr_field *hdr_field,
204 uint32_t *field_idx __rte_unused,
205 uint32_t *vlan_idx __rte_unused)
207 const struct rte_flow_item_port_id *port_spec, *port_mask;
208 uint32_t svif = 0, mask = 0;
210 port_spec = item->spec;
211 port_mask = item->mask;
214 * Copy the rte_flow_item for Port into hdr_field using port id
218 svif = port_spec->id;
220 mask = port_mask->id;
222 return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
223 item->type, svif, mask);
226 /* Function to handle the parsing of RTE Flow item phy port Header. */
228 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
229 struct ulp_rte_hdr_bitmap *hdr_bitmap,
230 struct ulp_rte_hdr_field *hdr_field,
231 uint32_t *field_idx __rte_unused,
232 uint32_t *vlan_idx __rte_unused)
234 const struct rte_flow_item_phy_port *port_spec, *port_mask;
235 uint32_t svif = 0, mask = 0;
237 port_spec = item->spec;
238 port_mask = item->mask;
240 /* Copy the rte_flow_item for phy port into hdr_field */
242 svif = port_spec->index;
244 mask = port_mask->index;
246 return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
247 item->type, svif, mask);
250 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
252 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
253 struct ulp_rte_hdr_bitmap *hdr_bitmap,
254 struct ulp_rte_hdr_field *hdr_field,
258 const struct rte_flow_item_eth *eth_spec, *eth_mask;
259 uint32_t idx = *field_idx;
260 uint32_t mdx = *field_idx;
261 uint64_t set_flag = 0;
263 eth_spec = item->spec;
264 eth_mask = item->mask;
267 * Copy the rte_flow_item for eth into hdr_field using ethernet
271 hdr_field[idx].size = sizeof(eth_spec->dst.addr_bytes);
272 memcpy(hdr_field[idx++].spec, eth_spec->dst.addr_bytes,
273 sizeof(eth_spec->dst.addr_bytes));
274 hdr_field[idx].size = sizeof(eth_spec->src.addr_bytes);
275 memcpy(hdr_field[idx++].spec, eth_spec->src.addr_bytes,
276 sizeof(eth_spec->src.addr_bytes));
277 hdr_field[idx].size = sizeof(eth_spec->type);
278 memcpy(hdr_field[idx++].spec, ð_spec->type,
279 sizeof(eth_spec->type));
281 idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
285 memcpy(hdr_field[mdx++].mask, eth_mask->dst.addr_bytes,
286 sizeof(eth_mask->dst.addr_bytes));
287 memcpy(hdr_field[mdx++].mask, eth_mask->src.addr_bytes,
288 sizeof(eth_mask->src.addr_bytes));
289 memcpy(hdr_field[mdx++].mask, ð_mask->type,
290 sizeof(eth_mask->type));
292 /* Add number of vlan header elements */
293 *field_idx = idx + BNXT_ULP_PROTO_HDR_VLAN_NUM;
296 /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
297 set_flag = ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH);
299 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH);
301 ULP_BITMAP_RESET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH);
303 /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
304 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH);
306 return BNXT_TF_RC_SUCCESS;
309 /* Function to handle the parsing of RTE Flow item Vlan Header. */
311 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
312 struct ulp_rte_hdr_bitmap *hdr_bitmap,
313 struct ulp_rte_hdr_field *hdr_field,
314 uint32_t *field_idx __rte_unused,
317 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
318 uint32_t idx = *vlan_idx;
319 uint32_t mdx = *vlan_idx;
320 uint16_t vlan_tag, priority;
321 uint32_t outer_vtag_num = 0, inner_vtag_num = 0;
322 uint8_t *outer_tag_buffer;
323 uint8_t *inner_tag_buffer;
325 vlan_spec = item->spec;
326 vlan_mask = item->mask;
327 outer_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].spec;
328 inner_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].spec;
331 * Copy the rte_flow_item for vlan into hdr_field using Vlan
335 vlan_tag = ntohs(vlan_spec->tci);
336 priority = htons(vlan_tag >> 13);
338 vlan_tag = htons(vlan_tag);
340 hdr_field[idx].size = sizeof(priority);
341 memcpy(hdr_field[idx++].spec, &priority, sizeof(priority));
342 hdr_field[idx].size = sizeof(vlan_tag);
343 memcpy(hdr_field[idx++].spec, &vlan_tag, sizeof(vlan_tag));
344 hdr_field[idx].size = sizeof(vlan_spec->inner_type);
345 memcpy(hdr_field[idx++].spec, &vlan_spec->inner_type,
346 sizeof(vlan_spec->inner_type));
348 idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
352 vlan_tag = ntohs(vlan_mask->tci);
353 priority = htons(vlan_tag >> 13);
355 vlan_tag = htons(vlan_tag);
357 memcpy(hdr_field[mdx++].mask, &priority, sizeof(priority));
358 memcpy(hdr_field[mdx++].mask, &vlan_tag, sizeof(vlan_tag));
359 memcpy(hdr_field[mdx++].mask, &vlan_mask->inner_type,
360 sizeof(vlan_mask->inner_type));
362 /* Set the vlan index to new incremented value */
365 /* Get the outer tag and inner tag counts */
366 ulp_util_field_int_read(outer_tag_buffer, &outer_vtag_num);
367 ulp_util_field_int_read(inner_tag_buffer, &inner_vtag_num);
369 /* Update the hdr_bitmap of the vlans */
370 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
371 !ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) {
372 /* Set the outer vlan bit and update the vlan tag num */
373 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN);
375 ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num);
376 hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
378 } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
379 ULP_BITMAP_ISSET(hdr_bitmap->bits,
380 BNXT_ULP_HDR_BIT_OO_VLAN) &&
381 !ULP_BITMAP_ISSET(hdr_bitmap->bits,
382 BNXT_ULP_HDR_BIT_OI_VLAN)) {
383 /* Set the outer vlan bit and update the vlan tag num */
384 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OI_VLAN);
386 ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num);
387 hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
389 } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
390 BNXT_ULP_HDR_BIT_O_ETH) &&
391 ULP_BITMAP_ISSET(hdr_bitmap->bits,
392 BNXT_ULP_HDR_BIT_OO_VLAN) &&
393 ULP_BITMAP_ISSET(hdr_bitmap->bits,
394 BNXT_ULP_HDR_BIT_OI_VLAN) &&
395 ULP_BITMAP_ISSET(hdr_bitmap->bits,
396 BNXT_ULP_HDR_BIT_I_ETH) &&
397 !ULP_BITMAP_ISSET(hdr_bitmap->bits,
398 BNXT_ULP_HDR_BIT_IO_VLAN)) {
399 /* Set the inner vlan bit and update the vlan tag num */
400 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_IO_VLAN);
402 ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num);
403 hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
405 } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
406 BNXT_ULP_HDR_BIT_O_ETH) &&
407 ULP_BITMAP_ISSET(hdr_bitmap->bits,
408 BNXT_ULP_HDR_BIT_OO_VLAN) &&
409 ULP_BITMAP_ISSET(hdr_bitmap->bits,
410 BNXT_ULP_HDR_BIT_OI_VLAN) &&
411 ULP_BITMAP_ISSET(hdr_bitmap->bits,
412 BNXT_ULP_HDR_BIT_I_ETH) &&
413 ULP_BITMAP_ISSET(hdr_bitmap->bits,
414 BNXT_ULP_HDR_BIT_IO_VLAN) &&
415 !ULP_BITMAP_ISSET(hdr_bitmap->bits,
416 BNXT_ULP_HDR_BIT_II_VLAN)) {
417 /* Set the inner vlan bit and update the vlan tag num */
418 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_II_VLAN);
420 ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num);
421 hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
424 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
425 return BNXT_TF_RC_ERROR;
427 return BNXT_TF_RC_SUCCESS;
430 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
432 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
433 struct ulp_rte_hdr_bitmap *hdr_bitmap,
434 struct ulp_rte_hdr_field *hdr_field,
436 uint32_t *vlan_idx __rte_unused)
438 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
439 uint32_t idx = *field_idx;
440 uint32_t mdx = *field_idx;
442 ipv4_spec = item->spec;
443 ipv4_mask = item->mask;
445 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
446 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
447 return BNXT_TF_RC_ERROR;
451 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
455 hdr_field[idx].size = sizeof(ipv4_spec->hdr.version_ihl);
456 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.version_ihl,
457 sizeof(ipv4_spec->hdr.version_ihl));
458 hdr_field[idx].size = sizeof(ipv4_spec->hdr.type_of_service);
459 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.type_of_service,
460 sizeof(ipv4_spec->hdr.type_of_service));
461 hdr_field[idx].size = sizeof(ipv4_spec->hdr.total_length);
462 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.total_length,
463 sizeof(ipv4_spec->hdr.total_length));
464 hdr_field[idx].size = sizeof(ipv4_spec->hdr.packet_id);
465 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.packet_id,
466 sizeof(ipv4_spec->hdr.packet_id));
467 hdr_field[idx].size = sizeof(ipv4_spec->hdr.fragment_offset);
468 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.fragment_offset,
469 sizeof(ipv4_spec->hdr.fragment_offset));
470 hdr_field[idx].size = sizeof(ipv4_spec->hdr.time_to_live);
471 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.time_to_live,
472 sizeof(ipv4_spec->hdr.time_to_live));
473 hdr_field[idx].size = sizeof(ipv4_spec->hdr.next_proto_id);
474 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.next_proto_id,
475 sizeof(ipv4_spec->hdr.next_proto_id));
476 hdr_field[idx].size = sizeof(ipv4_spec->hdr.hdr_checksum);
477 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.hdr_checksum,
478 sizeof(ipv4_spec->hdr.hdr_checksum));
479 hdr_field[idx].size = sizeof(ipv4_spec->hdr.src_addr);
480 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.src_addr,
481 sizeof(ipv4_spec->hdr.src_addr));
482 hdr_field[idx].size = sizeof(ipv4_spec->hdr.dst_addr);
483 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.dst_addr,
484 sizeof(ipv4_spec->hdr.dst_addr));
486 idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
490 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.version_ihl,
491 sizeof(ipv4_mask->hdr.version_ihl));
492 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.type_of_service,
493 sizeof(ipv4_mask->hdr.type_of_service));
494 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.total_length,
495 sizeof(ipv4_mask->hdr.total_length));
496 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.packet_id,
497 sizeof(ipv4_mask->hdr.packet_id));
498 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.fragment_offset,
499 sizeof(ipv4_mask->hdr.fragment_offset));
500 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.time_to_live,
501 sizeof(ipv4_mask->hdr.time_to_live));
502 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.next_proto_id,
503 sizeof(ipv4_mask->hdr.next_proto_id));
504 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.hdr_checksum,
505 sizeof(ipv4_mask->hdr.hdr_checksum));
506 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.src_addr,
507 sizeof(ipv4_mask->hdr.src_addr));
508 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.dst_addr,
509 sizeof(ipv4_mask->hdr.dst_addr));
511 *field_idx = idx; /* Number of ipv4 header elements */
513 /* Set the ipv4 header bitmap and computed l3 header bitmaps */
514 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
515 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
516 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
517 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
518 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
520 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
521 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
523 return BNXT_TF_RC_SUCCESS;
526 /* Function to handle the parsing of RTE Flow item IPV6 Header */
528 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
529 struct ulp_rte_hdr_bitmap *hdr_bitmap,
530 struct ulp_rte_hdr_field *hdr_field,
532 uint32_t *vlan_idx __rte_unused)
534 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
535 uint32_t idx = *field_idx;
536 uint32_t mdx = *field_idx;
538 ipv6_spec = item->spec;
539 ipv6_mask = item->mask;
541 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
542 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
543 return BNXT_TF_RC_ERROR;
547 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
551 hdr_field[idx].size = sizeof(ipv6_spec->hdr.vtc_flow);
552 memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.vtc_flow,
553 sizeof(ipv6_spec->hdr.vtc_flow));
554 hdr_field[idx].size = sizeof(ipv6_spec->hdr.payload_len);
555 memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.payload_len,
556 sizeof(ipv6_spec->hdr.payload_len));
557 hdr_field[idx].size = sizeof(ipv6_spec->hdr.proto);
558 memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.proto,
559 sizeof(ipv6_spec->hdr.proto));
560 hdr_field[idx].size = sizeof(ipv6_spec->hdr.hop_limits);
561 memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.hop_limits,
562 sizeof(ipv6_spec->hdr.hop_limits));
563 hdr_field[idx].size = sizeof(ipv6_spec->hdr.src_addr);
564 memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.src_addr,
565 sizeof(ipv6_spec->hdr.src_addr));
566 hdr_field[idx].size = sizeof(ipv6_spec->hdr.dst_addr);
567 memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.dst_addr,
568 sizeof(ipv6_spec->hdr.dst_addr));
570 idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
574 memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.vtc_flow,
575 sizeof(ipv6_mask->hdr.vtc_flow));
576 memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.payload_len,
577 sizeof(ipv6_mask->hdr.payload_len));
578 memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.proto,
579 sizeof(ipv6_mask->hdr.proto));
580 memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.hop_limits,
581 sizeof(ipv6_mask->hdr.hop_limits));
582 memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.src_addr,
583 sizeof(ipv6_mask->hdr.src_addr));
584 memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.dst_addr,
585 sizeof(ipv6_mask->hdr.dst_addr));
587 *field_idx = idx; /* add number of ipv6 header elements */
589 /* Set the ipv6 header bitmap and computed l3 header bitmaps */
590 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
591 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
592 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
593 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
594 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
596 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
597 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
599 return BNXT_TF_RC_SUCCESS;
602 /* Function to handle the parsing of RTE Flow item UDP Header. */
604 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
605 struct ulp_rte_hdr_bitmap *hdr_bitmap,
606 struct ulp_rte_hdr_field *hdr_field,
608 uint32_t *vlan_idx __rte_unused)
610 const struct rte_flow_item_udp *udp_spec, *udp_mask;
611 uint32_t idx = *field_idx;
612 uint32_t mdx = *field_idx;
614 udp_spec = item->spec;
615 udp_mask = item->mask;
617 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
618 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
619 return BNXT_TF_RC_ERROR;
623 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
627 hdr_field[idx].size = sizeof(udp_spec->hdr.src_port);
628 memcpy(hdr_field[idx++].spec, &udp_spec->hdr.src_port,
629 sizeof(udp_spec->hdr.src_port));
630 hdr_field[idx].size = sizeof(udp_spec->hdr.dst_port);
631 memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dst_port,
632 sizeof(udp_spec->hdr.dst_port));
633 hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_len);
634 memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_len,
635 sizeof(udp_spec->hdr.dgram_len));
636 hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_cksum);
637 memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_cksum,
638 sizeof(udp_spec->hdr.dgram_cksum));
640 idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
644 memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.src_port,
645 sizeof(udp_mask->hdr.src_port));
646 memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dst_port,
647 sizeof(udp_mask->hdr.dst_port));
648 memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_len,
649 sizeof(udp_mask->hdr.dgram_len));
650 memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_cksum,
651 sizeof(udp_mask->hdr.dgram_cksum));
653 *field_idx = idx; /* Add number of UDP header elements */
655 /* Set the udp header bitmap and computed l4 header bitmaps */
656 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
657 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
658 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
659 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
660 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
662 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
663 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
665 return BNXT_TF_RC_SUCCESS;
668 /* Function to handle the parsing of RTE Flow item TCP Header. */
670 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
671 struct ulp_rte_hdr_bitmap *hdr_bitmap,
672 struct ulp_rte_hdr_field *hdr_field,
674 uint32_t *vlan_idx __rte_unused)
676 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
677 uint32_t idx = *field_idx;
678 uint32_t mdx = *field_idx;
680 tcp_spec = item->spec;
681 tcp_mask = item->mask;
683 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
684 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
685 return BNXT_TF_RC_ERROR;
689 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
693 hdr_field[idx].size = sizeof(tcp_spec->hdr.src_port);
694 memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.src_port,
695 sizeof(tcp_spec->hdr.src_port));
696 hdr_field[idx].size = sizeof(tcp_spec->hdr.dst_port);
697 memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.dst_port,
698 sizeof(tcp_spec->hdr.dst_port));
699 hdr_field[idx].size = sizeof(tcp_spec->hdr.sent_seq);
700 memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.sent_seq,
701 sizeof(tcp_spec->hdr.sent_seq));
702 hdr_field[idx].size = sizeof(tcp_spec->hdr.recv_ack);
703 memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.recv_ack,
704 sizeof(tcp_spec->hdr.recv_ack));
705 hdr_field[idx].size = sizeof(tcp_spec->hdr.data_off);
706 memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.data_off,
707 sizeof(tcp_spec->hdr.data_off));
708 hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_flags);
709 memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_flags,
710 sizeof(tcp_spec->hdr.tcp_flags));
711 hdr_field[idx].size = sizeof(tcp_spec->hdr.rx_win);
712 memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.rx_win,
713 sizeof(tcp_spec->hdr.rx_win));
714 hdr_field[idx].size = sizeof(tcp_spec->hdr.cksum);
715 memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.cksum,
716 sizeof(tcp_spec->hdr.cksum));
717 hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_urp);
718 memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_urp,
719 sizeof(tcp_spec->hdr.tcp_urp));
721 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
725 memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.src_port,
726 sizeof(tcp_mask->hdr.src_port));
727 memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.dst_port,
728 sizeof(tcp_mask->hdr.dst_port));
729 memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.sent_seq,
730 sizeof(tcp_mask->hdr.sent_seq));
731 memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.recv_ack,
732 sizeof(tcp_mask->hdr.recv_ack));
733 memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.data_off,
734 sizeof(tcp_mask->hdr.data_off));
735 memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_flags,
736 sizeof(tcp_mask->hdr.tcp_flags));
737 memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.rx_win,
738 sizeof(tcp_mask->hdr.rx_win));
739 memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.cksum,
740 sizeof(tcp_mask->hdr.cksum));
741 memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_urp,
742 sizeof(tcp_mask->hdr.tcp_urp));
744 *field_idx = idx; /* add number of TCP header elements */
746 /* Set the udp header bitmap and computed l4 header bitmaps */
747 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
748 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
749 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
750 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
751 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
753 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
754 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
756 return BNXT_TF_RC_SUCCESS;
759 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
761 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
762 struct ulp_rte_hdr_bitmap *hdrbitmap,
763 struct ulp_rte_hdr_field *hdr_field,
765 uint32_t *vlan_idx __rte_unused)
767 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
768 uint32_t idx = *field_idx;
769 uint32_t mdx = *field_idx;
771 vxlan_spec = item->spec;
772 vxlan_mask = item->mask;
775 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
779 hdr_field[idx].size = sizeof(vxlan_spec->flags);
780 memcpy(hdr_field[idx++].spec, &vxlan_spec->flags,
781 sizeof(vxlan_spec->flags));
782 hdr_field[idx].size = sizeof(vxlan_spec->rsvd0);
783 memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd0,
784 sizeof(vxlan_spec->rsvd0));
785 hdr_field[idx].size = sizeof(vxlan_spec->vni);
786 memcpy(hdr_field[idx++].spec, &vxlan_spec->vni,
787 sizeof(vxlan_spec->vni));
788 hdr_field[idx].size = sizeof(vxlan_spec->rsvd1);
789 memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd1,
790 sizeof(vxlan_spec->rsvd1));
792 idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
796 memcpy(hdr_field[mdx++].mask, &vxlan_mask->flags,
797 sizeof(vxlan_mask->flags));
798 memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd0,
799 sizeof(vxlan_mask->rsvd0));
800 memcpy(hdr_field[mdx++].mask, &vxlan_mask->vni,
801 sizeof(vxlan_mask->vni));
802 memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd1,
803 sizeof(vxlan_mask->rsvd1));
805 *field_idx = idx; /* Add number of vxlan header elements */
807 /* Update the hdr_bitmap with vxlan */
808 ULP_BITMAP_SET(hdrbitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
809 return BNXT_TF_RC_SUCCESS;
812 /* Function to handle the parsing of RTE Flow item void Header */
814 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
815 struct ulp_rte_hdr_bitmap *hdr_bit __rte_unused,
816 struct ulp_rte_hdr_field *hdr_field __rte_unused,
817 uint32_t *field_idx __rte_unused,
818 uint32_t *vlan_idx __rte_unused)
820 return BNXT_TF_RC_SUCCESS;
823 /* Function to handle the parsing of RTE Flow action void Header. */
825 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
826 struct ulp_rte_act_bitmap *act __rte_unused,
827 struct ulp_rte_act_prop *act_prop __rte_unused)
829 return BNXT_TF_RC_SUCCESS;
832 /* Function to handle the parsing of RTE Flow action Mark Header. */
834 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
835 struct ulp_rte_act_bitmap *act,
836 struct ulp_rte_act_prop *act_prop)
838 const struct rte_flow_action_mark *mark;
839 uint32_t mark_id = 0;
841 mark = action_item->conf;
843 mark_id = tfp_cpu_to_be_32(mark->id);
844 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
845 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
847 /* Update the hdr_bitmap with vxlan */
848 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
849 return BNXT_TF_RC_SUCCESS;
851 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
852 return BNXT_TF_RC_ERROR;
855 /* Function to handle the parsing of RTE Flow action RSS Header. */
857 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
858 struct ulp_rte_act_bitmap *act,
859 struct ulp_rte_act_prop *act_prop __rte_unused)
861 const struct rte_flow_action_rss *rss;
863 rss = action_item->conf;
865 /* Update the hdr_bitmap with vxlan */
866 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_RSS);
867 return BNXT_TF_RC_SUCCESS;
869 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
870 return BNXT_TF_RC_ERROR;
873 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
875 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
876 struct ulp_rte_act_bitmap *act,
877 struct ulp_rte_act_prop *ap)
879 const struct rte_flow_action_vxlan_encap *vxlan_encap;
880 const struct rte_flow_item *item;
881 const struct rte_flow_item_eth *eth_spec;
882 const struct rte_flow_item_ipv4 *ipv4_spec;
883 const struct rte_flow_item_ipv6 *ipv6_spec;
884 struct rte_flow_item_vxlan vxlan_spec;
885 uint32_t vlan_num = 0, vlan_size = 0;
886 uint32_t ip_size = 0, ip_type = 0;
887 uint32_t vxlan_size = 0;
889 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
890 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
893 vxlan_encap = action_item->conf;
895 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
896 return BNXT_TF_RC_ERROR;
899 item = vxlan_encap->definition;
901 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
902 return BNXT_TF_RC_ERROR;
905 if (!ulp_rte_item_skip_void(&item, 0))
906 return BNXT_TF_RC_ERROR;
908 /* must have ethernet header */
909 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
910 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
911 return BNXT_TF_RC_ERROR;
913 eth_spec = item->spec;
914 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
915 ulp_encap_buffer_copy(buff,
916 eth_spec->dst.addr_bytes,
917 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
919 /* Goto the next item */
920 if (!ulp_rte_item_skip_void(&item, 1))
921 return BNXT_TF_RC_ERROR;
923 /* May have vlan header */
924 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
926 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
927 ulp_encap_buffer_copy(buff,
929 sizeof(struct rte_flow_item_vlan));
931 if (!ulp_rte_item_skip_void(&item, 1))
932 return BNXT_TF_RC_ERROR;
935 /* may have two vlan headers */
936 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
938 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
939 sizeof(struct rte_flow_item_vlan)],
941 sizeof(struct rte_flow_item_vlan));
942 if (!ulp_rte_item_skip_void(&item, 1))
943 return BNXT_TF_RC_ERROR;
945 /* Update the vlan count and size of more than one */
947 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
948 vlan_num = tfp_cpu_to_be_32(vlan_num);
949 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
952 vlan_size = tfp_cpu_to_be_32(vlan_size);
953 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
958 /* L3 must be IPv4, IPv6 */
959 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
960 ipv4_spec = item->spec;
961 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
963 /* copy the ipv4 details */
964 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
965 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
966 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
967 ulp_encap_buffer_copy(buff,
969 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
970 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
972 const uint8_t *tmp_buff;
974 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
975 ulp_encap_buffer_copy(buff,
976 &ipv4_spec->hdr.version_ihl,
977 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
978 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
979 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
980 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
981 ulp_encap_buffer_copy(buff,
983 BNXT_ULP_ENCAP_IPV4_ID_PROTO);
985 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
986 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
987 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
988 ulp_encap_buffer_copy(buff,
989 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
990 BNXT_ULP_ENCAP_IPV4_DEST_IP);
992 /* Update the ip size details */
993 ip_size = tfp_cpu_to_be_32(ip_size);
994 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
995 &ip_size, sizeof(uint32_t));
997 /* update the ip type */
998 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
999 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1000 &ip_type, sizeof(uint32_t));
1002 if (!ulp_rte_item_skip_void(&item, 1))
1003 return BNXT_TF_RC_ERROR;
1004 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1005 ipv6_spec = item->spec;
1006 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1008 /* copy the ipv4 details */
1009 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1010 ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1012 /* Update the ip size details */
1013 ip_size = tfp_cpu_to_be_32(ip_size);
1014 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1015 &ip_size, sizeof(uint32_t));
1017 /* update the ip type */
1018 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1019 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1020 &ip_type, sizeof(uint32_t));
1022 if (!ulp_rte_item_skip_void(&item, 1))
1023 return BNXT_TF_RC_ERROR;
1025 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1026 return BNXT_TF_RC_ERROR;
1030 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1031 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1032 return BNXT_TF_RC_ERROR;
1034 /* copy the udp details */
1035 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1036 item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1038 if (!ulp_rte_item_skip_void(&item, 1))
1039 return BNXT_TF_RC_ERROR;
1042 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1043 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1044 return BNXT_TF_RC_ERROR;
1046 vxlan_size = sizeof(struct rte_flow_item_vxlan);
1047 /* copy the vxlan details */
1048 memcpy(&vxlan_spec, item->spec, vxlan_size);
1049 vxlan_spec.flags = 0x08;
1050 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1051 (const uint8_t *)&vxlan_spec,
1053 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1054 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1055 &vxlan_size, sizeof(uint32_t));
1057 /*update the hdr_bitmap with vxlan */
1058 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1059 return BNXT_TF_RC_SUCCESS;
1062 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1064 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1066 struct ulp_rte_act_bitmap *act,
1067 struct ulp_rte_act_prop *act_prop __rte_unused)
1069 /* update the hdr_bitmap with vxlan */
1070 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1071 return BNXT_TF_RC_SUCCESS;
1074 /* Function to handle the parsing of RTE Flow action drop Header. */
1076 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1077 struct ulp_rte_act_bitmap *act,
1078 struct ulp_rte_act_prop *act_prop __rte_unused)
1080 /* Update the hdr_bitmap with drop */
1081 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_DROP);
1082 return BNXT_TF_RC_SUCCESS;
1085 /* Function to handle the parsing of RTE Flow action count. */
1087 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1088 struct ulp_rte_act_bitmap *act,
1089 struct ulp_rte_act_prop *act_prop __rte_unused)
1092 const struct rte_flow_action_count *act_count;
1094 act_count = action_item->conf;
1096 if (act_count->shared) {
1098 "Parse Error:Shared count not supported\n");
1099 return BNXT_TF_RC_PARSE_ERR;
1101 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1103 BNXT_ULP_ACT_PROP_SZ_COUNT);
1106 /* Update the hdr_bitmap with count */
1107 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_COUNT);
1108 return BNXT_TF_RC_SUCCESS;
1111 /* Function to handle the parsing of RTE Flow action PF. */
1113 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1114 struct ulp_rte_act_bitmap *act,
1115 struct ulp_rte_act_prop *act_prop)
1118 uint8_t *vnic_buffer;
1121 /* Update the hdr_bitmap with vnic bit */
1122 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
1124 /* copy the PF of the current device into VNIC Property */
1125 svif_buf = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
1126 ulp_util_field_int_read(svif_buf, &svif);
1127 vnic_buffer = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
1128 ulp_util_field_int_write(vnic_buffer, svif);
1130 return BNXT_TF_RC_SUCCESS;
1133 /* Function to handle the parsing of RTE Flow action VF. */
1135 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1136 struct ulp_rte_act_bitmap *act,
1137 struct ulp_rte_act_prop *act_prop)
1139 const struct rte_flow_action_vf *vf_action;
1141 vf_action = action_item->conf;
1143 if (vf_action->original) {
1145 "Parse Error:VF Original not supported\n");
1146 return BNXT_TF_RC_PARSE_ERR;
1148 /* TBD: Update the computed VNIC using VF conversion */
1149 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1151 BNXT_ULP_ACT_PROP_SZ_VNIC);
1154 /* Update the hdr_bitmap with count */
1155 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
1156 return BNXT_TF_RC_SUCCESS;
1159 /* Function to handle the parsing of RTE Flow action port_id. */
1161 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1162 struct ulp_rte_act_bitmap *act,
1163 struct ulp_rte_act_prop *act_prop)
1165 const struct rte_flow_action_port_id *port_id;
1167 port_id = act_item->conf;
1169 if (port_id->original) {
1171 "ParseErr:Portid Original not supported\n");
1172 return BNXT_TF_RC_PARSE_ERR;
1174 /* TBD: Update the computed VNIC using port conversion */
1175 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1177 BNXT_ULP_ACT_PROP_SZ_VNIC);
1180 /* Update the hdr_bitmap with count */
1181 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
1182 return BNXT_TF_RC_SUCCESS;
1185 /* Function to handle the parsing of RTE Flow action phy_port. */
1187 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1188 struct ulp_rte_act_bitmap *act,
1189 struct ulp_rte_act_prop *act_prop)
1191 const struct rte_flow_action_phy_port *phy_port;
1193 phy_port = action_item->conf;
1195 if (phy_port->original) {
1197 "Parse Err:Port Original not supported\n");
1198 return BNXT_TF_RC_PARSE_ERR;
1200 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1202 BNXT_ULP_ACT_PROP_SZ_VPORT);
1205 /* Update the hdr_bitmap with count */
1206 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VPORT);
1207 return BNXT_TF_RC_SUCCESS;