/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include "ulp_template_db.h"
#include "ulp_template_struct.h"
#include "bnxt_tf_common.h"
#include "ulp_rte_parser.h"
#include "ulp_utils.h"

/* Inline function to read an integer that is stored in big endian format */
static inline void ulp_util_field_int_read(uint8_t *buffer,
                                           uint32_t *val)
{
        uint32_t temp_val;

        memcpy(&temp_val, buffer, sizeof(uint32_t));
        *val = rte_be_to_cpu_32(temp_val);
}

/* Inline function to write an integer in big endian format */
static inline void ulp_util_field_int_write(uint8_t *buffer,
                                            uint32_t val)
{
        uint32_t temp_val = rte_cpu_to_be_32(val);

        memcpy(buffer, &temp_val, sizeof(uint32_t));
}
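
/*
 * These helpers keep multi-byte counters, such as the outer and inner VLAN
 * tag counts stored in hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM] and
 * hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM], in big endian form inside the
 * hdr_field byte buffers, matching how the other header fields are laid out.
 * A minimal usage sketch for bumping such a counter:
 *
 *	uint32_t cnt;
 *
 *	ulp_util_field_int_read(buf, &cnt);
 *	cnt++;
 *	ulp_util_field_int_write(buf, cnt);
 */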

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
                              struct ulp_rte_hdr_bitmap *hdr_bitmap,
                              struct ulp_rte_hdr_field *hdr_field)
{
        const struct rte_flow_item *item = pattern;
        uint32_t field_idx = BNXT_ULP_HDR_FIELD_LAST;
        uint32_t vlan_idx = 0;
        struct bnxt_ulp_rte_hdr_info *hdr_info;

        /* Parse all the items in the pattern */
        while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
                /* Get the header information from the flow_hdr_info table */
                hdr_info = &ulp_hdr_info[item->type];
                if (hdr_info->hdr_type ==
                    BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
                        BNXT_TF_DBG(ERR,
                                    "Truflow parser does not support type %d\n",
                                    item->type);
                        return BNXT_TF_RC_PARSE_ERR;
                } else if (hdr_info->hdr_type ==
                           BNXT_ULP_HDR_TYPE_SUPPORTED) {
                        /* Call the registered callback handler */
                        if (hdr_info->proto_hdr_func) {
                                if (hdr_info->proto_hdr_func(item,
                                                             hdr_bitmap,
                                                             hdr_field,
                                                             &field_idx,
                                                             &vlan_idx) !=
                                    BNXT_TF_RC_SUCCESS)
                                        return BNXT_TF_RC_ERROR;
                        }
                }
                item++;
        }
        return BNXT_TF_RC_SUCCESS;
}
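
/*
 * Usage sketch (illustrative only): callers are expected to hand in a zeroed
 * bitmap and field table, and treat any non-success return as a parse
 * failure, e.g.
 *
 *	struct ulp_rte_hdr_bitmap hdr_bitmap;
 *	struct ulp_rte_hdr_field hdr_field[BNXT_ULP_PROTO_HDR_MAX];
 *
 *	memset(&hdr_bitmap, 0, sizeof(hdr_bitmap));
 *	memset(hdr_field, 0, sizeof(hdr_field));
 *	if (bnxt_ulp_rte_parser_hdr_parse(pattern, &hdr_bitmap,
 *					  hdr_field) != BNXT_TF_RC_SUCCESS)
 *		return BNXT_TF_RC_ERROR;
 *
 * The array dimension macro above is assumed for illustration; the actual
 * size comes from the ulp template definitions.
 */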

/* Helper to record the SVIF (source virtual interface) for the flow. */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_hdr_bitmap *hdr_bitmap,
                        struct ulp_rte_hdr_field *hdr_field,
                        enum rte_flow_item_type proto,
                        uint32_t svif,
                        uint32_t mask)
{
        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF)) {
                BNXT_TF_DBG(ERR,
                            "SVIF already set,"
                            " multiple sources not supported\n");
                return BNXT_TF_RC_ERROR;
        }

        /* TBD: Check for any mapping errors for svif */
        /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF. */
        ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF);

        if (proto != RTE_FLOW_ITEM_TYPE_PF) {
                memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec,
                       &svif, sizeof(svif));
                memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask,
                       &mask, sizeof(mask));
                hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif);
        }

        return BNXT_TF_RC_SUCCESS;
}
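
/*
 * Note: for RTE_FLOW_ITEM_TYPE_PF the SVIF spec/mask are intentionally left
 * untouched above; only the SVIF bit is recorded in the header bitmap, and
 * the interface value is presumably resolved later from the port
 * configuration rather than from the flow item itself.
 */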

/* Function to handle the parsing of RTE Flow item PF Header. */
int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
                       struct ulp_rte_hdr_bitmap *hdr_bitmap,
                       struct ulp_rte_hdr_field *hdr_field,
                       uint32_t *field_idx __rte_unused,
                       uint32_t *vlan_idx __rte_unused)
{
        return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
                                       item->type, 0, 0);
}

/* Function to handle the parsing of RTE Flow item VF Header. */
int32_t
ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
                       struct ulp_rte_hdr_bitmap *hdr_bitmap,
                       struct ulp_rte_hdr_field *hdr_field,
                       uint32_t *field_idx __rte_unused,
                       uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_vf *vf_spec, *vf_mask;
        uint32_t svif = 0, mask = 0;

        vf_spec = item->spec;
        vf_mask = item->mask;

        /*
         * Copy the rte_flow_item for VF into hdr_field using the VF
         * header fields.
         */
        if (vf_spec)
                svif = vf_spec->id;
        if (vf_mask)
                mask = vf_mask->id;

        return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
                                       item->type, svif, mask);
}
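
/*
 * The spec and mask of an rte_flow item are both optional, so the VF,
 * port id and phy port handlers check each pointer before dereferencing
 * it; a missing spec or mask simply leaves the corresponding SVIF value
 * or mask at zero.
 */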

/* Function to handle the parsing of RTE Flow item port id Header. */
int32_t
ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
                            struct ulp_rte_hdr_bitmap *hdr_bitmap,
                            struct ulp_rte_hdr_field *hdr_field,
                            uint32_t *field_idx __rte_unused,
                            uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_port_id *port_spec, *port_mask;
        uint32_t svif = 0, mask = 0;

        port_spec = item->spec;
        port_mask = item->mask;

        /*
         * Copy the rte_flow_item for port id into hdr_field using the
         * port id header fields.
         */
        if (port_spec)
                svif = port_spec->id;
        if (port_mask)
                mask = port_mask->id;

        return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
                                       item->type, svif, mask);
}

/* Function to handle the parsing of RTE Flow item phy port Header. */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
                             struct ulp_rte_hdr_bitmap *hdr_bitmap,
                             struct ulp_rte_hdr_field *hdr_field,
                             uint32_t *field_idx __rte_unused,
                             uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_phy_port *port_spec, *port_mask;
        uint32_t svif = 0, mask = 0;

        port_spec = item->spec;
        port_mask = item->mask;

        /* Copy the rte_flow_item for phy port into hdr_field */
        if (port_spec)
                svif = port_spec->index;
        if (port_mask)
                mask = port_mask->index;

        return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
                                       item->type, svif, mask);
}

/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
                        struct ulp_rte_hdr_bitmap *hdr_bitmap,
                        struct ulp_rte_hdr_field *hdr_field,
                        uint32_t *field_idx,
                        uint32_t *vlan_idx)
{
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        uint32_t idx = *field_idx;
        uint32_t mdx = *field_idx;
        uint64_t set_flag = 0;

        eth_spec = item->spec;
        eth_mask = item->mask;

        /*
         * Copy the rte_flow_item for eth into hdr_field using ethernet
         * header fields.
         */
        if (eth_spec) {
                hdr_field[idx].size = sizeof(eth_spec->dst.addr_bytes);
                memcpy(hdr_field[idx++].spec, eth_spec->dst.addr_bytes,
                       sizeof(eth_spec->dst.addr_bytes));
                hdr_field[idx].size = sizeof(eth_spec->src.addr_bytes);
                memcpy(hdr_field[idx++].spec, eth_spec->src.addr_bytes,
                       sizeof(eth_spec->src.addr_bytes));
                hdr_field[idx].size = sizeof(eth_spec->type);
                memcpy(hdr_field[idx++].spec, &eth_spec->type,
                       sizeof(eth_spec->type));
        } else {
                idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
        }

        if (eth_mask) {
                memcpy(hdr_field[mdx++].mask, eth_mask->dst.addr_bytes,
                       sizeof(eth_mask->dst.addr_bytes));
                memcpy(hdr_field[mdx++].mask, eth_mask->src.addr_bytes,
                       sizeof(eth_mask->src.addr_bytes));
                memcpy(hdr_field[mdx++].mask, &eth_mask->type,
                       sizeof(eth_mask->type));
        }

        /* Add number of vlan header elements and record where they start */
        *field_idx = idx + BNXT_ULP_PROTO_HDR_VLAN_NUM;
        *vlan_idx = idx;

        /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
        set_flag = ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH);
        if (set_flag)
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH);
        else
                ULP_BITMAP_RESET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH);

        /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
        ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH);

        return BNXT_TF_RC_SUCCESS;
}
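
/*
 * Note on the VLAN slots: the Ethernet handler above always advances
 * *field_idx past BNXT_ULP_PROTO_HDR_VLAN_NUM reserved entries and records
 * their starting position in *vlan_idx. Subsequent VLAN items then fill
 * those reserved slots through ulp_rte_vlan_hdr_handler(), so the fields of
 * later headers (IP, L4) keep the same offsets in hdr_field regardless of
 * how many VLAN tags the pattern carries.
 */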

/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_hdr_bitmap *hdr_bitmap,
                         struct ulp_rte_hdr_field *hdr_field,
                         uint32_t *field_idx __rte_unused,
                         uint32_t *vlan_idx)
{
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        uint32_t idx = *vlan_idx;
        uint32_t mdx = *vlan_idx;
        uint16_t vlan_tag, priority;
        uint32_t outer_vtag_num = 0, inner_vtag_num = 0;
        uint8_t *outer_tag_buffer;
        uint8_t *inner_tag_buffer;

        vlan_spec = item->spec;
        vlan_mask = item->mask;
        outer_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].spec;
        inner_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].spec;

        /*
         * Copy the rte_flow_item for vlan into hdr_field using VLAN
         * header fields.
         */
        if (vlan_spec) {
                vlan_tag = ntohs(vlan_spec->tci);
                priority = htons(vlan_tag >> 13);
                vlan_tag &= 0xfff;
                vlan_tag = htons(vlan_tag);

                hdr_field[idx].size = sizeof(priority);
                memcpy(hdr_field[idx++].spec, &priority, sizeof(priority));
                hdr_field[idx].size = sizeof(vlan_tag);
                memcpy(hdr_field[idx++].spec, &vlan_tag, sizeof(vlan_tag));
                hdr_field[idx].size = sizeof(vlan_spec->inner_type);
                memcpy(hdr_field[idx++].spec, &vlan_spec->inner_type,
                       sizeof(vlan_spec->inner_type));
        } else {
                idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
        }

        if (vlan_mask) {
                vlan_tag = ntohs(vlan_mask->tci);
                priority = htons(vlan_tag >> 13);
                vlan_tag &= 0xfff;
                vlan_tag = htons(vlan_tag);

                memcpy(hdr_field[mdx++].mask, &priority, sizeof(priority));
                memcpy(hdr_field[mdx++].mask, &vlan_tag, sizeof(vlan_tag));
                memcpy(hdr_field[mdx++].mask, &vlan_mask->inner_type,
                       sizeof(vlan_mask->inner_type));
        }

        /* Set the vlan index to the new incremented value */
        *vlan_idx = idx;

        /* Get the outer tag and inner tag counts */
        ulp_util_field_int_read(outer_tag_buffer, &outer_vtag_num);
        ulp_util_field_int_read(inner_tag_buffer, &inner_vtag_num);

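        /*
         * The chain below supports at most four VLAN tags per flow: up to
         * two tags after the outer ETH header (OO_VLAN, then OI_VLAN) and
         * up to two after the inner ETH header (IO_VLAN, then II_VLAN).
         * The first bit not yet set in that order identifies the position
         * of the tag currently being parsed, and the matching outer or
         * inner tag count is incremented.
         */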
        /* Update the hdr_bitmap of the vlans */
        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
            !ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) {
                /* Set the outer vlan bit and update the vlan tag num */
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN);
                outer_vtag_num++;
                ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num);
                hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
                        sizeof(uint32_t);
        } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_OO_VLAN) &&
                   !ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                     BNXT_ULP_HDR_BIT_OI_VLAN)) {
                /* Set the second outer vlan bit and update the vlan tag num */
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OI_VLAN);
                outer_vtag_num++;
                ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num);
                hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
                        sizeof(uint32_t);
        } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_O_ETH) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_OO_VLAN) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_OI_VLAN) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_I_ETH) &&
                   !ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                     BNXT_ULP_HDR_BIT_IO_VLAN)) {
                /* Set the inner vlan bit and update the vlan tag num */
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_IO_VLAN);
                inner_vtag_num++;
                ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num);
                hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
                        sizeof(uint32_t);
        } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_O_ETH) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_OO_VLAN) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_OI_VLAN) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_I_ETH) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_IO_VLAN) &&
                   !ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                     BNXT_ULP_HDR_BIT_II_VLAN)) {
                /* Set the second inner vlan bit and update the vlan tag num */
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_II_VLAN);
                inner_vtag_num++;
                ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num);
                hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
                        sizeof(uint32_t);
        } else {
                BNXT_TF_DBG(ERR, "Error Parsing: VLAN hdr found without eth\n");
                return BNXT_TF_RC_ERROR;
        }
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_hdr_bitmap *hdr_bitmap,
                         struct ulp_rte_hdr_field *hdr_field,
                         uint32_t *field_idx,
                         uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        uint32_t idx = *field_idx;
        uint32_t mdx = *field_idx;

        ipv4_spec = item->spec;
        ipv4_mask = item->mask;

        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
                BNXT_TF_DBG(ERR, "Parse Error: Third L3 header not supported\n");
                return BNXT_TF_RC_ERROR;
        }

        /*
         * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
         * header fields.
         */
        if (ipv4_spec) {
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.version_ihl);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.version_ihl,
                       sizeof(ipv4_spec->hdr.version_ihl));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.type_of_service);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.type_of_service,
                       sizeof(ipv4_spec->hdr.type_of_service));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.total_length);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.total_length,
                       sizeof(ipv4_spec->hdr.total_length));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.packet_id);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.packet_id,
                       sizeof(ipv4_spec->hdr.packet_id));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.fragment_offset);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.fragment_offset,
                       sizeof(ipv4_spec->hdr.fragment_offset));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.time_to_live);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.time_to_live,
                       sizeof(ipv4_spec->hdr.time_to_live));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.next_proto_id);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.next_proto_id,
                       sizeof(ipv4_spec->hdr.next_proto_id));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.hdr_checksum);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.hdr_checksum,
                       sizeof(ipv4_spec->hdr.hdr_checksum));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.src_addr);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.src_addr,
                       sizeof(ipv4_spec->hdr.src_addr));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.dst_addr);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.dst_addr,
                       sizeof(ipv4_spec->hdr.dst_addr));
        } else {
                idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
        }

        if (ipv4_mask) {
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.version_ihl,
                       sizeof(ipv4_mask->hdr.version_ihl));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.type_of_service,
                       sizeof(ipv4_mask->hdr.type_of_service));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.total_length,
                       sizeof(ipv4_mask->hdr.total_length));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.packet_id,
                       sizeof(ipv4_mask->hdr.packet_id));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.fragment_offset,
                       sizeof(ipv4_mask->hdr.fragment_offset));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.time_to_live,
                       sizeof(ipv4_mask->hdr.time_to_live));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.next_proto_id,
                       sizeof(ipv4_mask->hdr.next_proto_id));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.hdr_checksum,
                       sizeof(ipv4_mask->hdr.hdr_checksum));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.src_addr,
                       sizeof(ipv4_mask->hdr.src_addr));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.dst_addr,
                       sizeof(ipv4_mask->hdr.dst_addr));
        }
        *field_idx = idx; /* Number of ipv4 header elements */

        /* Set the ipv4 header bitmap and computed l3 header bitmaps */
        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
        } else {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
        }
        return BNXT_TF_RC_SUCCESS;
}
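
/*
 * The first L3 header seen in the pattern is treated as the outer header
 * (O_IPV4/O_IPV6 plus the computed O_L3 bit); if an outer L3 bit is already
 * set, the header is recorded as the inner one (I_IPV4/I_IPV6 plus I_L3),
 * and a third L3 header is rejected up front. The UDP and TCP handlers
 * below apply the same outer/inner scheme to the L4 bits.
 */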

/* Function to handle the parsing of RTE Flow item IPV6 Header */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_hdr_bitmap *hdr_bitmap,
                         struct ulp_rte_hdr_field *hdr_field,
                         uint32_t *field_idx,
                         uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        uint32_t idx = *field_idx;
        uint32_t mdx = *field_idx;

        ipv6_spec = item->spec;
        ipv6_mask = item->mask;

        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
                BNXT_TF_DBG(ERR, "Parse Error: Third L3 header not supported\n");
                return BNXT_TF_RC_ERROR;
        }

        /*
         * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
         * header fields.
         */
        if (ipv6_spec) {
                hdr_field[idx].size = sizeof(ipv6_spec->hdr.vtc_flow);
                memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.vtc_flow,
                       sizeof(ipv6_spec->hdr.vtc_flow));
                hdr_field[idx].size = sizeof(ipv6_spec->hdr.payload_len);
                memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.payload_len,
                       sizeof(ipv6_spec->hdr.payload_len));
                hdr_field[idx].size = sizeof(ipv6_spec->hdr.proto);
                memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.proto,
                       sizeof(ipv6_spec->hdr.proto));
                hdr_field[idx].size = sizeof(ipv6_spec->hdr.hop_limits);
                memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.hop_limits,
                       sizeof(ipv6_spec->hdr.hop_limits));
                hdr_field[idx].size = sizeof(ipv6_spec->hdr.src_addr);
                memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.src_addr,
                       sizeof(ipv6_spec->hdr.src_addr));
                hdr_field[idx].size = sizeof(ipv6_spec->hdr.dst_addr);
                memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.dst_addr,
                       sizeof(ipv6_spec->hdr.dst_addr));
        } else {
                idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
        }

        if (ipv6_mask) {
                memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.vtc_flow,
                       sizeof(ipv6_mask->hdr.vtc_flow));
                memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.payload_len,
                       sizeof(ipv6_mask->hdr.payload_len));
                memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.proto,
                       sizeof(ipv6_mask->hdr.proto));
                memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.hop_limits,
                       sizeof(ipv6_mask->hdr.hop_limits));
                memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.src_addr,
                       sizeof(ipv6_mask->hdr.src_addr));
                memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.dst_addr,
                       sizeof(ipv6_mask->hdr.dst_addr));
        }
        *field_idx = idx; /* Add number of ipv6 header elements */

        /* Set the ipv6 header bitmap and computed l3 header bitmaps */
        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
        } else {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
        }
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
                        struct ulp_rte_hdr_bitmap *hdr_bitmap,
                        struct ulp_rte_hdr_field *hdr_field,
                        uint32_t *field_idx,
                        uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        uint32_t idx = *field_idx;
        uint32_t mdx = *field_idx;

        udp_spec = item->spec;
        udp_mask = item->mask;

        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
                BNXT_TF_DBG(ERR, "Parse Error: Third L4 header not supported\n");
                return BNXT_TF_RC_ERROR;
        }

        /*
         * Copy the rte_flow_item for udp into hdr_field using udp
         * header fields.
         */
        if (udp_spec) {
                hdr_field[idx].size = sizeof(udp_spec->hdr.src_port);
                memcpy(hdr_field[idx++].spec, &udp_spec->hdr.src_port,
                       sizeof(udp_spec->hdr.src_port));
                hdr_field[idx].size = sizeof(udp_spec->hdr.dst_port);
                memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dst_port,
                       sizeof(udp_spec->hdr.dst_port));
                hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_len);
                memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_len,
                       sizeof(udp_spec->hdr.dgram_len));
                hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_cksum);
                memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_cksum,
                       sizeof(udp_spec->hdr.dgram_cksum));
        } else {
                idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
        }

        if (udp_mask) {
                memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.src_port,
                       sizeof(udp_mask->hdr.src_port));
                memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dst_port,
                       sizeof(udp_mask->hdr.dst_port));
                memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_len,
                       sizeof(udp_mask->hdr.dgram_len));
                memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_cksum,
                       sizeof(udp_mask->hdr.dgram_cksum));
        }
        *field_idx = idx; /* Add number of UDP header elements */

        /* Set the udp header bitmap and computed l4 header bitmaps */
        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
        } else {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
        }
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
                        struct ulp_rte_hdr_bitmap *hdr_bitmap,
                        struct ulp_rte_hdr_field *hdr_field,
                        uint32_t *field_idx,
                        uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        uint32_t idx = *field_idx;
        uint32_t mdx = *field_idx;

        tcp_spec = item->spec;
        tcp_mask = item->mask;

        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
                BNXT_TF_DBG(ERR, "Parse Error: Third L4 header not supported\n");
                return BNXT_TF_RC_ERROR;
        }

        /*
         * Copy the rte_flow_item for tcp into hdr_field using tcp
         * header fields.
         */
        if (tcp_spec) {
                hdr_field[idx].size = sizeof(tcp_spec->hdr.src_port);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.src_port,
                       sizeof(tcp_spec->hdr.src_port));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.dst_port);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.dst_port,
                       sizeof(tcp_spec->hdr.dst_port));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.sent_seq);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.sent_seq,
                       sizeof(tcp_spec->hdr.sent_seq));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.recv_ack);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.recv_ack,
                       sizeof(tcp_spec->hdr.recv_ack));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.data_off);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.data_off,
                       sizeof(tcp_spec->hdr.data_off));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_flags);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_flags,
                       sizeof(tcp_spec->hdr.tcp_flags));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.rx_win);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.rx_win,
                       sizeof(tcp_spec->hdr.rx_win));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.cksum);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.cksum,
                       sizeof(tcp_spec->hdr.cksum));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_urp);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_urp,
                       sizeof(tcp_spec->hdr.tcp_urp));
        } else {
                idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
        }

        if (tcp_mask) {
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.src_port,
                       sizeof(tcp_mask->hdr.src_port));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.dst_port,
                       sizeof(tcp_mask->hdr.dst_port));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.sent_seq,
                       sizeof(tcp_mask->hdr.sent_seq));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.recv_ack,
                       sizeof(tcp_mask->hdr.recv_ack));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.data_off,
                       sizeof(tcp_mask->hdr.data_off));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_flags,
                       sizeof(tcp_mask->hdr.tcp_flags));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.rx_win,
                       sizeof(tcp_mask->hdr.rx_win));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.cksum,
                       sizeof(tcp_mask->hdr.cksum));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_urp,
                       sizeof(tcp_mask->hdr.tcp_urp));
        }
        *field_idx = idx; /* Add number of TCP header elements */

        /* Set the tcp header bitmap and computed l4 header bitmaps */
        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
        } else {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
        }
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
                          struct ulp_rte_hdr_bitmap *hdr_bitmap,
                          struct ulp_rte_hdr_field *hdr_field,
                          uint32_t *field_idx,
                          uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        uint32_t idx = *field_idx;
        uint32_t mdx = *field_idx;

        vxlan_spec = item->spec;
        vxlan_mask = item->mask;

        /*
         * Copy the rte_flow_item for vxlan into hdr_field using vxlan
         * header fields.
         */
        if (vxlan_spec) {
                hdr_field[idx].size = sizeof(vxlan_spec->flags);
                memcpy(hdr_field[idx++].spec, &vxlan_spec->flags,
                       sizeof(vxlan_spec->flags));
                hdr_field[idx].size = sizeof(vxlan_spec->rsvd0);
                memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd0,
                       sizeof(vxlan_spec->rsvd0));
                hdr_field[idx].size = sizeof(vxlan_spec->vni);
                memcpy(hdr_field[idx++].spec, &vxlan_spec->vni,
                       sizeof(vxlan_spec->vni));
                hdr_field[idx].size = sizeof(vxlan_spec->rsvd1);
                memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd1,
                       sizeof(vxlan_spec->rsvd1));
        } else {
                idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
        }

        if (vxlan_mask) {
                memcpy(hdr_field[mdx++].mask, &vxlan_mask->flags,
                       sizeof(vxlan_mask->flags));
                memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd0,
                       sizeof(vxlan_mask->rsvd0));
                memcpy(hdr_field[mdx++].mask, &vxlan_mask->vni,
                       sizeof(vxlan_mask->vni));
                memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd1,
                       sizeof(vxlan_mask->rsvd1));
        }
        *field_idx = idx; /* Add number of vxlan header elements */

        /* Update the hdr_bitmap with vxlan */
        ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
        return BNXT_TF_RC_SUCCESS;
}
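
/*
 * VXLAN is recorded only as the tunnel bit (BNXT_ULP_HDR_BIT_T_VXLAN);
 * items that follow it in the pattern are classified by the outer/inner
 * logic of the ETH, L3 and L4 handlers above, so a post-VXLAN Ethernet
 * header naturally becomes the inner (I_ETH) header.
 */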

/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
                         struct ulp_rte_hdr_bitmap *hdr_bit __rte_unused,
                         struct ulp_rte_hdr_field *hdr_field __rte_unused,
                         uint32_t *field_idx __rte_unused,
                         uint32_t *vlan_idx __rte_unused)
{
        return BNXT_TF_RC_SUCCESS;
}
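
/*
 * RTE_FLOW_ITEM_TYPE_VOID items carry no match criteria, so the handler
 * above deliberately leaves the header bitmap and the field/vlan indices
 * untouched and simply reports success, letting the parser move on to the
 * next item.
 */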