1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include "otx2_ethdev.h"
/*
 * Advance over RTE_FLOW_ITEM_TYPE_VOID and RTE_FLOW_ITEM_TYPE_ANY entries
 * in a pattern list and return the first item of any other type.
 * NOTE(review): loop body/return are not visible in this view — presumably
 * it increments @pattern until a non-VOID/ANY item is found; confirm.
 */
8 const struct rte_flow_item *
9 otx2_flow_skip_void_and_any_items(const struct rte_flow_item *pattern)
11 while ((pattern->type == RTE_FLOW_ITEM_TYPE_VOID) ||
12 (pattern->type == RTE_FLOW_ITEM_TYPE_ANY))
19 * Tunnel+ESP, Tunnel+ICMP4/6, Tunnel+TCP, Tunnel+UDP,
/*
 * Parse the LH layer (innermost tunneled L4 header) of the flow pattern.
 * Maps the current pattern item to an NPC_LT_LH_TU_* layer type and the
 * corresponding rte_flow default mask/length, validates the item against
 * the HW-supported mask, then folds the result into the parse state.
 */
23 otx2_flow_parse_lh(struct otx2_parse_state *pst)
25 struct otx2_flow_item_info info;
/* Validation is done against the HW-supported extract mask for this layer. */
33 info.hw_mask = &hw_mask;
/* Select layer type and the default mask/size per tunneled L4 protocol. */
39 switch (pst->pattern->type) {
40 case RTE_FLOW_ITEM_TYPE_UDP:
41 lt = NPC_LT_LH_TU_UDP;
42 info.def_mask = &rte_flow_item_udp_mask;
43 info.len = sizeof(struct rte_flow_item_udp);
45 case RTE_FLOW_ITEM_TYPE_TCP:
46 lt = NPC_LT_LH_TU_TCP;
47 info.def_mask = &rte_flow_item_tcp_mask;
48 info.len = sizeof(struct rte_flow_item_tcp);
50 case RTE_FLOW_ITEM_TYPE_SCTP:
51 lt = NPC_LT_LH_TU_SCTP;
52 info.def_mask = &rte_flow_item_sctp_mask;
53 info.len = sizeof(struct rte_flow_item_sctp);
55 case RTE_FLOW_ITEM_TYPE_ESP:
56 lt = NPC_LT_LH_TU_ESP;
57 info.def_mask = &rte_flow_item_esp_mask;
58 info.len = sizeof(struct rte_flow_item_esp);
/* Fetch the HW-supported mask for (lid, lt) and validate spec/mask fit. */
64 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
65 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
/* Record layer id/type in the parse state; no layer flags for LH (0). */
69 return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
72 /* Tunnel+IPv4, Tunnel+IPv6 */
/*
 * Parse the LG layer: a tunneled (inner) IPv4 or IPv6 header.
 * If the current item is neither IPv4 nor IPv6, there is no tunneled IP
 * header to consume at this layer.
 */
74 otx2_flow_parse_lg(struct otx2_parse_state *pst)
76 struct otx2_flow_item_info info;
84 info.hw_mask = &hw_mask;
/* Choose inner-IP layer type and the matching default mask/length. */
90 if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
92 info.def_mask = &rte_flow_item_ipv4_mask;
93 info.len = sizeof(struct rte_flow_item_ipv4);
94 } else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV6) {
95 lt = NPC_LT_LG_TU_IP6;
96 info.def_mask = &rte_flow_item_ipv6_mask;
97 info.len = sizeof(struct rte_flow_item_ipv6);
99 /* There is no tunneled IP header */
/* Validate item against the HW-supported mask and commit to parse state. */
103 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
104 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
108 return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
/*
 * Parse the LF layer: tunneled (inner) Ethernet, optionally followed by
 * VLAN tags. VLAN tags after the inner Ethernet can be detected (they set
 * layer flags) but carry no data-matching support.
 */
113 otx2_flow_parse_lf(struct otx2_parse_state *pst)
115 const struct rte_flow_item *pattern, *last_pattern;
116 struct rte_flow_item_eth hw_mask;
117 struct otx2_flow_item_info info;
122 /* We hit this layer if there is a tunneling protocol */
/* Only inner Ethernet is handled here; anything else is not LF. */
126 if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
130 lt = NPC_LT_LF_TU_ETHER;
/* VLAN items are validated with the default mask only (detection only). */
133 info.def_mask = &rte_flow_item_vlan_mask;
134 /* No match support for vlan tags */
136 info.len = sizeof(struct rte_flow_item_vlan);
141 /* Look ahead and find out any VLAN tags. These can be
142 * detected but no data matching is available.
/* Walk items after the ETH item, counting consecutive VLAN tags. */
144 last_pattern = pst->pattern;
145 pattern = pst->pattern + 1;
146 pattern = otx2_flow_skip_void_and_any_items(pattern);
147 while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
149 rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
152 last_pattern = pattern;
154 pattern = otx2_flow_skip_void_and_any_items(pattern);
156 otx2_npc_dbg("Nr_vlans = %d", nr_vlans);
/* Map the number of inner VLANs to the LF layer flags; >2 is unsupported. */
161 lflags = NPC_F_TU_ETHER_CTAG;
164 lflags = NPC_F_TU_ETHER_STAG_CTAG;
167 rte_flow_error_set(pst->error, ENOTSUP,
168 RTE_FLOW_ERROR_TYPE_ITEM,
170 "more than 2 vlans with tunneled Ethernet "
/* Now validate the inner Ethernet item itself against the HW mask. */
175 info.def_mask = &rte_flow_item_eth_mask;
176 info.hw_mask = &hw_mask;
177 info.len = sizeof(struct rte_flow_item_eth);
179 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
183 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
/* Consume the VLAN items too: resume parsing after the last one seen. */
187 pst->pattern = last_pattern;
189 return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
/*
 * Parse the LE layer: UDP-encapsulated tunnel protocols (VXLAN, GTP,
 * GENEVE, VXLAN-GPE) or MPLS-in-UDP. See the in-body comment for the
 * offset caveat between the RTE spec and the NPC parser.
 */
193 otx2_flow_parse_le(struct otx2_parse_state *pst)
196 * We are positioned at UDP. Scan ahead and look for
197 * UDP encapsulated tunnel protocols. If available,
198 * parse them. In that case handle this:
199 * - RTE spec assumes we point to tunnel header.
200 * - NPC parser provides offset from UDP header.
204 * Note: Add support to GENEVE, VXLAN_GPE when we
207 * Note: Better to split flags into two nibbles:
208 * - Higher nibble can have flags
209 * - Lower nibble to further enumerate protocols
210 * and have flags based extraction
212 const struct rte_flow_item *pattern = pst->pattern;
213 struct otx2_flow_item_info info;
/* MPLS-in-UDP is handled by the common MPLS parser at layer LE. */
221 if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
222 return otx2_flow_parse_mpls(pst, NPC_LID_LE);
/* No default mask yet: first pass only checks the UDP item is empty. */
227 info.def_mask = NULL;
233 /* Ensure we are not matching anything in UDP */
234 rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
/* Look past the UDP item for the encapsulated tunnel protocol. */
238 info.hw_mask = &hw_mask;
239 pattern = otx2_flow_skip_void_and_any_items(pattern);
240 otx2_npc_dbg("Pattern->type = %d", pattern->type);
/* Select layer flags + type and the tunnel item's default mask/length. */
241 switch (pattern->type) {
242 case RTE_FLOW_ITEM_TYPE_VXLAN:
243 lflags = NPC_F_UDP_VXLAN;
244 info.def_mask = &rte_flow_item_vxlan_mask;
245 info.len = sizeof(struct rte_flow_item_vxlan);
246 lt = NPC_LT_LE_VXLAN;
248 case RTE_FLOW_ITEM_TYPE_GTPC:
249 lflags = NPC_F_UDP_GTP_GTPC;
250 info.def_mask = &rte_flow_item_gtp_mask;
251 info.len = sizeof(struct rte_flow_item_gtp);
254 case RTE_FLOW_ITEM_TYPE_GTPU:
255 lflags = NPC_F_UDP_GTP_GTPU_G_PDU;
256 info.def_mask = &rte_flow_item_gtp_mask;
257 info.len = sizeof(struct rte_flow_item_gtp);
260 case RTE_FLOW_ITEM_TYPE_GENEVE:
261 lflags = NPC_F_UDP_GENEVE;
262 info.def_mask = &rte_flow_item_geneve_mask;
263 info.len = sizeof(struct rte_flow_item_geneve);
264 lt = NPC_LT_LE_GENEVE;
266 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
267 lflags = NPC_F_UDP_VXLANGPE;
268 info.def_mask = &rte_flow_item_vxlan_gpe_mask;
269 info.len = sizeof(struct rte_flow_item_vxlan_gpe);
270 lt = NPC_LT_LE_VXLANGPE;
/* Validate the tunnel item against the HW mask and commit the layer. */
278 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
279 rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
283 return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
/*
 * Count and validate a stack of MPLS labels starting at pst->pattern.
 * Labels beyond the first must not carry any match data; up to 4 labels
 * are supported. On success *flag is set to the NPC_F_MPLS_*_LABELS value
 * for the observed depth, and pst->last_pattern tracks the last MPLS item.
 */
287 flow_parse_mpls_label_stack(struct otx2_parse_state *pst, int *flag)
290 const struct rte_flow_item *pattern = pst->pattern;
291 struct otx2_flow_item_info info;
/* Index 0 == single label (no extra flag); indices 1..3 = 2..4 labels. */
293 uint8_t flag_list[] = {0, NPC_F_MPLS_2_LABELS,
294 NPC_F_MPLS_3_LABELS, NPC_F_MPLS_4_LABELS};
297 * pst->pattern points to first MPLS label. We only check
298 * that subsequent labels do not have anything to match.
300 info.def_mask = &rte_flow_item_mpls_mask;
302 info.len = sizeof(struct rte_flow_item_mpls);
/* Walk consecutive MPLS items (skipping VOID/ANY between them). */
307 while (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS) {
310 /* Basic validation of 2nd/3rd/4th mpls item */
312 rc = otx2_flow_parse_item_basic(pattern, &info,
317 pst->last_pattern = pattern;
319 pattern = otx2_flow_skip_void_and_any_items(pattern);
/* More than 4 stacked labels cannot be expressed in NPC layer flags. */
323 rte_flow_error_set(pst->error, ENOTSUP,
324 RTE_FLOW_ERROR_TYPE_ITEM,
326 "more than 4 mpls labels not supported");
330 *flag = flag_list[nr_labels - 1];
/*
 * Parse an MPLS label stack at layer @lid (LC, LD or LE — i.e. raw MPLS,
 * MPLS-in-IP or MPLS-in-UDP). The first label is matched normally; the
 * remaining labels only contribute to the label-count layer flags.
 */
335 otx2_flow_parse_mpls(struct otx2_parse_state *pst, int lid)
337 /* Find number of MPLS labels */
338 struct rte_flow_item_mpls hw_mask;
339 struct otx2_flow_item_info info;
/* Pick the layer type matching the encapsulation implied by @lid. */
345 if (lid == NPC_LID_LC)
347 else if (lid == NPC_LID_LD)
348 lt = NPC_LT_LD_TU_MPLS_IN_IP;
350 lt = NPC_LT_LE_TU_MPLS_IN_UDP;
352 /* Prepare for parsing the first item */
353 info.def_mask = &rte_flow_item_mpls_mask;
354 info.hw_mask = &hw_mask;
355 info.len = sizeof(struct rte_flow_item_mpls);
/* First label: full validation against the HW-supported mask. */
360 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
361 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
366 * Parse for more labels.
367 * This sets lflags and pst->last_pattern correctly.
369 rc = flow_parse_mpls_label_stack(pst, &lflags);
/* Consume the whole stack before handing off to the next layer parser. */
374 pst->pattern = pst->last_pattern;
376 return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
380 * ICMP, ICMP6, UDP, TCP, SCTP, VXLAN, GRE, NVGRE,
381 * GTP, GTPC, GTPU, ESP
383 * Note: UDP tunnel protocols are identified by flags.
384 * LPTR for these protocol still points to UDP
385 * header. Need flag based extraction to support
/*
 * Parse the LD layer: the outer L4 / GRE family header. If the item is
 * MPLS, delegate to the MPLS parser (tunneled MPLS-in-IP); otherwise map
 * the item to an NPC LD layer type with its default mask and length.
 */
389 otx2_flow_parse_ld(struct otx2_parse_state *pst)
391 char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
392 struct otx2_flow_item_info info;
397 /* We have already parsed MPLS or IPv4/v6 followed
398 * by MPLS or IPv4/v6. Subsequent TCP/UDP etc
399 * would be parsed as tunneled versions. Skip
400 * this layer, except for tunneled MPLS. If LC is
401 * MPLS, we have anyway skipped all stacked MPLS
404 if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
405 return otx2_flow_parse_mpls(pst, NPC_LID_LD);
408 info.hw_mask = &hw_mask;
411 info.def_mask = NULL;
418 otx2_npc_dbg("Pst->pattern->type = %d", pst->pattern->type);
/* Select layer type, default mask and length per protocol item. */
419 switch (pst->pattern->type) {
420 case RTE_FLOW_ITEM_TYPE_ICMP:
/* ICMP over IPv6 (per the already-parsed LC layer) is ICMP6 in NPC. */
421 if (pst->lt[NPC_LID_LC] == NPC_LT_LC_IP6)
422 lt = NPC_LT_LD_ICMP6;
425 info.def_mask = &rte_flow_item_icmp_mask;
426 info.len = sizeof(struct rte_flow_item_icmp);
428 case RTE_FLOW_ITEM_TYPE_UDP:
430 info.def_mask = &rte_flow_item_udp_mask;
431 info.len = sizeof(struct rte_flow_item_udp);
433 case RTE_FLOW_ITEM_TYPE_TCP:
435 info.def_mask = &rte_flow_item_tcp_mask;
436 info.len = sizeof(struct rte_flow_item_tcp);
438 case RTE_FLOW_ITEM_TYPE_SCTP:
440 info.def_mask = &rte_flow_item_sctp_mask;
441 info.len = sizeof(struct rte_flow_item_sctp);
443 case RTE_FLOW_ITEM_TYPE_ESP:
445 info.def_mask = &rte_flow_item_esp_mask;
446 info.len = sizeof(struct rte_flow_item_esp);
448 case RTE_FLOW_ITEM_TYPE_GRE:
450 info.def_mask = &rte_flow_item_gre_mask;
451 info.len = sizeof(struct rte_flow_item_gre);
453 case RTE_FLOW_ITEM_TYPE_NVGRE:
/* NVGRE is GRE with the NVGRE layer flag set. */
455 lflags = NPC_F_GRE_NVGRE;
456 info.def_mask = &rte_flow_item_nvgre_mask;
457 info.len = sizeof(struct rte_flow_item_nvgre);
458 /* Further IP/Ethernet are parsed as tunneled */
/* Validate against the HW-supported mask and commit the layer. */
465 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
466 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
470 return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
/*
 * Look one item past the current LC item (skipping VOID/ANY) to detect
 * IP-in-IP or MPLS-in-IP tunneling; used so later layers are parsed as
 * tunneled variants. NOTE(review): the action taken on a match (likely a
 * flag set in @pst) is elided from this view — confirm against full source.
 */
474 flow_check_lc_ip_tunnel(struct otx2_parse_state *pst)
476 const struct rte_flow_item *pattern = pst->pattern + 1;
478 pattern = otx2_flow_skip_void_and_any_items(pattern);
479 if (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS ||
480 pattern->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
481 pattern->type == RTE_FLOW_ITEM_TYPE_IPV6)
485 /* Outer IPv4, Outer IPv6, MPLS, ARP */
/*
 * Parse the LC layer: outer L3 — IPv4, IPv6 or ARP — or an MPLS stack
 * (delegated to the MPLS parser). Also flags IP-in-IP / MPLS-in-IP
 * tunneling via flow_check_lc_ip_tunnel() before committing the layer.
 */
487 otx2_flow_parse_lc(struct otx2_parse_state *pst)
489 uint8_t hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
490 struct otx2_flow_item_info info;
/* Raw MPLS at LC goes through the dedicated MPLS stack parser. */
494 if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
495 return otx2_flow_parse_mpls(pst, NPC_LID_LC);
497 info.hw_mask = &hw_mask;
/* Select layer type and the default mask/length per outer L3 item. */
503 switch (pst->pattern->type) {
504 case RTE_FLOW_ITEM_TYPE_IPV4:
506 info.def_mask = &rte_flow_item_ipv4_mask;
507 info.len = sizeof(struct rte_flow_item_ipv4);
509 case RTE_FLOW_ITEM_TYPE_IPV6:
512 info.def_mask = &rte_flow_item_ipv6_mask;
513 info.len = sizeof(struct rte_flow_item_ipv6);
515 case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
517 info.def_mask = &rte_flow_item_arp_eth_ipv4_mask;
518 info.len = sizeof(struct rte_flow_item_arp_eth_ipv4);
521 /* No match at this layer */
525 /* Identify if IP tunnels MPLS or IPv4/v6 */
526 flow_check_lc_ip_tunnel(pst);
/* Validate against the HW-supported mask and commit the layer. */
528 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
529 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
533 return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
/*
 * Parse the LB layer: outer VLAN tag stack (CTAG/STAG) or an 802.1BR
 * E-TAG (optionally followed by one CTAG). Only the first VLAN tag has
 * data-matching support; additional tags just select the layer flags.
 */
538 otx2_flow_parse_lb(struct otx2_parse_state *pst)
540 const struct rte_flow_item *pattern = pst->pattern;
541 const struct rte_flow_item *last_pattern;
542 char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
543 struct otx2_flow_item_info info;
/* LB data in NPC is extracted past the TPID. */
550 info.hw_hdr_len = NPC_TPID_LENGTH;
554 last_pattern = pattern;
556 if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
557 /* RTE vlan is either 802.1q or 802.1ad,
558 * this maps to either CTAG/STAG. We need to decide
559 * based on number of VLANS present. Matching is
560 * supported on first tag only.
562 info.def_mask = &rte_flow_item_vlan_mask;
564 info.len = sizeof(struct rte_flow_item_vlan);
/* Count consecutive VLAN items, validating each against the def mask. */
566 pattern = pst->pattern;
567 while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
570 /* Basic validation of 2nd/3rd vlan item */
572 otx2_npc_dbg("Vlans = %d", nr_vlans);
573 rc = otx2_flow_parse_item_basic(pattern, &info,
578 last_pattern = pattern;
580 pattern = otx2_flow_skip_void_and_any_items(pattern);
/* Map VLAN count to layer flags; more than 3 tags is unsupported. */
589 lflags = NPC_F_STAG_CTAG;
593 lflags = NPC_F_STAG_STAG_CTAG;
596 rte_flow_error_set(pst->error, ENOTSUP,
597 RTE_FLOW_ERROR_TYPE_ITEM,
599 "more than 3 vlans not supported");
602 } else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_E_TAG) {
603 /* we can support ETAG and match a subsequent CTAG
604 * without any matching support.
/* Look past the E-TAG for a CTAG that is detected but not matched. */
609 last_pattern = pst->pattern;
610 pattern = otx2_flow_skip_void_and_any_items(pst->pattern + 1);
611 if (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
612 info.def_mask = &rte_flow_item_vlan_mask;
613 /* set supported mask to NULL for vlan tag */
615 info.len = sizeof(struct rte_flow_item_vlan);
616 rc = otx2_flow_parse_item_basic(pattern, &info,
621 lflags = NPC_F_ETAG_CTAG;
622 last_pattern = pattern;
/* Matching itself is done on the E-TAG item. */
625 info.def_mask = &rte_flow_item_e_tag_mask;
626 info.len = sizeof(struct rte_flow_item_e_tag);
/* Validate the first tag against the HW mask and commit the layer. */
631 info.hw_mask = &hw_mask;
634 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
636 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
640 /* Point pattern to last item consumed */
641 pst->pattern = last_pattern;
642 return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
/*
 * Parse the LA layer: the outermost Ethernet header. On the TX interface
 * the packet carries an instruction header (IH) before Ethernet, which is
 * reflected in the layer type and the HW header length.
 */
646 otx2_flow_parse_la(struct otx2_parse_state *pst)
648 struct rte_flow_item_eth hw_mask;
649 struct otx2_flow_item_info info;
653 /* Identify the pattern type into lid, lt */
654 if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
658 lt = NPC_LT_LA_ETHER;
/* TX packets have the NIX instruction header before the Ethernet header. */
661 if (pst->flow->nix_intf == NIX_INTF_TX) {
662 lt = NPC_LT_LA_IH_NIX_ETHER;
663 info.hw_hdr_len = NPC_IH_LENGTH;
666 /* Prepare for parsing the item */
667 info.def_mask = &rte_flow_item_eth_mask;
668 info.hw_mask = &hw_mask;
669 info.len = sizeof(struct rte_flow_item_eth);
670 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
674 /* Basic validation of item parameters */
675 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
679 /* Update pst if not validate only? clash check? */
680 return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);