1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include "otx2_ethdev.h"
/* Skip over RTE_FLOW_ITEM_TYPE_VOID and RTE_FLOW_ITEM_TYPE_ANY entries in
 * a pattern list and return the first "real" item (which may be END).
 * NOTE(review): loop body (item advance) and return are outside this view.
 */
8 const struct rte_flow_item *
9 otx2_flow_skip_void_and_any_items(const struct rte_flow_item *pattern)
11 while ((pattern->type == RTE_FLOW_ITEM_TYPE_VOID) ||
12 (pattern->type == RTE_FLOW_ITEM_TYPE_ANY))
19 * Tunnel+ESP, Tunnel+ICMP4/6, Tunnel+TCP, Tunnel+UDP,
/* Parse the LH (innermost, tunneled L4) layer: maps the current pattern
 * item to an NPC_LT_LH_TU_* layer type with the matching default mask
 * and item length, validates it against the HW-supported mask, and
 * records the result in the parse state.
 */
23 otx2_flow_parse_lh(struct otx2_parse_state *pst)
25 struct otx2_flow_item_info info;
33 info.hw_mask = &hw_mask;
/* Choose layer type / default mask / spec length from the item type. */
39 switch (pst->pattern->type) {
40 case RTE_FLOW_ITEM_TYPE_UDP:
41 lt = NPC_LT_LH_TU_UDP;
42 info.def_mask = &rte_flow_item_udp_mask;
43 info.len = sizeof(struct rte_flow_item_udp);
45 case RTE_FLOW_ITEM_TYPE_TCP:
46 lt = NPC_LT_LH_TU_TCP;
47 info.def_mask = &rte_flow_item_tcp_mask;
48 info.len = sizeof(struct rte_flow_item_tcp);
50 case RTE_FLOW_ITEM_TYPE_SCTP:
51 lt = NPC_LT_LH_TU_SCTP;
52 info.def_mask = &rte_flow_item_sctp_mask;
53 info.len = sizeof(struct rte_flow_item_sctp);
55 case RTE_FLOW_ITEM_TYPE_ESP:
56 lt = NPC_LT_LH_TU_ESP;
57 info.def_mask = &rte_flow_item_esp_mask;
58 info.len = sizeof(struct rte_flow_item_esp);
/* Clamp the requested match to what HW can extract for (lid, lt),
 * then do basic spec/mask validation of the item itself.
 */
64 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
65 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
69 return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
72 /* Tunnel+IPv4, Tunnel+IPv6 */
/* Parse the LG layer: the inner (tunneled) IP header. Selects the
 * IPv4/IPv6 tunneled layer type, validates the item against HW
 * capabilities, and updates the parse state.
 */
74 otx2_flow_parse_lg(struct otx2_parse_state *pst)
76 struct otx2_flow_item_info info;
84 info.hw_mask = &hw_mask;
90 if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
92 info.def_mask = &rte_flow_item_ipv4_mask;
93 info.len = sizeof(struct rte_flow_item_ipv4);
94 } else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV6) {
95 lt = NPC_LT_LG_TU_IP6;
96 info.def_mask = &rte_flow_item_ipv6_mask;
97 info.len = sizeof(struct rte_flow_item_ipv6);
/* Neither IPv4 nor IPv6: nothing to consume at this layer. */
99 /* There is no tunneled IP header */
103 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
104 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
108 return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
/* Parse the LF layer: tunneled (inner) Ethernet header, optionally
 * followed by VLAN tags. VLAN presence is detected and encoded in
 * lflags (CTAG or STAG+CTAG), but VLAN field matching is not supported
 * — at most 2 VLANs are accepted, more is rejected with ENOTSUP.
 */
113 otx2_flow_parse_lf(struct otx2_parse_state *pst)
115 const struct rte_flow_item *pattern, *last_pattern;
116 struct rte_flow_item_eth hw_mask;
117 struct otx2_flow_item_info info;
122 /* We hit this layer if there is a tunneling protocol */
126 if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
130 lt = NPC_LT_LF_TU_ETHER;
/* Prepare a VLAN-item template; hw_mask intentionally not offered,
 * so VLAN items may only be "presence" matches (no field data).
 */
133 info.def_mask = &rte_flow_item_vlan_mask;
134 /* No match support for vlan tags */
136 info.len = sizeof(struct rte_flow_item_vlan);
141 /* Look ahead and find out any VLAN tags. These can be
142 * detected but no data matching is available.
144 last_pattern = pst->pattern;
145 pattern = pst->pattern + 1;
146 pattern = otx2_flow_skip_void_and_any_items(pattern);
/* Count consecutive VLAN items (VOID/ANY transparently skipped). */
147 while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
149 rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
152 last_pattern = pattern;
154 pattern = otx2_flow_skip_void_and_any_items(pattern);
156 otx2_npc_dbg("Nr_vlans = %d", nr_vlans);
/* Encode VLAN count into layer flags; >2 is unsupported. */
161 lflags = NPC_F_TU_ETHER_CTAG;
164 lflags = NPC_F_TU_ETHER_STAG_CTAG;
167 rte_flow_error_set(pst->error, ENOTSUP,
168 RTE_FLOW_ERROR_TYPE_ITEM,
170 "more than 2 vlans with tunneled Ethernet "
/* Now validate the Ethernet item itself against HW support. */
175 info.def_mask = &rte_flow_item_eth_mask;
176 info.hw_mask = &hw_mask;
177 info.len = sizeof(struct rte_flow_item_eth);
179 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
183 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
/* Consume the ETH item plus any trailing VLANs we looked ahead over. */
187 pst->pattern = last_pattern;
189 return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
/* Parse the LE layer: UDP-encapsulated tunnel protocols (VXLAN, ESP,
 * GTPC/GTPU, GENEVE, VXLAN-GPE) or MPLS-in-UDP. First verifies that
 * nothing is being matched inside the UDP header itself, then looks
 * ahead for the tunnel item and classifies it via lt/lflags.
 */
193 otx2_flow_parse_le(struct otx2_parse_state *pst)
196 * We are positioned at UDP. Scan ahead and look for
197 * UDP encapsulated tunnel protocols. If available,
198 * parse them. In that case handle this:
199 * - RTE spec assumes we point to tunnel header.
200 * - NPC parser provides offset from UDP header.
204 * Note: Add support to GENEVE, VXLAN_GPE when we
207 * Note: Better to split flags into two nibbles:
208 * - Higher nibble can have flags
209 * - Lower nibble to further enumerate protocols
210 * and have flags based extraction
212 const struct rte_flow_item *pattern = pst->pattern;
213 struct otx2_flow_item_info info;
/* MPLS at this position is handled by the dedicated MPLS parser. */
221 if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
222 return otx2_flow_parse_mpls(pst, NPC_LID_LE);
227 info.def_mask = NULL;
233 /* Ensure we are not matching anything in UDP */
234 rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
/* Look past the UDP item for the actual tunnel header item. */
238 info.hw_mask = &hw_mask;
239 pattern = otx2_flow_skip_void_and_any_items(pattern);
240 otx2_npc_dbg("Pattern->type = %d", pattern->type);
241 switch (pattern->type) {
242 case RTE_FLOW_ITEM_TYPE_VXLAN:
243 lflags = NPC_F_UDP_VXLAN;
244 info.def_mask = &rte_flow_item_vxlan_mask;
245 info.len = sizeof(struct rte_flow_item_vxlan);
246 lt = NPC_LT_LE_VXLAN;
248 case RTE_FLOW_ITEM_TYPE_ESP:
250 info.def_mask = &rte_flow_item_esp_mask;
251 info.len = sizeof(struct rte_flow_item_esp);
253 case RTE_FLOW_ITEM_TYPE_GTPC:
254 lflags = NPC_F_UDP_GTP_GTPC;
255 info.def_mask = &rte_flow_item_gtp_mask;
256 info.len = sizeof(struct rte_flow_item_gtp);
259 case RTE_FLOW_ITEM_TYPE_GTPU:
260 lflags = NPC_F_UDP_GTP_GTPU_G_PDU;
261 info.def_mask = &rte_flow_item_gtp_mask;
262 info.len = sizeof(struct rte_flow_item_gtp);
265 case RTE_FLOW_ITEM_TYPE_GENEVE:
266 lflags = NPC_F_UDP_GENEVE;
267 info.def_mask = &rte_flow_item_geneve_mask;
268 info.len = sizeof(struct rte_flow_item_geneve);
269 lt = NPC_LT_LE_GENEVE;
271 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
272 lflags = NPC_F_UDP_VXLANGPE;
273 info.def_mask = &rte_flow_item_vxlan_gpe_mask;
274 info.len = sizeof(struct rte_flow_item_vxlan_gpe);
275 lt = NPC_LT_LE_VXLANGPE;
/* Validate the tunnel item against the HW-supported mask and record. */
283 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
284 rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
288 return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
/* Count the MPLS labels in the stack starting at pst->pattern and
 * return via *flag an NPC_F_MPLS_*_LABELS flag for 1-4 labels.
 * Labels after the first must not carry any match data; more than 4
 * labels is rejected with ENOTSUP. Also advances pst->last_pattern to
 * the last MPLS item consumed.
 */
292 flow_parse_mpls_label_stack(struct otx2_parse_state *pst, int *flag)
295 const struct rte_flow_item *pattern = pst->pattern;
296 struct otx2_flow_item_info info;
/* flag_list[i] is the flag for (i + 1) labels; one label needs none. */
298 uint8_t flag_list[] = {0, NPC_F_MPLS_2_LABELS,
299 NPC_F_MPLS_3_LABELS, NPC_F_MPLS_4_LABELS};
302 * pst->pattern points to first MPLS label. We only check
303 * that subsequent labels do not have anything to match.
305 info.def_mask = &rte_flow_item_mpls_mask;
307 info.len = sizeof(struct rte_flow_item_mpls);
312 while (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS) {
315 /* Basic validation of 2nd/3rd/4th mpls item */
317 rc = otx2_flow_parse_item_basic(pattern, &info,
322 pst->last_pattern = pattern;
324 pattern = otx2_flow_skip_void_and_any_items(pattern);
328 rte_flow_error_set(pst->error, ENOTSUP,
329 RTE_FLOW_ERROR_TYPE_ITEM,
331 "more than 4 mpls labels not supported");
335 *flag = flag_list[nr_labels - 1];
/* Parse an MPLS label stack at layer 'lid' (LC, LD or LE — the layer
 * type encodes whether MPLS is outer, in-IP or in-UDP). Validates the
 * first label, then consumes/validates any stacked labels via
 * flow_parse_mpls_label_stack() which sets lflags and last_pattern.
 */
340 otx2_flow_parse_mpls(struct otx2_parse_state *pst, int lid)
342 /* Find number of MPLS labels */
343 struct rte_flow_item_mpls hw_mask;
344 struct otx2_flow_item_info info;
/* Layer type depends on which layer the MPLS stack appears in. */
350 if (lid == NPC_LID_LC)
352 else if (lid == NPC_LID_LD)
353 lt = NPC_LT_LD_TU_MPLS_IN_IP;
355 lt = NPC_LT_LE_TU_MPLS_IN_UDP;
357 /* Prepare for parsing the first item */
358 info.def_mask = &rte_flow_item_mpls_mask;
359 info.hw_mask = &hw_mask;
360 info.len = sizeof(struct rte_flow_item_mpls);
365 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
366 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
371 * Parse for more labels.
372 * This sets lflags and pst->last_pattern correctly.
374 rc = flow_parse_mpls_label_stack(pst, &lflags);
/* Consume the whole label stack before updating the parse state. */
379 pst->pattern = pst->last_pattern;
381 return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
385 * ICMP, ICMP6, UDP, TCP, SCTP, VXLAN, GRE, NVGRE,
386 * GTP, GTPC, GTPU, ESP
388 * Note: UDP tunnel protocols are identified by flags.
389 * LPTR for these protocol still points to UDP
390 * header. Need flag based extraction to support
/* Parse the LD layer: outer L4 / GRE-family protocols. MPLS here is
 * delegated to the MPLS parser; if a tunnel has already been seen
 * (pst->tunnel — per the comment below), this layer is skipped so the
 * item is parsed as its tunneled (LG/LH) counterpart instead.
 */
394 otx2_flow_parse_ld(struct otx2_parse_state *pst)
396 char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
/* GRE key item has no standard rte mask; match all 32 key bits. */
397 uint32_t gre_key_mask = 0xffffffff;
398 struct otx2_flow_item_info info;
403 /* We have already parsed MPLS or IPv4/v6 followed
404 * by MPLS or IPv4/v6. Subsequent TCP/UDP etc
405 * would be parsed as tunneled versions. Skip
406 * this layer, except for tunneled MPLS. If LC is
407 * MPLS, we have anyway skipped all stacked MPLS
410 if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
411 return otx2_flow_parse_mpls(pst, NPC_LID_LD);
414 info.hw_mask = &hw_mask;
417 info.def_mask = NULL;
424 otx2_npc_dbg("Pst->pattern->type = %d", pst->pattern->type);
425 switch (pst->pattern->type) {
426 case RTE_FLOW_ITEM_TYPE_ICMP:
/* ICMP item maps to ICMP6 layer type when the LC layer was IPv6. */
427 if (pst->lt[NPC_LID_LC] == NPC_LT_LC_IP6)
428 lt = NPC_LT_LD_ICMP6;
431 info.def_mask = &rte_flow_item_icmp_mask;
432 info.len = sizeof(struct rte_flow_item_icmp);
434 case RTE_FLOW_ITEM_TYPE_UDP:
436 info.def_mask = &rte_flow_item_udp_mask;
437 info.len = sizeof(struct rte_flow_item_udp);
439 case RTE_FLOW_ITEM_TYPE_TCP:
441 info.def_mask = &rte_flow_item_tcp_mask;
442 info.len = sizeof(struct rte_flow_item_tcp);
444 case RTE_FLOW_ITEM_TYPE_SCTP:
446 info.def_mask = &rte_flow_item_sctp_mask;
447 info.len = sizeof(struct rte_flow_item_sctp);
449 case RTE_FLOW_ITEM_TYPE_GRE:
451 info.def_mask = &rte_flow_item_gre_mask;
452 info.len = sizeof(struct rte_flow_item_gre);
454 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
456 info.def_mask = &gre_key_mask;
457 info.len = sizeof(gre_key_mask);
460 case RTE_FLOW_ITEM_TYPE_NVGRE:
461 lt = NPC_LT_LD_NVGRE;
462 lflags = NPC_F_GRE_NVGRE;
463 info.def_mask = &rte_flow_item_nvgre_mask;
464 info.len = sizeof(struct rte_flow_item_nvgre);
465 /* Further IP/Ethernet are parsed as tunneled */
472 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
473 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
477 return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
/* Look one item past the current LC (outer IP) item: if it is MPLS or
 * another IPv4/IPv6 header, the flow carries an IP tunnel — the parse
 * state is flagged accordingly so later layers parse as tunneled.
 */
481 flow_check_lc_ip_tunnel(struct otx2_parse_state *pst)
483 const struct rte_flow_item *pattern = pst->pattern + 1;
485 pattern = otx2_flow_skip_void_and_any_items(pattern);
486 if (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS ||
487 pattern->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
488 pattern->type == RTE_FLOW_ITEM_TYPE_IPV6)
/* Expand a RAW flow item into flat spec/mask byte buffers: the raw
 * pattern bytes are copied at raw_spec->offset, and the mask is taken
 * from raw_mask->pattern when given, otherwise all-ones over the raw
 * length. info is pointed at the buffers with len = offset + length.
 * Callers must ensure offset + length <= NPC_MAX_RAW_ITEM_LEN.
 */
493 otx2_flow_raw_item_prepare(const struct rte_flow_item_raw *raw_spec,
494 const struct rte_flow_item_raw *raw_mask,
495 struct otx2_flow_item_info *info,
496 uint8_t *spec_buf, uint8_t *mask_buf)
498 uint32_t custom_hdr_size = 0;
500 memset(spec_buf, 0, NPC_MAX_RAW_ITEM_LEN);
501 memset(mask_buf, 0, NPC_MAX_RAW_ITEM_LEN);
502 custom_hdr_size = raw_spec->offset + raw_spec->length;
504 memcpy(spec_buf + raw_spec->offset, raw_spec->pattern,
/* Explicit mask provided: honor it; otherwise match every raw byte. */
507 if (raw_mask->pattern) {
508 memcpy(mask_buf + raw_spec->offset, raw_mask->pattern,
511 memset(mask_buf + raw_spec->offset, 0xFF, raw_spec->length);
514 info->len = custom_hdr_size;
515 info->spec = spec_buf;
516 info->mask = mask_buf;
521 /* Outer IPv4, Outer IPv6, MPLS, ARP */
/* Parse the LC layer: outer L3 — IPv4, IPv6, ARP, IPv6 extension
 * header, a RAW custom L3 header, or MPLS (delegated to the MPLS
 * parser). Afterwards checks whether the next item makes this an IP
 * tunnel so subsequent layers are treated as tunneled.
 */
523 otx2_flow_parse_lc(struct otx2_parse_state *pst)
525 uint8_t raw_spec_buf[NPC_MAX_RAW_ITEM_LEN];
526 uint8_t raw_mask_buf[NPC_MAX_RAW_ITEM_LEN];
527 uint8_t hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
528 const struct rte_flow_item_raw *raw_spec;
529 struct otx2_flow_item_info info;
533 if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
534 return otx2_flow_parse_mpls(pst, NPC_LID_LC);
536 info.hw_mask = &hw_mask;
542 switch (pst->pattern->type) {
543 case RTE_FLOW_ITEM_TYPE_IPV4:
545 info.def_mask = &rte_flow_item_ipv4_mask;
546 info.len = sizeof(struct rte_flow_item_ipv4);
548 case RTE_FLOW_ITEM_TYPE_IPV6:
551 info.def_mask = &rte_flow_item_ipv6_mask;
552 info.len = sizeof(struct rte_flow_item_ipv6);
554 case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
556 info.def_mask = &rte_flow_item_arp_eth_ipv4_mask;
557 info.len = sizeof(struct rte_flow_item_arp_eth_ipv4);
559 case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
561 lt = NPC_LT_LC_IP6_EXT;
562 info.def_mask = &rte_flow_item_ipv6_ext_mask;
563 info.len = sizeof(struct rte_flow_item_ipv6_ext);
/* 40 bytes = fixed IPv6 base header preceding the extension header. */
564 info.hw_hdr_len = 40;
566 case RTE_FLOW_ITEM_TYPE_RAW:
567 raw_spec = pst->pattern->spec;
/* At LC, only relative RAW items are meaningful (offset from L3). */
568 if (!raw_spec->relative)
/* Bound the expanded spec to the scratch buffers' capacity. */
571 len = raw_spec->length + raw_spec->offset;
572 if (len > NPC_MAX_RAW_ITEM_LEN) {
573 rte_flow_error_set(pst->error, EINVAL,
574 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
575 "Spec length too big");
579 otx2_flow_raw_item_prepare((const struct rte_flow_item_raw *)
581 (const struct rte_flow_item_raw *)
582 pst->pattern->mask, &info,
583 raw_spec_buf, raw_mask_buf);
587 info.hw_mask = &hw_mask;
588 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
591 /* No match at this layer */
595 /* Identify if IP tunnels MPLS or IPv4/v6 */
596 flow_check_lc_ip_tunnel(pst);
598 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
599 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
603 return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
/* Parse the LB layer: outer VLAN tags (CTAG/STAG/QinQ), E-TAG
 * (optionally followed by an unmatched CTAG), or a RAW item for
 * EXDSA / VLAN-EXDSA switch headers. Matching is supported on the
 * first VLAN tag only; the number of tags selects lt/lflags, and
 * more than 3 VLANs is rejected with ENOTSUP.
 */
608 otx2_flow_parse_lb(struct otx2_parse_state *pst)
610 const struct rte_flow_item *pattern = pst->pattern;
611 uint8_t raw_spec_buf[NPC_MAX_RAW_ITEM_LEN];
612 uint8_t raw_mask_buf[NPC_MAX_RAW_ITEM_LEN];
613 const struct rte_flow_item *last_pattern;
614 const struct rte_flow_item_raw *raw_spec;
615 char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
616 struct otx2_flow_item_info info;
617 int lid, lt, lflags, len;
/* TPID bytes precede the extracted VLAN data in the NPC layout. */
623 info.hw_hdr_len = NPC_TPID_LENGTH;
627 last_pattern = pattern;
629 if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
630 /* RTE vlan is either 802.1q or 802.1ad,
631 * this maps to either CTAG/STAG. We need to decide
632 * based on number of VLANS present. Matching is
633 * supported on first tag only.
635 info.def_mask = &rte_flow_item_vlan_mask;
637 info.len = sizeof(struct rte_flow_item_vlan);
639 pattern = pst->pattern;
/* Walk the run of VLAN items, validating each (VOID/ANY skipped). */
640 while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
643 /* Basic validation of 2nd/3rd vlan item */
645 otx2_npc_dbg("Vlans = %d", nr_vlans);
646 rc = otx2_flow_parse_item_basic(pattern, &info,
651 last_pattern = pattern;
653 pattern = otx2_flow_skip_void_and_any_items(pattern);
/* Map tag count to layer type/flags; >3 tags is unsupported. */
661 lt = NPC_LT_LB_STAG_QINQ;
662 lflags = NPC_F_STAG_CTAG;
665 lt = NPC_LT_LB_STAG_QINQ;
666 lflags = NPC_F_STAG_STAG_CTAG;
669 rte_flow_error_set(pst->error, ENOTSUP,
670 RTE_FLOW_ERROR_TYPE_ITEM,
672 "more than 3 vlans not supported");
675 } else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_E_TAG) {
676 /* we can support ETAG and match a subsequent CTAG
677 * without any matching support.
682 last_pattern = pst->pattern;
683 pattern = otx2_flow_skip_void_and_any_items(pst->pattern + 1);
684 if (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
685 info.def_mask = &rte_flow_item_vlan_mask;
686 /* set supported mask to NULL for vlan tag */
688 info.len = sizeof(struct rte_flow_item_vlan);
689 rc = otx2_flow_parse_item_basic(pattern, &info,
694 lflags = NPC_F_ETAG_CTAG;
695 last_pattern = pattern;
/* Reset info to describe the E-TAG item for the real match below. */
698 info.def_mask = &rte_flow_item_e_tag_mask;
699 info.len = sizeof(struct rte_flow_item_e_tag);
700 } else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_RAW) {
701 raw_spec = pst->pattern->spec;
/* At LB a RAW item must be absolute (EXDSA header at fixed offset). */
702 if (raw_spec->relative)
704 len = raw_spec->length + raw_spec->offset;
705 if (len > NPC_MAX_RAW_ITEM_LEN) {
706 rte_flow_error_set(pst->error, EINVAL,
707 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
708 "Spec length too big");
/* RAW at LB only makes sense when an EXDSA variant is configured. */
712 if (pst->npc->switch_header_type ==
713 OTX2_PRIV_FLAGS_VLAN_EXDSA) {
714 lt = NPC_LT_LB_VLAN_EXDSA;
715 } else if (pst->npc->switch_header_type ==
716 OTX2_PRIV_FLAGS_EXDSA) {
717 lt = NPC_LT_LB_EXDSA;
719 rte_flow_error_set(pst->error, ENOTSUP,
720 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
721 "exdsa or vlan_exdsa not enabled on"
726 otx2_flow_raw_item_prepare((const struct rte_flow_item_raw *)
728 (const struct rte_flow_item_raw *)
729 pst->pattern->mask, &info,
730 raw_spec_buf, raw_mask_buf);
737 info.hw_mask = &hw_mask;
738 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
740 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
744 /* Point pattern to last item consumed */
745 pst->pattern = last_pattern;
746 return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
/* Parse the LA layer: outermost Ethernet header. On the TX interface
 * the NIX instruction header (IH) precedes the packet, and with the
 * HIGIG switch-header feature a HIGIG2 header is accounted for as
 * well — both only adjust hw_hdr_len/lt, not the match itself.
 */
751 otx2_flow_parse_la(struct otx2_parse_state *pst)
753 struct rte_flow_item_eth hw_mask;
754 struct otx2_flow_item_info info;
758 /* Identify the pattern type into lid, lt */
759 if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
763 lt = NPC_LT_LA_ETHER;
/* TX flows carry the NIX IH (and possibly HIGIG2) before Ethernet. */
766 if (pst->flow->nix_intf == NIX_INTF_TX) {
767 lt = NPC_LT_LA_IH_NIX_ETHER;
768 info.hw_hdr_len = NPC_IH_LENGTH;
769 if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
770 lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER;
771 info.hw_hdr_len += NPC_HIGIG2_LENGTH;
/* RX side: only the HIGIG2 header may precede Ethernet. */
774 if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
775 lt = NPC_LT_LA_HIGIG2_ETHER;
776 info.hw_hdr_len = NPC_HIGIG2_LENGTH;
780 /* Prepare for parsing the item */
781 info.def_mask = &rte_flow_item_eth_mask;
782 info.hw_mask = &hw_mask;
783 info.len = sizeof(struct rte_flow_item_eth);
784 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
788 /* Basic validation of item parameters */
789 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
793 /* Update pst if not validate only? clash check? */
794 return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
/* Parse a HIGIG2 item at the LA layer: matches the HIGIG2 switch
 * header itself. Mirrors otx2_flow_parse_la() structurally; on TX the
 * NIX instruction header length is added in front.
 */
798 otx2_flow_parse_higig2_hdr(struct otx2_parse_state *pst)
800 struct rte_flow_item_higig2_hdr hw_mask;
801 struct otx2_flow_item_info info;
805 /* Identify the pattern type into lid, lt */
806 if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_HIGIG2)
810 lt = NPC_LT_LA_HIGIG2_ETHER;
813 if (pst->flow->nix_intf == NIX_INTF_TX) {
814 lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER;
815 info.hw_hdr_len = NPC_IH_LENGTH;
818 /* Prepare for parsing the item */
819 info.def_mask = &rte_flow_item_higig2_hdr_mask;
820 info.hw_mask = &hw_mask;
821 info.len = sizeof(struct rte_flow_item_higig2_hdr);
822 otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
826 /* Basic validation of item parameters */
827 rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
831 /* Update pst if not validate only? clash check? */
832 return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
/* Validate an RTE_FLOW_ACTION_TYPE_RSS action against device state:
 * rejects egress RSS, non-RSS mq_mode, empty queue lists, non-default
 * hash functions, oversized keys, too many queues for the RSS context,
 * and out-of-range queue ids. Returns 0 on success or the negative
 * value produced by rte_flow_error_set() on failure.
 */
836 parse_rss_action(struct rte_eth_dev *dev,
837 const struct rte_flow_attr *attr,
838 const struct rte_flow_action *act,
839 struct rte_flow_error *error)
841 struct otx2_eth_dev *hw = dev->data->dev_private;
842 struct otx2_rss_info *rss_info = &hw->rss_info;
843 const struct rte_flow_action_rss *rss;
846 rss = (const struct rte_flow_action_rss *)act->conf;
/* RSS steers to RX queues; meaningless on an egress flow. */
850 return rte_flow_error_set(error, EINVAL,
851 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
852 attr, "No support of RSS in egress");
855 if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS)
856 return rte_flow_error_set(error, ENOTSUP,
857 RTE_FLOW_ERROR_TYPE_ACTION,
858 act, "multi-queue mode is disabled");
860 /* Parse RSS related parameters from configuration */
861 if (!rss || !rss->queue_num)
862 return rte_flow_error_set(error, EINVAL,
863 RTE_FLOW_ERROR_TYPE_ACTION,
864 act, "no valid queues");
866 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
867 return rte_flow_error_set(error, ENOTSUP,
868 RTE_FLOW_ERROR_TYPE_ACTION, act,
869 "non-default RSS hash functions"
870 " are not supported");
/* key_len == 0 means "use the device's current key" and is fine. */
872 if (rss->key_len && rss->key_len > RTE_DIM(rss_info->key))
873 return rte_flow_error_set(error, ENOTSUP,
874 RTE_FLOW_ERROR_TYPE_ACTION, act,
875 "RSS hash key too large");
877 if (rss->queue_num > rss_info->rss_size)
878 return rte_flow_error_set
879 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
880 "too many queues for RSS context");
/* Every listed queue must be a configured RX queue. */
882 for (i = 0; i < rss->queue_num; i++) {
883 if (rss->queue[i] >= dev->data->nb_rx_queues)
884 return rte_flow_error_set(error, EINVAL,
885 RTE_FLOW_ERROR_TYPE_ACTION,
887 "queue id > max number"
895 otx2_flow_parse_actions(struct rte_eth_dev *dev,
896 const struct rte_flow_attr *attr,
897 const struct rte_flow_action actions[],
898 struct rte_flow_error *error,
899 struct rte_flow *flow)
901 struct otx2_eth_dev *hw = dev->data->dev_private;
902 struct otx2_npc_flow_info *npc = &hw->npc_flow;
903 const struct rte_flow_action_mark *act_mark;
904 const struct rte_flow_action_queue *act_q;
905 const struct rte_flow_action_vf *vf_act;
906 uint16_t pf_func, vf_id, port_id, pf_id;
907 char if_name[RTE_ETH_NAME_MAX_LEN];
908 bool vlan_insert_action = false;
909 struct rte_eth_dev *eth_dev;
910 const char *errmsg = NULL;
911 int sel_act, req_act = 0;
916 /* Initialize actions */
917 flow->ctr_id = NPC_COUNTER_NONE;
918 pf_func = otx2_pfvf_func(hw->pf, hw->vf);
920 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
921 otx2_npc_dbg("Action type = %d", actions->type);
923 switch (actions->type) {
924 case RTE_FLOW_ACTION_TYPE_VOID:
926 case RTE_FLOW_ACTION_TYPE_MARK:
928 (const struct rte_flow_action_mark *)actions->conf;
930 /* We have only 16 bits. Use highest val for flag */
931 if (act_mark->id > (OTX2_FLOW_FLAG_VAL - 2)) {
932 errmsg = "mark value must be < 0xfffe";
936 mark = act_mark->id + 1;
937 req_act |= OTX2_FLOW_ACT_MARK;
938 rte_atomic32_inc(&npc->mark_actions);
941 case RTE_FLOW_ACTION_TYPE_FLAG:
942 mark = OTX2_FLOW_FLAG_VAL;
943 req_act |= OTX2_FLOW_ACT_FLAG;
944 rte_atomic32_inc(&npc->mark_actions);
947 case RTE_FLOW_ACTION_TYPE_COUNT:
948 /* Indicates, need a counter */
950 req_act |= OTX2_FLOW_ACT_COUNT;
953 case RTE_FLOW_ACTION_TYPE_DROP:
954 req_act |= OTX2_FLOW_ACT_DROP;
957 case RTE_FLOW_ACTION_TYPE_PF:
958 req_act |= OTX2_FLOW_ACT_PF;
962 case RTE_FLOW_ACTION_TYPE_VF:
963 vf_act = (const struct rte_flow_action_vf *)
965 req_act |= OTX2_FLOW_ACT_VF;
966 if (vf_act->original == 0) {
967 vf_id = vf_act->id & RVU_PFVF_FUNC_MASK;
968 if (vf_id >= hw->maxvf) {
969 errmsg = "invalid vf specified";
974 pf_func = (pf_func | (vf_id + 1));
978 case RTE_FLOW_ACTION_TYPE_PORT_ID:
979 case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
980 if (actions->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
981 const struct rte_flow_action_port_id *port_act;
983 port_act = actions->conf;
984 port_id = port_act->id;
986 const struct rte_flow_action_ethdev *ethdev_act;
988 ethdev_act = actions->conf;
989 port_id = ethdev_act->port_id;
991 if (rte_eth_dev_get_name_by_port(port_id, if_name)) {
992 errmsg = "Name not found for output port id";
996 eth_dev = rte_eth_dev_allocated(if_name);
998 errmsg = "eth_dev not found for output port id";
1002 if (!otx2_ethdev_is_same_driver(eth_dev)) {
1003 errmsg = "Output port id unsupported type";
1007 if (!otx2_dev_is_vf(otx2_eth_pmd_priv(eth_dev))) {
1008 errmsg = "Output port should be VF";
1012 vf_id = otx2_eth_pmd_priv(eth_dev)->vf;
1013 if (vf_id >= hw->maxvf) {
1014 errmsg = "Invalid vf for output port";
1018 pf_id = otx2_eth_pmd_priv(eth_dev)->pf;
1019 if (pf_id != hw->pf) {
1020 errmsg = "Output port unsupported PF";
1024 pf_func &= (0xfc00);
1025 pf_func = (pf_func | (vf_id + 1));
1026 req_act |= OTX2_FLOW_ACT_VF;
1029 case RTE_FLOW_ACTION_TYPE_QUEUE:
1030 /* Applicable only to ingress flow */
1031 act_q = (const struct rte_flow_action_queue *)
1034 if (rq >= dev->data->nb_rx_queues) {
1035 errmsg = "invalid queue index";
1039 req_act |= OTX2_FLOW_ACT_QUEUE;
1042 case RTE_FLOW_ACTION_TYPE_RSS:
1043 errcode = parse_rss_action(dev, attr, actions, error);
1047 req_act |= OTX2_FLOW_ACT_RSS;
1050 case RTE_FLOW_ACTION_TYPE_SECURITY:
1051 /* Assumes user has already configured security
1052 * session for this flow. Associated conf is
1053 * opaque. When RTE security is implemented for otx2,
1054 * we need to verify that for specified security
1057 * RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
1058 * session_protocol ==
1059 * RTE_SECURITY_PROTOCOL_IPSEC
1061 * RSS is not supported with inline ipsec. Get the
1062 * rq from associated conf, or make
1063 * RTE_FLOW_ACTION_TYPE_QUEUE compulsory with this
1065 * Currently, rq = 0 is assumed.
1067 req_act |= OTX2_FLOW_ACT_SEC;
1070 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
1071 req_act |= OTX2_FLOW_ACT_VLAN_INSERT;
1073 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
1074 req_act |= OTX2_FLOW_ACT_VLAN_STRIP;
1076 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
1077 req_act |= OTX2_FLOW_ACT_VLAN_ETHTYPE_INSERT;
1079 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
1080 req_act |= OTX2_FLOW_ACT_VLAN_PCP_INSERT;
1083 errmsg = "Unsupported action specified";
1090 (OTX2_FLOW_ACT_VLAN_INSERT | OTX2_FLOW_ACT_VLAN_ETHTYPE_INSERT |
1091 OTX2_FLOW_ACT_VLAN_PCP_INSERT))
1092 vlan_insert_action = true;
1095 (OTX2_FLOW_ACT_VLAN_INSERT | OTX2_FLOW_ACT_VLAN_ETHTYPE_INSERT |
1096 OTX2_FLOW_ACT_VLAN_PCP_INSERT)) ==
1097 OTX2_FLOW_ACT_VLAN_PCP_INSERT) {
1098 errmsg = " PCP insert action can't be supported alone";
1103 /* Both STRIP and INSERT actions are not supported */
1104 if (vlan_insert_action && (req_act & OTX2_FLOW_ACT_VLAN_STRIP)) {
1105 errmsg = "Both VLAN insert and strip actions not supported"
1111 /* Check if actions specified are compatible */
1113 if (req_act & OTX2_FLOW_ACT_VLAN_STRIP) {
1114 errmsg = "VLAN pop action is not supported on Egress";
1119 if (req_act & OTX2_FLOW_ACT_DROP) {
1120 flow->npc_action = NIX_TX_ACTIONOP_DROP;
1121 } else if ((req_act & OTX2_FLOW_ACT_COUNT) ||
1122 vlan_insert_action) {
1123 flow->npc_action = NIX_TX_ACTIONOP_UCAST_DEFAULT;
1125 errmsg = "Unsupported action for egress";
1132 /* We have already verified the attr, this is ingress.
1133 * - Exactly one terminating action is supported
1134 * - Exactly one of MARK or FLAG is supported
1135 * - If terminating action is DROP, only count is valid.
1137 sel_act = req_act & OTX2_FLOW_ACT_TERM;
1138 if ((sel_act & (sel_act - 1)) != 0) {
1139 errmsg = "Only one terminating action supported";
1144 if (req_act & OTX2_FLOW_ACT_DROP) {
1145 sel_act = req_act & ~OTX2_FLOW_ACT_COUNT;
1146 if ((sel_act & (sel_act - 1)) != 0) {
1147 errmsg = "Only COUNT action is supported "
1148 "with DROP ingress action";
1154 if ((req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK))
1155 == (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
1156 errmsg = "Only one of FLAG or MARK action is supported";
1161 if (vlan_insert_action) {
1162 errmsg = "VLAN push/Insert action is not supported on Ingress";
1167 if (req_act & OTX2_FLOW_ACT_VLAN_STRIP)
1168 npc->vtag_actions++;
1170 /* Only VLAN action is provided */
1171 if (req_act == OTX2_FLOW_ACT_VLAN_STRIP)
1172 flow->npc_action = NIX_RX_ACTIONOP_UCAST;
1173 /* Set NIX_RX_ACTIONOP */
1174 else if (req_act & (OTX2_FLOW_ACT_PF | OTX2_FLOW_ACT_VF)) {
1175 flow->npc_action = NIX_RX_ACTIONOP_UCAST;
1176 if (req_act & OTX2_FLOW_ACT_QUEUE)
1177 flow->npc_action |= (uint64_t)rq << 20;
1178 } else if (req_act & OTX2_FLOW_ACT_DROP) {
1179 flow->npc_action = NIX_RX_ACTIONOP_DROP;
1180 } else if (req_act & OTX2_FLOW_ACT_QUEUE) {
1181 flow->npc_action = NIX_RX_ACTIONOP_UCAST;
1182 flow->npc_action |= (uint64_t)rq << 20;
1183 } else if (req_act & OTX2_FLOW_ACT_RSS) {
1184 /* When user added a rule for rss, first we will add the
1185 *rule in MCAM and then update the action, once if we have
1186 *FLOW_KEY_ALG index. So, till we update the action with
1187 *flow_key_alg index, set the action to drop.
1189 if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
1190 flow->npc_action = NIX_RX_ACTIONOP_DROP;
1192 flow->npc_action = NIX_RX_ACTIONOP_UCAST;
1193 } else if (req_act & OTX2_FLOW_ACT_SEC) {
1194 flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC;
1195 flow->npc_action |= (uint64_t)rq << 20;
1196 } else if (req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
1197 flow->npc_action = NIX_RX_ACTIONOP_UCAST;
1198 } else if (req_act & OTX2_FLOW_ACT_COUNT) {
1199 /* Keep OTX2_FLOW_ACT_COUNT always at the end
1200 * This is default action, when user specify only
1203 flow->npc_action = NIX_RX_ACTIONOP_UCAST;
1205 /* Should never reach here */
1206 errmsg = "Invalid action specified";
1212 flow->npc_action |= (uint64_t)mark << 40;
1214 if (rte_atomic32_read(&npc->mark_actions) == 1) {
1215 hw->rx_offload_flags |=
1216 NIX_RX_OFFLOAD_MARK_UPDATE_F;
1217 otx2_eth_set_rx_function(dev);
1220 if (npc->vtag_actions == 1) {
1221 hw->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
1222 otx2_eth_set_rx_function(dev);
1226 /* Ideally AF must ensure that correct pf_func is set */
1228 flow->npc_action |= (uint64_t)pf_func << 48;
1230 flow->npc_action |= (uint64_t)pf_func << 4;
1235 rte_flow_error_set(error, errcode,
1236 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,