/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_ethdev.h"
#include "otx2_flow.h"
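
/*
 * Layer naming used below (informal summary added by the editor; the
 * authoritative definitions are in hw/otx2_npc.h): NPC classifies a packet
 * into layers LA..LH, where LA is the outer Ethernet, LB VLAN/E-TAG, LC the
 * outer L3, LD the outer L4 or tunnel, LE a UDP-encapsulated tunnel or MPLS,
 * and LF/LG/LH the inner (tunneled) Ethernet, L3 and L4. Each
 * otx2_flow_parse_l*() handler consumes the rte_flow items of one layer.
 */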

const struct rte_flow_item *
otx2_flow_skip_void_and_any_items(const struct rte_flow_item *pattern)
{
	while ((pattern->type == RTE_FLOW_ITEM_TYPE_VOID) ||
	       (pattern->type == RTE_FLOW_ITEM_TYPE_ANY))
		pattern++;

	return pattern;
}

/*
 * Tunnel+ESP, Tunnel+ICMP4/6, Tunnel+TCP, Tunnel+UDP,
 * Tunnel+SCTP
 */
int
otx2_flow_parse_lh(struct otx2_parse_state *pst)
{
	struct otx2_flow_item_info info;
	char hw_mask[64];
	int lid, lt;
	int rc;

	if (!pst->tunnel)
		return 0;

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LH;

	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_UDP:
		lt = NPC_LT_LH_TU_UDP;
		info.def_mask = &rte_flow_item_udp_mask;
		info.len = sizeof(struct rte_flow_item_udp);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		lt = NPC_LT_LH_TU_TCP;
		info.def_mask = &rte_flow_item_tcp_mask;
		info.len = sizeof(struct rte_flow_item_tcp);
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		lt = NPC_LT_LH_TU_SCTP;
		info.def_mask = &rte_flow_item_sctp_mask;
		info.len = sizeof(struct rte_flow_item_sctp);
		break;
	case RTE_FLOW_ITEM_TYPE_ESP:
		lt = NPC_LT_LH_TU_ESP;
		info.def_mask = &rte_flow_item_esp_mask;
		info.len = sizeof(struct rte_flow_item_esp);
		break;
	default:
		return 0;
	}

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}
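
/*
 * Illustrative example (not from the original source): with a pattern such
 * as "eth / ipv4 / udp / vxlan / eth / ipv4 / udp / end", the final UDP
 * item is the tunneled L4 header and is consumed by otx2_flow_parse_lh()
 * above as NPC_LT_LH_TU_UDP.
 */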

/* Tunnel+IPv4, Tunnel+IPv6 */
int
otx2_flow_parse_lg(struct otx2_parse_state *pst)
{
	struct otx2_flow_item_info info;
	char hw_mask[64];
	int lid, lt;
	int rc;

	if (!pst->tunnel)
		return 0;

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LG;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		lt = NPC_LT_LG_TU_IP;
		info.def_mask = &rte_flow_item_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_ipv4);
	} else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		lt = NPC_LT_LG_TU_IP6;
		info.def_mask = &rte_flow_item_ipv6_mask;
		info.len = sizeof(struct rte_flow_item_ipv6);
	} else {
		/* There is no tunneled IP header */
		return 0;
	}

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}

/* Tunnel+Ether */
int
otx2_flow_parse_lf(struct otx2_parse_state *pst)
{
	const struct rte_flow_item *pattern, *last_pattern;
	struct rte_flow_item_eth hw_mask;
	struct otx2_flow_item_info info;
	int lid, lt, lflags;
	int nr_vlans = 0;
	int rc;

	/* We hit this layer if there is a tunneling protocol */
	if (!pst->tunnel)
		return 0;

	if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
		return 0;

	lid = NPC_LID_LF;
	lt = NPC_LT_LF_TU_ETHER;
	lflags = 0;

	info.def_mask = &rte_flow_item_vlan_mask;
	/* No match support for vlan tags */
	info.hw_mask = NULL;
	info.len = sizeof(struct rte_flow_item_vlan);
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;

	/* Look ahead and find out any VLAN tags. These can be
	 * detected but no data matching is available.
	 */
	last_pattern = pst->pattern;
	pattern = pst->pattern + 1;
	pattern = otx2_flow_skip_void_and_any_items(pattern);
	while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		nr_vlans++;
		rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
		if (rc != 0)
			return rc;
		last_pattern = pattern;
		pattern++;
		pattern = otx2_flow_skip_void_and_any_items(pattern);
	}
	otx2_npc_dbg("Nr_vlans = %d", nr_vlans);
	switch (nr_vlans) {
	case 0:
		break;
	case 1:
		lflags = NPC_F_TU_ETHER_CTAG;
		break;
	case 2:
		lflags = NPC_F_TU_ETHER_STAG_CTAG;
		break;
	default:
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   last_pattern,
				   "more than 2 vlans with tunneled Ethernet "
				   "not supported");
		return -rte_errno;
	}

	info.def_mask = &rte_flow_item_eth_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_eth);
	info.hw_hdr_len = 0;
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	info.spec = NULL;
	info.mask = NULL;

	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	pst->pattern = last_pattern;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}
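
/*
 * Illustrative example (not from the original source): for
 * "... / vxlan / eth / vlan / vlan / ipv4 / end", otx2_flow_parse_lf()
 * above consumes the inner Ethernet plus up to two VLAN items and sets
 * NPC_F_TU_ETHER_STAG_CTAG, but the VLAN tags themselves are only
 * detected, not matched (info.hw_mask is NULL while scanning them).
 */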

int
otx2_flow_parse_le(struct otx2_parse_state *pst)
{
	/*
	 * We are positioned at UDP. Scan ahead and look for
	 * UDP encapsulated tunnel protocols. If available,
	 * parse them. In that case handle this:
	 * - RTE spec assumes we point to tunnel header.
	 * - NPC parser provides offset from UDP header.
	 */

	/*
	 * Note: Add support to GENEVE, VXLAN_GPE when we
	 * upgrade to rte_flow 18.05.
	 *
	 * Note: Better to split flags into two nibbles:
	 * - Higher nibble can have flags
	 * - Lower nibble to further enumerate protocols
	 *   and have flags based extraction
	 */
	const struct rte_flow_item *pattern = pst->pattern;
	struct otx2_flow_item_info info;
	char hw_mask[64];
	int lid, lt, lflags;
	int rc;

	if (pst->tunnel)
		return 0;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
		return otx2_flow_parse_mpls(pst, NPC_LID_LE);

	info.spec = NULL;
	info.mask = NULL;
	info.hw_mask = NULL;
	info.def_mask = NULL;
	info.len = 0;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LE;
	lflags = 0;

	/* Ensure we are not matching anything in UDP */
	rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
	if (rc)
		return rc;

	info.hw_mask = &hw_mask;
	pattern = otx2_flow_skip_void_and_any_items(pattern);
	otx2_npc_dbg("Pattern->type = %d", pattern->type);
	switch (pattern->type) {
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		lflags = NPC_F_UDP_VXLAN;
		info.def_mask = &rte_flow_item_vxlan_mask;
		info.len = sizeof(struct rte_flow_item_vxlan);
		lt = NPC_LT_LE_VXLAN;
		break;
	case RTE_FLOW_ITEM_TYPE_GTPC:
		lflags = NPC_F_UDP_GTP_GTPC;
		info.def_mask = &rte_flow_item_gtp_mask;
		info.len = sizeof(struct rte_flow_item_gtp);
		lt = NPC_LT_LE_GTPC;
		break;
	case RTE_FLOW_ITEM_TYPE_GTPU:
		lflags = NPC_F_UDP_GTP_GTPU_G_PDU;
		info.def_mask = &rte_flow_item_gtp_mask;
		info.len = sizeof(struct rte_flow_item_gtp);
		lt = NPC_LT_LE_GTPU;
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		lflags = NPC_F_UDP_GENEVE;
		info.def_mask = &rte_flow_item_geneve_mask;
		info.len = sizeof(struct rte_flow_item_geneve);
		lt = NPC_LT_LE_GENEVE;
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		lflags = NPC_F_UDP_VXLANGPE;
		info.def_mask = &rte_flow_item_vxlan_gpe_mask;
		info.len = sizeof(struct rte_flow_item_vxlan_gpe);
		lt = NPC_LT_LE_VXLANGPE;
		break;
	default:
		return 0;
	}

	pst->tunnel = 1;

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}
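
/*
 * Illustrative example (not from the original source, testpmd syntax):
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan / end
 *        actions queue index 2 / end
 * The VXLAN item selects NPC_LT_LE_VXLAN with lflags NPC_F_UDP_VXLAN,
 * while the hardware LPTR keeps pointing at the UDP header, hence the
 * flag-based identification above.
 */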

static int
flow_parse_mpls_label_stack(struct otx2_parse_state *pst, int *flag)
{
	int nr_labels = 0;
	const struct rte_flow_item *pattern = pst->pattern;
	struct otx2_flow_item_info info;
	int rc;
	uint8_t flag_list[] = {0, NPC_F_MPLS_2_LABELS,
		NPC_F_MPLS_3_LABELS, NPC_F_MPLS_4_LABELS};

	/*
	 * pst->pattern points to first MPLS label. We only check
	 * that subsequent labels do not have anything to match.
	 */
	info.def_mask = &rte_flow_item_mpls_mask;
	info.hw_mask = NULL;
	info.len = sizeof(struct rte_flow_item_mpls);
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;

	while (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS) {
		nr_labels++;

		/* Basic validation of 2nd/3rd/4th mpls item */
		if (nr_labels > 1) {
			rc = otx2_flow_parse_item_basic(pattern, &info,
							pst->error);
			if (rc != 0)
				return rc;
		}
		pst->last_pattern = pattern;
		pattern++;
		pattern = otx2_flow_skip_void_and_any_items(pattern);
	}

	if (nr_labels > 4) {
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pst->last_pattern,
				   "more than 4 mpls labels not supported");
		return -rte_errno;
	}

	*flag = flag_list[nr_labels - 1];
	return 0;
}

int
otx2_flow_parse_mpls(struct otx2_parse_state *pst, int lid)
{
	/* Find number of MPLS labels */
	struct rte_flow_item_mpls hw_mask;
	struct otx2_flow_item_info info;
	int lt, lflags;
	int rc;

	lflags = 0;

	if (lid == NPC_LID_LC)
		lt = NPC_LT_LC_MPLS;
	else if (lid == NPC_LID_LD)
		lt = NPC_LT_LD_TU_MPLS_IN_IP;
	else
		lt = NPC_LT_LE_TU_MPLS_IN_UDP;

	/* Prepare for parsing the first item */
	info.def_mask = &rte_flow_item_mpls_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_mpls);
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	/*
	 * Parse for more labels.
	 * This sets lflags and pst->last_pattern correctly.
	 */
	rc = flow_parse_mpls_label_stack(pst, &lflags);
	if (rc != 0)
		return rc;

	pst->tunnel = 1;
	pst->pattern = pst->last_pattern;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}
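
/*
 * Illustrative example (not from the original source): the pattern
 * "eth / mpls / mpls / mpls / end" is consumed as a single layer
 * (NPC_LT_LC_MPLS at LC) with lflags = NPC_F_MPLS_3_LABELS; only the
 * first label is matchable, the remaining ones are merely validated to
 * carry no spec.
 */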

/*
 * ICMP, ICMP6, UDP, TCP, SCTP, VXLAN, GRE, NVGRE,
 * GTP, GTPC, GTPU, ESP
 *
 * Note: UDP tunnel protocols are identified by flags.
 * LPTR for these protocols still points to the UDP
 * header. Need flag based extraction to support
 * this.
 */
int
otx2_flow_parse_ld(struct otx2_parse_state *pst)
{
	char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	struct otx2_flow_item_info info;
	int lid, lt, lflags;
	int rc;

	if (pst->tunnel) {
		/* We have already parsed MPLS or IPv4/v6 followed
		 * by MPLS or IPv4/v6. Subsequent TCP/UDP etc
		 * would be parsed as tunneled versions. Skip
		 * this layer, except for tunneled MPLS. If LC is
		 * MPLS, we have anyway skipped all stacked MPLS
		 * labels.
		 */
		if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
			return otx2_flow_parse_mpls(pst, NPC_LID_LD);
		return 0;
	}
	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.def_mask = NULL;
	info.len = 0;
	info.hw_hdr_len = 0;

	lid = NPC_LID_LD;
	lflags = 0;

	otx2_npc_dbg("Pst->pattern->type = %d", pst->pattern->type);
	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_ICMP:
		if (pst->lt[NPC_LID_LC] == NPC_LT_LC_IP6)
			lt = NPC_LT_LD_ICMP6;
		else
			lt = NPC_LT_LD_ICMP;
		info.def_mask = &rte_flow_item_icmp_mask;
		info.len = sizeof(struct rte_flow_item_icmp);
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		lt = NPC_LT_LD_UDP;
		info.def_mask = &rte_flow_item_udp_mask;
		info.len = sizeof(struct rte_flow_item_udp);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		lt = NPC_LT_LD_TCP;
		info.def_mask = &rte_flow_item_tcp_mask;
		info.len = sizeof(struct rte_flow_item_tcp);
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		lt = NPC_LT_LD_SCTP;
		info.def_mask = &rte_flow_item_sctp_mask;
		info.len = sizeof(struct rte_flow_item_sctp);
		break;
	case RTE_FLOW_ITEM_TYPE_ESP:
		lt = NPC_LT_LD_ESP;
		info.def_mask = &rte_flow_item_esp_mask;
		info.len = sizeof(struct rte_flow_item_esp);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		lt = NPC_LT_LD_GRE;
		info.def_mask = &rte_flow_item_gre_mask;
		info.len = sizeof(struct rte_flow_item_gre);
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		lt = NPC_LT_LD_NVGRE;
		lflags = NPC_F_GRE_NVGRE;
		info.def_mask = &rte_flow_item_nvgre_mask;
		info.len = sizeof(struct rte_flow_item_nvgre);
		/* Further IP/Ethernet are parsed as tunneled */
		pst->tunnel = 1;
		break;
	default:
		return 0;
	}

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}
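
/*
 * Illustrative example (not from the original source): with
 * "eth / ipv4 / nvgre / eth / ipv4 / end", the NVGRE item sets
 * pst->tunnel above, so the trailing eth/ipv4 items are handed to the
 * LF/LG parsers as the inner frame.
 */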

static inline void
flow_check_lc_ip_tunnel(struct otx2_parse_state *pst)
{
	const struct rte_flow_item *pattern = pst->pattern + 1;

	pattern = otx2_flow_skip_void_and_any_items(pattern);
	if (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS ||
	    pattern->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    pattern->type == RTE_FLOW_ITEM_TYPE_IPV6)
		pst->tunnel = 1;
}

/* Outer IPv4, Outer IPv6, MPLS, ARP */
int
otx2_flow_parse_lc(struct otx2_parse_state *pst)
{
	uint8_t hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	struct otx2_flow_item_info info;
	int lid, lt;
	int rc;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
		return otx2_flow_parse_mpls(pst, NPC_LID_LC);

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LC;

	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_IPV4:
		lt = NPC_LT_LC_IP;
		info.def_mask = &rte_flow_item_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_ipv4);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		lid = NPC_LID_LC;
		lt = NPC_LT_LC_IP6;
		info.def_mask = &rte_flow_item_ipv6_mask;
		info.len = sizeof(struct rte_flow_item_ipv6);
		break;
	case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
		lt = NPC_LT_LC_ARP;
		info.def_mask = &rte_flow_item_arp_eth_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_arp_eth_ipv4);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
		lid = NPC_LID_LC;
		lt = NPC_LT_LC_IP6_EXT;
		info.def_mask = &rte_flow_item_ipv6_ext_mask;
		info.len = sizeof(struct rte_flow_item_ipv6_ext);
		info.hw_hdr_len = 40; /* size of the fixed IPv6 header */
		break;
	default:
		/* No match at this layer */
		return 0;
	}

	/* Identify if IP tunnels MPLS or IPv4/v6 */
	flow_check_lc_ip_tunnel(pst);

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}
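
/*
 * Illustrative example (not from the original source): for IP-in-IP,
 * "eth / ipv4 / ipv4 / end", flow_check_lc_ip_tunnel() sets pst->tunnel
 * while the outer IPv4 is parsed here, and the second IPv4 item is then
 * consumed by otx2_flow_parse_lg() as tunneled IP.
 */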

/* VLAN, ETAG */
int
otx2_flow_parse_lb(struct otx2_parse_state *pst)
{
	const struct rte_flow_item *pattern = pst->pattern;
	const struct rte_flow_item *last_pattern;
	char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	struct otx2_flow_item_info info;
	int lid, lt, lflags;
	int nr_vlans = 0;
	int rc;

	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = NPC_TPID_LENGTH;

	lid = NPC_LID_LB;
	lflags = 0;
	last_pattern = pattern;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		/* RTE vlan is either 802.1q or 802.1ad,
		 * this maps to either CTAG/STAG. We need to decide
		 * based on the number of VLANs present. Matching is
		 * supported on the first tag only.
		 */
		info.def_mask = &rte_flow_item_vlan_mask;
		info.hw_mask = NULL;
		info.len = sizeof(struct rte_flow_item_vlan);

		pattern = pst->pattern;
		while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			nr_vlans++;

			/* Basic validation of 2nd/3rd vlan item */
			if (nr_vlans > 1) {
				otx2_npc_dbg("Vlans = %d", nr_vlans);
				rc = otx2_flow_parse_item_basic(pattern, &info,
								pst->error);
				if (rc != 0)
					return rc;
			}
			last_pattern = pattern;
			pattern++;
			pattern = otx2_flow_skip_void_and_any_items(pattern);
		}

		switch (nr_vlans) {
		case 1:
			lt = NPC_LT_LB_CTAG;
			break;
		case 2:
			lt = NPC_LT_LB_STAG;
			lflags = NPC_F_STAG_CTAG;
			break;
		case 3:
			lt = NPC_LT_LB_STAG;
			lflags = NPC_F_STAG_STAG_CTAG;
			break;
		default:
			rte_flow_error_set(pst->error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   last_pattern,
					   "more than 3 vlans not supported");
			return -rte_errno;
		}
	} else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_E_TAG) {
		/* We can support ETAG and detect a subsequent CTAG
		 * without any match support on its fields.
		 */
		lt = NPC_LT_LB_ETAG;
		lflags = 0;

		last_pattern = pst->pattern;
		pattern = otx2_flow_skip_void_and_any_items(pst->pattern + 1);
		if (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			info.def_mask = &rte_flow_item_vlan_mask;
			/* set supported mask to NULL for vlan tag */
			info.hw_mask = NULL;
			info.len = sizeof(struct rte_flow_item_vlan);
			rc = otx2_flow_parse_item_basic(pattern, &info,
							pst->error);
			if (rc != 0)
				return rc;

			lflags = NPC_F_ETAG_CTAG;
			last_pattern = pattern;
		}

		info.def_mask = &rte_flow_item_e_tag_mask;
		info.len = sizeof(struct rte_flow_item_e_tag);
	} else {
		return 0;
	}

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);

	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	/* Point pattern to last item consumed */
	pst->pattern = last_pattern;
	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}
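
/*
 * Illustrative example (not from the original source): a QinQ pattern
 * "eth / vlan / vlan / ipv4 / end" maps to lt = NPC_LT_LB_STAG with
 * lflags = NPC_F_STAG_CTAG; matching is supported on the first tag only.
 */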

int
otx2_flow_parse_la(struct otx2_parse_state *pst)
{
	struct rte_flow_item_eth hw_mask;
	struct otx2_flow_item_info info;
	int lid, lt;
	int rc;

	/* Identify the pattern type into lid, lt */
	if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
		return 0;

	lid = NPC_LID_LA;
	lt = NPC_LT_LA_ETHER;
	info.hw_hdr_len = 0;

	if (pst->flow->nix_intf == NIX_INTF_TX) {
		lt = NPC_LT_LA_IH_NIX_ETHER;
		info.hw_hdr_len = NPC_IH_LENGTH;
	}

	/* Prepare for parsing the item */
	info.def_mask = &rte_flow_item_eth_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_eth);
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	info.spec = NULL;
	info.mask = NULL;

	/* Basic validation of item parameters */
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc)
		return rc;

	/* Update pst if not validate only? clash check? */
	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}

static int
parse_rss_action(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_action *act,
		 struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_rss_info *rss_info = &hw->rss_info;
	const struct rte_flow_action_rss *rss;
	uint32_t i;

	rss = (const struct rte_flow_action_rss *)act->conf;

	/* Not supported */
	if (attr->egress) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  attr, "No support of RSS in egress");
	}

	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "multi-queue mode is disabled");

	/* Parse RSS related parameters from configuration */
	if (!rss || !rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "no valid queues");

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "non-default RSS hash functions"
					  " are not supported");

	if (rss->key_len && rss->key_len > RTE_DIM(rss_info->key))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "RSS hash key too large");

	if (rss->queue_num > rss_info->rss_size)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "too many queues for RSS context");

	for (i = 0; i < rss->queue_num; i++) {
		if (rss->queue[i] >= dev->data->nb_rx_queues)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  &rss->queue[i],
						  "queue id > max number"
						  " of queues");
	}

	return 0;
}

int
otx2_flow_parse_actions(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct rte_flow *flow)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	const struct rte_flow_action_count *act_count;
	const struct rte_flow_action_mark *act_mark;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *vf_act;
	const char *errmsg = NULL;
	int sel_act, req_act = 0;
	uint16_t pf_func, vf_id;
	int errcode = 0;
	int mark = 0;
	int rq = 0;

	/* Initialize actions */
	flow->ctr_id = NPC_COUNTER_NONE;
	pf_func = otx2_pfvf_func(hw->pf, hw->vf);

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		otx2_npc_dbg("Action type = %d", actions->type);

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			act_mark =
			    (const struct rte_flow_action_mark *)actions->conf;

			/* We have only 16 bits. Use highest val for flag */
			if (act_mark->id > (OTX2_FLOW_FLAG_VAL - 2)) {
				errmsg = "mark value must be < 0xfffe";
				errcode = ENOTSUP;
				goto err_exit;
			}
			mark = act_mark->id + 1;
			req_act |= OTX2_FLOW_ACT_MARK;
			rte_atomic32_inc(&npc->mark_actions);
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			mark = OTX2_FLOW_FLAG_VAL;
			req_act |= OTX2_FLOW_ACT_FLAG;
			rte_atomic32_inc(&npc->mark_actions);
			break;

		case RTE_FLOW_ACTION_TYPE_COUNT:
			act_count =
				(const struct rte_flow_action_count *)
					actions->conf;

			if (act_count->shared == 1) {
				errmsg = "Shared Counters not supported";
				errcode = ENOTSUP;
				goto err_exit;
			}
			/* Indicates a counter is needed */
			flow->ctr_id = 1;
			req_act |= OTX2_FLOW_ACT_COUNT;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			req_act |= OTX2_FLOW_ACT_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_PF:
			req_act |= OTX2_FLOW_ACT_PF;
			pf_func &= (0xfc00);
			break;

		case RTE_FLOW_ACTION_TYPE_VF:
			vf_act = (const struct rte_flow_action_vf *)
				actions->conf;
			req_act |= OTX2_FLOW_ACT_VF;
			if (vf_act->original == 0) {
				vf_id = (vf_act->id & RVU_PFVF_FUNC_MASK) + 1;
				if (vf_id >= hw->maxvf) {
					errmsg = "invalid vf specified";
					errcode = EINVAL;
					goto err_exit;
				}
				pf_func &= (0xfc00);
				pf_func = (pf_func | vf_id);
			}
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			/* Applicable only to ingress flow */
			act_q = (const struct rte_flow_action_queue *)
				actions->conf;
			rq = act_q->index;
			if (rq >= dev->data->nb_rx_queues) {
				errmsg = "invalid queue index";
				errcode = EINVAL;
				goto err_exit;
			}
			req_act |= OTX2_FLOW_ACT_QUEUE;
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			errcode = parse_rss_action(dev, attr, actions, error);
			if (errcode)
				return -rte_errno;

			req_act |= OTX2_FLOW_ACT_RSS;
			break;

		case RTE_FLOW_ACTION_TYPE_SECURITY:
			/* Assumes user has already configured security
			 * session for this flow. Associated conf is
			 * opaque. When RTE security is implemented for otx2,
			 * we need to verify that for specified security
			 * session:
			 *  action_type ==
			 *	RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
			 *  session_protocol ==
			 *	RTE_SECURITY_PROTOCOL_IPSEC
			 *
			 * RSS is not supported with inline ipsec. Get the
			 * rq from associated conf, or make
			 * RTE_FLOW_ACTION_TYPE_QUEUE compulsory with this
			 * action.
			 * Currently, rq = 0 is assumed.
			 */
			req_act |= OTX2_FLOW_ACT_SEC;
			rq = 0;
			break;
		default:
			errmsg = "Unsupported action specified";
			errcode = ENOTSUP;
			goto err_exit;
		}
	}

	/* Check if actions specified are compatible */
	if (attr->egress) {
		/* Only DROP/COUNT is supported */
		if (!(req_act & OTX2_FLOW_ACT_DROP)) {
			errmsg = "DROP is required action for egress";
			errcode = EINVAL;
			goto err_exit;
		} else if (req_act & ~(OTX2_FLOW_ACT_DROP |
				       OTX2_FLOW_ACT_COUNT)) {
			errmsg = "Unsupported action specified";
			errcode = ENOTSUP;
			goto err_exit;
		}
		flow->npc_action = NIX_TX_ACTIONOP_DROP;
		goto set_pf_func;
	}

	/* We have already verified the attr, this is ingress.
	 * - Exactly one terminating action is supported
	 * - Exactly one of MARK or FLAG is supported
	 * - If terminating action is DROP, only count is valid.
	 */
	sel_act = req_act & OTX2_FLOW_ACT_TERM;
	if ((sel_act & (sel_act - 1)) != 0) {
		errmsg = "Only one terminating action supported";
		errcode = EINVAL;
		goto err_exit;
	}

	if (req_act & OTX2_FLOW_ACT_DROP) {
		sel_act = req_act & ~OTX2_FLOW_ACT_COUNT;
		if ((sel_act & (sel_act - 1)) != 0) {
			errmsg = "Only COUNT action is supported "
				"with DROP ingress action";
			errcode = ENOTSUP;
			goto err_exit;
		}
	}

	if ((req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) ==
	    (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
		errmsg = "Only one of FLAG or MARK action is supported";
		errcode = ENOTSUP;
		goto err_exit;
	}

	/* Set NIX_RX_ACTIONOP */
	if (req_act & (OTX2_FLOW_ACT_PF | OTX2_FLOW_ACT_VF)) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
		if (req_act & OTX2_FLOW_ACT_QUEUE)
			flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & OTX2_FLOW_ACT_DROP) {
		flow->npc_action = NIX_RX_ACTIONOP_DROP;
	} else if (req_act & OTX2_FLOW_ACT_QUEUE) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
		flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & OTX2_FLOW_ACT_RSS) {
		/* When the user adds a rule for RSS, first add the
		 * rule in MCAM and then update the action once the
		 * FLOW_KEY_ALG index is known. Until the action is
		 * updated with the flow_key_alg index, set it to drop.
		 */
		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
			flow->npc_action = NIX_RX_ACTIONOP_DROP;
		else
			flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else if (req_act & OTX2_FLOW_ACT_SEC) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC;
		flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else if (req_act & OTX2_FLOW_ACT_COUNT) {
		/* Keep OTX2_FLOW_ACT_COUNT always at the end.
		 * This is the default action when the user specifies
		 * only a COUNT-like action.
		 */
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else {
		/* Should never reach here */
		errmsg = "Invalid action specified";
		errcode = EINVAL;
		goto err_exit;
	}

	if (mark)
		flow->npc_action |= (uint64_t)mark << 40;

	if (rte_atomic32_read(&npc->mark_actions) == 1) {
		hw->rx_offload_flags |=
			NIX_RX_OFFLOAD_MARK_UPDATE_F;
		otx2_eth_set_rx_function(dev);
	}

set_pf_func:
	/* Ideally AF must ensure that correct pf_func is set */
	flow->npc_action |= (uint64_t)pf_func << 4;

	return 0;

err_exit:
	rte_flow_error_set(error, errcode,
			   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
			   errmsg);
	return -rte_errno;
}