/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_ethdev.h"
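
/*
 * The otx2_flow_parse_l*() handlers below each consume one NPC layer.
 * Roughly (an assumption based on the layer-type names used in this
 * file): LA = outer Ethernet, LB = VLAN/E-TAG, LC = outer IP/ARP/MPLS,
 * LD = L4 and tunnel headers, LE = UDP-encapsulated tunnels and
 * MPLS-in-UDP, LF = inner (tunneled) Ethernet, LG = inner IP,
 * LH = inner L4.
 */

/* Return the first pattern item that is neither VOID nor ANY (or the
 * END item if nothing else follows).
 */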
const struct rte_flow_item *
otx2_flow_skip_void_and_any_items(const struct rte_flow_item *pattern)
	while ((pattern->type == RTE_FLOW_ITEM_TYPE_VOID) ||
	       (pattern->type == RTE_FLOW_ITEM_TYPE_ANY))
/*
 * Tunnel+ESP, Tunnel+ICMP4/6, Tunnel+TCP, Tunnel+UDP,
 */
otx2_flow_parse_lh(struct otx2_parse_state *pst)
	struct otx2_flow_item_info info;

	info.hw_mask = &hw_mask;

	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_UDP:
		lt = NPC_LT_LH_TU_UDP;
		info.def_mask = &rte_flow_item_udp_mask;
		info.len = sizeof(struct rte_flow_item_udp);
	case RTE_FLOW_ITEM_TYPE_TCP:
		lt = NPC_LT_LH_TU_TCP;
		info.def_mask = &rte_flow_item_tcp_mask;
		info.len = sizeof(struct rte_flow_item_tcp);
	case RTE_FLOW_ITEM_TYPE_SCTP:
		lt = NPC_LT_LH_TU_SCTP;
		info.def_mask = &rte_flow_item_sctp_mask;
		info.len = sizeof(struct rte_flow_item_sctp);
	case RTE_FLOW_ITEM_TYPE_ESP:
		lt = NPC_LT_LH_TU_ESP;
		info.def_mask = &rte_flow_item_esp_mask;
		info.len = sizeof(struct rte_flow_item_esp);

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
/* Tunnel+IPv4, Tunnel+IPv6 */
otx2_flow_parse_lg(struct otx2_parse_state *pst)
	struct otx2_flow_item_info info;

	info.hw_mask = &hw_mask;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		info.def_mask = &rte_flow_item_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_ipv4);
	} else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		lt = NPC_LT_LG_TU_IP6;
		info.def_mask = &rte_flow_item_ipv6_mask;
		info.len = sizeof(struct rte_flow_item_ipv6);
		/* There is no tunneled IP header */

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
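
/* Tunneled (inner) Ethernet, optionally followed by up to two VLAN tags.
 * The VLAN tags can be detected here, but no data matching is available
 * on them (see the look-ahead loop below).
 */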
otx2_flow_parse_lf(struct otx2_parse_state *pst)
	const struct rte_flow_item *pattern, *last_pattern;
	struct rte_flow_item_eth hw_mask;
	struct otx2_flow_item_info info;

	/* We hit this layer if there is a tunneling protocol */
	if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)

	lt = NPC_LT_LF_TU_ETHER;

	info.def_mask = &rte_flow_item_vlan_mask;
	/* No match support for vlan tags */
	info.len = sizeof(struct rte_flow_item_vlan);
	/* Look ahead for any VLAN tags. These can be
	 * detected, but no data matching is available.
	 */
	last_pattern = pst->pattern;
	pattern = pst->pattern + 1;
	pattern = otx2_flow_skip_void_and_any_items(pattern);
	while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
		last_pattern = pattern;
		pattern = otx2_flow_skip_void_and_any_items(pattern);

	otx2_npc_dbg("Nr_vlans = %d", nr_vlans);

		lflags = NPC_F_TU_ETHER_CTAG;
		lflags = NPC_F_TU_ETHER_STAG_CTAG;
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   "more than 2 vlans with tunneled Ethernet "
	info.def_mask = &rte_flow_item_eth_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_eth);

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);

	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);

	pst->pattern = last_pattern;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
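
/* UDP-encapsulated tunnels (VXLAN, GTP-C/GTP-U, GENEVE, VXLAN-GPE) and
 * MPLS-in-UDP. For example, in a pattern such as
 *   ETH / IPV4 / UDP / VXLAN / ETH / IPV4
 * the VXLAN item is consumed here, while the UDP item itself is
 * restricted to an empty match (see the comment inside the function).
 */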
otx2_flow_parse_le(struct otx2_parse_state *pst)
	/*
	 * We are positioned at UDP. Scan ahead and look for
	 * UDP-encapsulated tunnel protocols. If available,
	 * parse them. In that case handle this:
	 *	- RTE spec assumes we point to tunnel header.
	 *	- NPC parser provides offset from UDP header.
	 *
	 * Note: Add support to GENEVE, VXLAN_GPE when we
	 *
	 * Note: Better to split flags into two nibbles:
	 *	- Higher nibble can have flags
	 *	- Lower nibble to further enumerate protocols
	 *	  and have flags-based extraction
	 */
	const struct rte_flow_item *pattern = pst->pattern;
	struct otx2_flow_item_info info;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
		return otx2_flow_parse_mpls(pst, NPC_LID_LE);

	info.def_mask = NULL;

	/* Ensure we are not matching anything in UDP */
	rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);

	info.hw_mask = &hw_mask;
	pattern = otx2_flow_skip_void_and_any_items(pattern);
	otx2_npc_dbg("Pattern->type = %d", pattern->type);
	switch (pattern->type) {
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		lflags = NPC_F_UDP_VXLAN;
		info.def_mask = &rte_flow_item_vxlan_mask;
		info.len = sizeof(struct rte_flow_item_vxlan);
		lt = NPC_LT_LE_VXLAN;
	case RTE_FLOW_ITEM_TYPE_GTPC:
		lflags = NPC_F_UDP_GTP_GTPC;
		info.def_mask = &rte_flow_item_gtp_mask;
		info.len = sizeof(struct rte_flow_item_gtp);
	case RTE_FLOW_ITEM_TYPE_GTPU:
		lflags = NPC_F_UDP_GTP_GTPU_G_PDU;
		info.def_mask = &rte_flow_item_gtp_mask;
		info.len = sizeof(struct rte_flow_item_gtp);
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		lflags = NPC_F_UDP_GENEVE;
		info.def_mask = &rte_flow_item_geneve_mask;
		info.len = sizeof(struct rte_flow_item_geneve);
		lt = NPC_LT_LE_GENEVE;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		lflags = NPC_F_UDP_VXLANGPE;
		info.def_mask = &rte_flow_item_vxlan_gpe_mask;
		info.len = sizeof(struct rte_flow_item_vxlan_gpe);
		lt = NPC_LT_LE_VXLANGPE;

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
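
/* Walk the MPLS labels that follow the first one. Additional labels are
 * only validated (no data matching on them), and *flag is set according
 * to the total number of labels, up to four.
 */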
flow_parse_mpls_label_stack(struct otx2_parse_state *pst, int *flag)
	const struct rte_flow_item *pattern = pst->pattern;
	struct otx2_flow_item_info info;

	uint8_t flag_list[] = {0, NPC_F_MPLS_2_LABELS,
		NPC_F_MPLS_3_LABELS, NPC_F_MPLS_4_LABELS};

	/*
	 * pst->pattern points to first MPLS label. We only check
	 * that subsequent labels do not have anything to match.
	 */
	info.def_mask = &rte_flow_item_mpls_mask;
	info.len = sizeof(struct rte_flow_item_mpls);

	while (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS) {

		/* Basic validation of 2nd/3rd/4th mpls item */
		rc = otx2_flow_parse_item_basic(pattern, &info,
		pst->last_pattern = pattern;
		pattern = otx2_flow_skip_void_and_any_items(pattern);

		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   "more than 4 mpls labels not supported");

	*flag = flag_list[nr_labels - 1];
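
/* Parse an MPLS label stack at the LC, LD or LE layer depending on where
 * it appears (outermost, MPLS-in-IP or MPLS-in-UDP). Data matching is
 * supported on the first label only; the remaining labels are consumed
 * by flow_parse_mpls_label_stack() above.
 */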
otx2_flow_parse_mpls(struct otx2_parse_state *pst, int lid)
	/* Find number of MPLS labels */
	struct rte_flow_item_mpls hw_mask;
	struct otx2_flow_item_info info;

	if (lid == NPC_LID_LC)
	else if (lid == NPC_LID_LD)
		lt = NPC_LT_LD_TU_MPLS_IN_IP;
		lt = NPC_LT_LE_TU_MPLS_IN_UDP;

	/* Prepare for parsing the first item */
	info.def_mask = &rte_flow_item_mpls_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_mpls);

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);

	/*
	 * Parse for more labels.
	 * This sets lflags and pst->last_pattern correctly.
	 */
	rc = flow_parse_mpls_label_stack(pst, &lflags);

	pst->pattern = pst->last_pattern;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
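
/* LD layer: the L4 or tunnel header that follows the outer IP header. */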
/*
 * ICMP, ICMP6, UDP, TCP, SCTP, VXLAN, GRE, NVGRE,
 * GTP, GTPC, GTPU, ESP
 *
 * Note: UDP tunnel protocols are identified by flags.
 *       LPTR for these protocols still points to the UDP
 *       header. Need flag-based extraction to support
 */
otx2_flow_parse_ld(struct otx2_parse_state *pst)
	char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	uint32_t gre_key_mask = 0xffffffff;
	struct otx2_flow_item_info info;
		/* We have already parsed MPLS or IPv4/v6 followed
		 * by MPLS or IPv4/v6. Subsequent TCP/UDP etc. will
		 * be parsed as tunneled versions. Skip
		 * this layer, except for tunneled MPLS. If LC is
		 * MPLS, we have anyway skipped all stacked MPLS
		 */
		if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
			return otx2_flow_parse_mpls(pst, NPC_LID_LD);
	info.hw_mask = &hw_mask;
	info.def_mask = NULL;

	otx2_npc_dbg("Pst->pattern->type = %d", pst->pattern->type);
	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_ICMP:
		if (pst->lt[NPC_LID_LC] == NPC_LT_LC_IP6)
			lt = NPC_LT_LD_ICMP6;
		info.def_mask = &rte_flow_item_icmp_mask;
		info.len = sizeof(struct rte_flow_item_icmp);
	case RTE_FLOW_ITEM_TYPE_UDP:
		info.def_mask = &rte_flow_item_udp_mask;
		info.len = sizeof(struct rte_flow_item_udp);
	case RTE_FLOW_ITEM_TYPE_TCP:
		info.def_mask = &rte_flow_item_tcp_mask;
		info.len = sizeof(struct rte_flow_item_tcp);
	case RTE_FLOW_ITEM_TYPE_SCTP:
		info.def_mask = &rte_flow_item_sctp_mask;
		info.len = sizeof(struct rte_flow_item_sctp);
	case RTE_FLOW_ITEM_TYPE_ESP:
		info.def_mask = &rte_flow_item_esp_mask;
		info.len = sizeof(struct rte_flow_item_esp);
	case RTE_FLOW_ITEM_TYPE_GRE:
		info.def_mask = &rte_flow_item_gre_mask;
		info.len = sizeof(struct rte_flow_item_gre);
	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
		info.def_mask = &gre_key_mask;
		info.len = sizeof(gre_key_mask);
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		lflags = NPC_F_GRE_NVGRE;
		info.def_mask = &rte_flow_item_nvgre_mask;
		info.len = sizeof(struct rte_flow_item_nvgre);
		/* Further IP/Ethernet are parsed as tunneled */

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
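
/* Peek at the item after the outer IP header; if MPLS, IPv4 or IPv6
 * follows, the parse state is marked as tunneled so that the remaining
 * layers are parsed with their tunneled (TU) layer types.
 */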
flow_check_lc_ip_tunnel(struct otx2_parse_state *pst)
	const struct rte_flow_item *pattern = pst->pattern + 1;

	pattern = otx2_flow_skip_void_and_any_items(pattern);
	if (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS ||
	    pattern->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    pattern->type == RTE_FLOW_ITEM_TYPE_IPV6)
/* Outer IPv4, Outer IPv6, MPLS, ARP */
otx2_flow_parse_lc(struct otx2_parse_state *pst)
	uint8_t hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	struct otx2_flow_item_info info;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
		return otx2_flow_parse_mpls(pst, NPC_LID_LC);

	info.hw_mask = &hw_mask;

	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_IPV4:
		info.def_mask = &rte_flow_item_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_ipv4);
	case RTE_FLOW_ITEM_TYPE_IPV6:
		info.def_mask = &rte_flow_item_ipv6_mask;
		info.len = sizeof(struct rte_flow_item_ipv6);
	case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
		info.def_mask = &rte_flow_item_arp_eth_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_arp_eth_ipv4);
	case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
		lt = NPC_LT_LC_IP6_EXT;
		info.def_mask = &rte_flow_item_ipv6_ext_mask;
		info.len = sizeof(struct rte_flow_item_ipv6_ext);
		info.hw_hdr_len = 40;
		/* No match at this layer */

	/* Check whether this IP header tunnels MPLS or IPv4/v6 */
	flow_check_lc_ip_tunnel(pst);

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
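
/* LB layer: VLAN tag(s) or an E-TAG following the outer Ethernet header.
 * For example, a pattern of ETH / VLAN / VLAN / IPV4 is treated as an
 * S-TAG plus C-TAG; data matching is supported on the first tag only.
 */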
otx2_flow_parse_lb(struct otx2_parse_state *pst)
	const struct rte_flow_item *pattern = pst->pattern;
	const struct rte_flow_item *last_pattern;
	char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	struct otx2_flow_item_info info;

	info.hw_hdr_len = NPC_TPID_LENGTH;

	last_pattern = pattern;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		/* An RTE VLAN item is either 802.1Q or 802.1AD;
		 * this maps to either CTAG or STAG. We decide based
		 * on the number of VLANs present. Matching is
		 * supported on the first tag only.
		 */
		info.def_mask = &rte_flow_item_vlan_mask;
		info.len = sizeof(struct rte_flow_item_vlan);

		pattern = pst->pattern;
		while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {

			/* Basic validation of 2nd/3rd vlan item */
			otx2_npc_dbg("Vlans = %d", nr_vlans);
			rc = otx2_flow_parse_item_basic(pattern, &info,
			last_pattern = pattern;
			pattern = otx2_flow_skip_void_and_any_items(pattern);

			lflags = NPC_F_STAG_CTAG;
			lflags = NPC_F_STAG_STAG_CTAG;
			rte_flow_error_set(pst->error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   "more than 3 vlans not supported");
	} else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_E_TAG) {
		/* We can detect an E-TAG and a subsequent CTAG, but
		 * no data matching is available on the CTAG.
		 */
		last_pattern = pst->pattern;
		pattern = otx2_flow_skip_void_and_any_items(pst->pattern + 1);
		if (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			info.def_mask = &rte_flow_item_vlan_mask;
			/* set supported mask to NULL for vlan tag */
			info.len = sizeof(struct rte_flow_item_vlan);
			rc = otx2_flow_parse_item_basic(pattern, &info,

			lflags = NPC_F_ETAG_CTAG;
			last_pattern = pattern;

		info.def_mask = &rte_flow_item_e_tag_mask;
		info.len = sizeof(struct rte_flow_item_e_tag);

	info.hw_mask = &hw_mask;

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);

	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);

	/* Point pattern to last item consumed */
	pst->pattern = last_pattern;
	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
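
/* LA layer: the outer Ethernet header. On the TX interface the NIX
 * instruction header precedes it, accounted for via NPC_IH_LENGTH.
 */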
otx2_flow_parse_la(struct otx2_parse_state *pst)
	struct rte_flow_item_eth hw_mask;
	struct otx2_flow_item_info info;

	/* Identify the pattern type into lid, lt */
	if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)

	lt = NPC_LT_LA_ETHER;

	if (pst->flow->nix_intf == NIX_INTF_TX) {
		lt = NPC_LT_LA_IH_NIX_ETHER;
		info.hw_hdr_len = NPC_IH_LENGTH;

	/* Prepare for parsing the item */
	info.def_mask = &rte_flow_item_eth_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_eth);
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);

	/* Basic validation of item parameters */
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);

	/* Update pst if not validate only? clash check? */
	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
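
/* Validate an RSS action: RSS is supported only on ingress, only with
 * the default hash function, and only when the port is configured with
 * ETH_MQ_RX_RSS; the queue list and key length are checked against the
 * device configuration.
 */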
parse_rss_action(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_action *act,
		 struct rte_flow_error *error)
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_rss_info *rss_info = &hw->rss_info;
	const struct rte_flow_action_rss *rss;

	rss = (const struct rte_flow_action_rss *)act->conf;

		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  attr, "No support of RSS in egress");

	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "multi-queue mode is disabled");

	/* Parse RSS related parameters from configuration */
	if (!rss || !rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "no valid queues");

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "non-default RSS hash functions"
					  " are not supported");

	if (rss->key_len && rss->key_len > RTE_DIM(rss_info->key))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "RSS hash key too large");

	if (rss->queue_num > rss_info->rss_size)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "too many queues for RSS context");

	for (i = 0; i < rss->queue_num; i++) {
		if (rss->queue[i] >= dev->data->nb_rx_queues)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "queue id > max number"
otx2_flow_parse_actions(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct rte_flow *flow)
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	const struct rte_flow_action_count *act_count;
	const struct rte_flow_action_mark *act_mark;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *vf_act;
	const char *errmsg = NULL;
	int sel_act, req_act = 0;
	uint16_t pf_func, vf_id;

	/* Initialize actions */
	flow->ctr_id = NPC_COUNTER_NONE;
	pf_func = otx2_pfvf_func(hw->pf, hw->vf);
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		otx2_npc_dbg("Action type = %d", actions->type);

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
		case RTE_FLOW_ACTION_TYPE_MARK:
			    (const struct rte_flow_action_mark *)actions->conf;
			/* We have only 16 bits; use the highest value for FLAG */
			if (act_mark->id > (OTX2_FLOW_FLAG_VAL - 2)) {
				errmsg = "mark value must be < 0xfffe";

			mark = act_mark->id + 1;
			req_act |= OTX2_FLOW_ACT_MARK;
			rte_atomic32_inc(&npc->mark_actions);
		case RTE_FLOW_ACTION_TYPE_FLAG:
			mark = OTX2_FLOW_FLAG_VAL;
			req_act |= OTX2_FLOW_ACT_FLAG;
			rte_atomic32_inc(&npc->mark_actions);

		case RTE_FLOW_ACTION_TYPE_COUNT:
				(const struct rte_flow_action_count *)
			if (act_count->shared == 1) {
				errmsg = "Shared Counters not supported";
			/* Indicates we need a counter */
			req_act |= OTX2_FLOW_ACT_COUNT;
		case RTE_FLOW_ACTION_TYPE_DROP:
			req_act |= OTX2_FLOW_ACT_DROP;

		case RTE_FLOW_ACTION_TYPE_PF:
			req_act |= OTX2_FLOW_ACT_PF;

		case RTE_FLOW_ACTION_TYPE_VF:
			vf_act = (const struct rte_flow_action_vf *)
			req_act |= OTX2_FLOW_ACT_VF;
			if (vf_act->original == 0) {
				vf_id = (vf_act->id & RVU_PFVF_FUNC_MASK) + 1;
				if (vf_id >= hw->maxvf) {
					errmsg = "invalid vf specified";

				pf_func = (pf_func | vf_id);

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			/* Applicable only to ingress flow */
			act_q = (const struct rte_flow_action_queue *)
			if (rq >= dev->data->nb_rx_queues) {
				errmsg = "invalid queue index";

			req_act |= OTX2_FLOW_ACT_QUEUE;

		case RTE_FLOW_ACTION_TYPE_RSS:
			errcode = parse_rss_action(dev, attr, actions, error);

			req_act |= OTX2_FLOW_ACT_RSS;

		case RTE_FLOW_ACTION_TYPE_SECURITY:
			/* Assumes user has already configured security
			 * session for this flow. Associated conf is
			 * opaque. When RTE security is implemented for otx2,
			 * we need to verify that for specified security
			 *    RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
			 *  session_protocol ==
			 *    RTE_SECURITY_PROTOCOL_IPSEC
			 *
			 * RSS is not supported with inline ipsec. Get the
			 * rq from associated conf, or make
			 * RTE_FLOW_ACTION_TYPE_QUEUE compulsory with this
			 * Currently, rq = 0 is assumed.
			 */
			req_act |= OTX2_FLOW_ACT_SEC;

			errmsg = "Unsupported action specified";
	/* Check if actions specified are compatible */
		/* Only DROP/COUNT is supported */
		if (!(req_act & OTX2_FLOW_ACT_DROP)) {
			errmsg = "DROP is required action for egress";
		} else if (req_act & ~(OTX2_FLOW_ACT_DROP |
					OTX2_FLOW_ACT_COUNT)) {
			errmsg = "Unsupported action specified";

		flow->npc_action = NIX_TX_ACTIONOP_DROP;

	/* We have already verified the attr, this is ingress.
	 * - Exactly one terminating action is supported
	 * - Exactly one of MARK or FLAG is supported
	 * - If terminating action is DROP, only count is valid.
	 */
	sel_act = req_act & OTX2_FLOW_ACT_TERM;
	if ((sel_act & (sel_act - 1)) != 0) {
		errmsg = "Only one terminating action supported";

	if (req_act & OTX2_FLOW_ACT_DROP) {
		sel_act = req_act & ~OTX2_FLOW_ACT_COUNT;
		if ((sel_act & (sel_act - 1)) != 0) {
			errmsg = "Only COUNT action is supported "
				 "with DROP ingress action";

	if ((req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK))
	    == (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
		errmsg = "Only one of FLAG or MARK action is supported";
	/* Set NIX_RX_ACTIONOP */
	if (req_act & (OTX2_FLOW_ACT_PF | OTX2_FLOW_ACT_VF)) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
		if (req_act & OTX2_FLOW_ACT_QUEUE)
			flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & OTX2_FLOW_ACT_DROP) {
		flow->npc_action = NIX_RX_ACTIONOP_DROP;
	} else if (req_act & OTX2_FLOW_ACT_QUEUE) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
		flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & OTX2_FLOW_ACT_RSS) {
		/* When the user adds an RSS rule, the rule is first
		 * installed in the MCAM and the action is updated
		 * later, once the FLOW_KEY_ALG index is known. Until
		 * the action is updated with the flow_key_alg index,
		 * set the action to drop.
		 */
		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
			flow->npc_action = NIX_RX_ACTIONOP_DROP;
			flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else if (req_act & OTX2_FLOW_ACT_SEC) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC;
		flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else if (req_act & OTX2_FLOW_ACT_COUNT) {
		/* Keep OTX2_FLOW_ACT_COUNT always at the end.
		 * This is the default action when the user specifies only
		 */
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
		/* Should never reach here */
		errmsg = "Invalid action specified";

		flow->npc_action |= (uint64_t)mark << 40;

	if (rte_atomic32_read(&npc->mark_actions) == 1) {
		hw->rx_offload_flags |=
			NIX_RX_OFFLOAD_MARK_UPDATE_F;
		otx2_eth_set_rx_function(dev);

	/* Ideally AF must ensure that correct pf_func is set */
	flow->npc_action |= (uint64_t)pf_func << 4;

	rte_flow_error_set(error, errcode,
			   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,