/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_ethdev.h"
#include "otx2_flow.h"

const struct rte_flow_item *
otx2_flow_skip_void_and_any_items(const struct rte_flow_item *pattern)
{
	while ((pattern->type == RTE_FLOW_ITEM_TYPE_VOID) ||
	       (pattern->type == RTE_FLOW_ITEM_TYPE_ANY))
		pattern++;

	return pattern;
}

/*
 * Tunnel+ESP, Tunnel+ICMP4/6, Tunnel+TCP, Tunnel+UDP,
 * Tunnel+SCTP
 */
int
otx2_flow_parse_lh(struct otx2_parse_state *pst)
{
	struct otx2_flow_item_info info;
	char hw_mask[64];
	int lid, lt;
	int rc;

	if (!pst->tunnel)
		return 0;

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LH;

	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_UDP:
		lt = NPC_LT_LH_TU_UDP;
		info.def_mask = &rte_flow_item_udp_mask;
		info.len = sizeof(struct rte_flow_item_udp);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		lt = NPC_LT_LH_TU_TCP;
		info.def_mask = &rte_flow_item_tcp_mask;
		info.len = sizeof(struct rte_flow_item_tcp);
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		lt = NPC_LT_LH_TU_SCTP;
		info.def_mask = &rte_flow_item_sctp_mask;
		info.len = sizeof(struct rte_flow_item_sctp);
		break;
	case RTE_FLOW_ITEM_TYPE_ESP:
		lt = NPC_LT_LH_TU_ESP;
		info.def_mask = &rte_flow_item_esp_mask;
		info.len = sizeof(struct rte_flow_item_esp);
		break;
	default:
		return 0;
	}

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}

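/*
 * Illustrative usage (added for clarity, not part of the original source):
 * a fully tunneled pattern that reaches the LF/LG/LH parsers could look
 * like this in testpmd syntax (port and queue ids assumed):
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan /
 *        eth / ipv4 / udp / end actions queue index 1 / end
 *
 * The inner eth, ipv4 and udp items are then classified as
 * NPC_LT_LF_TU_ETHER, NPC_LT_LG_TU_IP and NPC_LT_LH_TU_UDP respectively.
 */
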
/* Tunnel+IPv4, Tunnel+IPv6 */
int
otx2_flow_parse_lg(struct otx2_parse_state *pst)
{
	struct otx2_flow_item_info info;
	char hw_mask[64];
	int lid, lt;
	int rc;

	if (!pst->tunnel)
		return 0;

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LG;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		lt = NPC_LT_LG_TU_IP;
		info.def_mask = &rte_flow_item_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_ipv4);
	} else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		lt = NPC_LT_LG_TU_IP6;
		info.def_mask = &rte_flow_item_ipv6_mask;
		info.len = sizeof(struct rte_flow_item_ipv6);
	} else {
		/* There is no tunneled IP header */
		return 0;
	}

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}

/* Tunnel+Ether */
int
otx2_flow_parse_lf(struct otx2_parse_state *pst)
{
	const struct rte_flow_item *pattern, *last_pattern;
	struct rte_flow_item_eth hw_mask;
	struct otx2_flow_item_info info;
	int nr_vlans = 0;
	int lid, lt, lflags;
	int rc;

	/* We hit this layer if there is a tunneling protocol */
	if (!pst->tunnel)
		return 0;

	if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
		return 0;

	lid = NPC_LID_LF;
	lt = NPC_LT_LF_TU_ETHER;
	lflags = 0;

	info.def_mask = &rte_flow_item_vlan_mask;
	/* No match support for vlan tags */
	info.hw_mask = NULL;
	info.len = sizeof(struct rte_flow_item_vlan);
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;

	/* Look ahead and find out any VLAN tags. These can be
	 * detected but no data matching is available.
	 */
	last_pattern = pst->pattern;
	pattern = pst->pattern + 1;
	pattern = otx2_flow_skip_void_and_any_items(pattern);
	while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		nr_vlans++;
		rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
		if (rc != 0)
			return rc;
		last_pattern = pattern;
		pattern++;
		pattern = otx2_flow_skip_void_and_any_items(pattern);
	}
	otx2_npc_dbg("Nr_vlans = %d", nr_vlans);
	switch (nr_vlans) {
	case 0:
		break;
	case 1:
		lflags = NPC_F_TU_ETHER_CTAG;
		break;
	case 2:
		lflags = NPC_F_TU_ETHER_STAG_CTAG;
		break;
	default:
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   last_pattern,
				   "more than 2 vlans with tunneled Ethernet "
				   "not supported");
		return -rte_errno;
	}

	info.def_mask = &rte_flow_item_eth_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_eth);
	info.hw_hdr_len = 0;
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	info.spec = NULL;
	info.mask = NULL;

	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	pst->pattern = last_pattern;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}

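/*
 * Note (added for clarity, not part of the original source): inner VLAN
 * tags in a tunneled frame are only *detected* here, via lflags; their
 * TCI bytes cannot be matched since info.hw_mask stays NULL. A pattern
 * such as "... / vxlan / eth / vlan / ipv4 / ..." is therefore expected
 * to be accepted only when the vlan item carries no spec data.
 */
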
int
otx2_flow_parse_le(struct otx2_parse_state *pst)
{
	/*
	 * We are positioned at UDP. Scan ahead and look for
	 * UDP encapsulated tunnel protocols. If available,
	 * parse them. In that case handle this:
	 * - RTE spec assumes we point to tunnel header.
	 * - NPC parser provides offset from UDP header.
	 */

	/*
	 * Note: Add support to GENEVE, VXLAN_GPE when we
	 * upgrade DPDK
	 *
	 * Note: Better to split flags into two nibbles:
	 * - Higher nibble can have flags
	 * - Lower nibble to further enumerate protocols
	 *   and have flags based extraction
	 */
	const struct rte_flow_item *pattern = pst->pattern;
	struct otx2_flow_item_info info;
	char hw_mask[64];
	int lid, lt, lflags;
	int rc;

	if (pst->tunnel)
		return 0;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
		return otx2_flow_parse_mpls(pst, NPC_LID_LE);

	info.spec = NULL;
	info.mask = NULL;
	info.hw_mask = NULL;
	info.def_mask = NULL;
	info.len = 0;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LE;
	lflags = 0;

	/* Ensure we are not matching anything in UDP */
	rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
	if (rc)
		return rc;

	info.hw_mask = &hw_mask;
	pattern = otx2_flow_skip_void_and_any_items(pattern);
	otx2_npc_dbg("Pattern->type = %d", pattern->type);
	switch (pattern->type) {
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		lflags = NPC_F_UDP_VXLAN;
		info.def_mask = &rte_flow_item_vxlan_mask;
		info.len = sizeof(struct rte_flow_item_vxlan);
		lt = NPC_LT_LE_VXLAN;
		break;
	case RTE_FLOW_ITEM_TYPE_GTPC:
		lflags = NPC_F_UDP_GTP_GTPC;
		info.def_mask = &rte_flow_item_gtp_mask;
		info.len = sizeof(struct rte_flow_item_gtp);
		lt = NPC_LT_LE_GTPC;
		break;
	case RTE_FLOW_ITEM_TYPE_GTPU:
		lflags = NPC_F_UDP_GTP_GTPU_G_PDU;
		info.def_mask = &rte_flow_item_gtp_mask;
		info.len = sizeof(struct rte_flow_item_gtp);
		lt = NPC_LT_LE_GTPU;
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		lflags = NPC_F_UDP_GENEVE;
		info.def_mask = &rte_flow_item_geneve_mask;
		info.len = sizeof(struct rte_flow_item_geneve);
		lt = NPC_LT_LE_GENEVE;
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		lflags = NPC_F_UDP_VXLANGPE;
		info.def_mask = &rte_flow_item_vxlan_gpe_mask;
		info.len = sizeof(struct rte_flow_item_vxlan_gpe);
		lt = NPC_LT_LE_VXLANGPE;
		break;
	default:
		return 0;
	}

	pst->tunnel = 1;

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}

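/*
 * Illustrative usage (added for clarity, not part of the original source):
 * a GTP-U tunnel match could look like this in testpmd syntax (ids
 * assumed):
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp / gtpu / end
 *        actions queue index 2 / end
 *
 * The gtpu item maps to NPC_LT_LE_GTPU with lflags
 * NPC_F_UDP_GTP_GTPU_G_PDU; as noted in the function comment, the NPC
 * LPTR for such layer types still points at the UDP header.
 */
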
static int
flow_parse_mpls_label_stack(struct otx2_parse_state *pst, int *flag)
{
	int nr_labels = 0;
	const struct rte_flow_item *pattern = pst->pattern;
	struct otx2_flow_item_info info;
	int rc;
	uint8_t flag_list[] = {0, NPC_F_MPLS_2_LABELS,
		NPC_F_MPLS_3_LABELS, NPC_F_MPLS_4_LABELS};

	/*
	 * pst->pattern points to first MPLS label. We only check
	 * that subsequent labels do not have anything to match.
	 */
	info.def_mask = &rte_flow_item_mpls_mask;
	info.hw_mask = NULL;
	info.len = sizeof(struct rte_flow_item_mpls);
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;

	while (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS) {
		nr_labels++;

		/* Basic validation of 2nd/3rd/4th mpls item */
		if (nr_labels > 1) {
			rc = otx2_flow_parse_item_basic(pattern, &info,
							pst->error);
			if (rc != 0)
				return rc;
		}
		pst->last_pattern = pattern;
		pattern++;
		pattern = otx2_flow_skip_void_and_any_items(pattern);
	}

	if (nr_labels > 4) {
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pst->last_pattern,
				   "more than 4 mpls labels not supported");
		return -rte_errno;
	}

	*flag = flag_list[nr_labels - 1];
	return 0;
}

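/*
 * Worked example (added for clarity, not part of the original source):
 * a stack of three MPLS items yields nr_labels == 3, so *flag becomes
 * flag_list[2] == NPC_F_MPLS_3_LABELS; a single label selects
 * flag_list[0] == 0, i.e. no extra layer flag.
 */
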
int
otx2_flow_parse_mpls(struct otx2_parse_state *pst, int lid)
{
	/* Find number of MPLS labels */
	struct rte_flow_item_mpls hw_mask;
	struct otx2_flow_item_info info;
	int lt, lflags;
	int rc;

	lflags = 0;

	if (lid == NPC_LID_LC)
		lt = NPC_LT_LC_MPLS;
	else if (lid == NPC_LID_LD)
		lt = NPC_LT_LD_TU_MPLS_IN_IP;
	else
		lt = NPC_LT_LE_TU_MPLS_IN_UDP;

	/* Prepare for parsing the first item */
	info.def_mask = &rte_flow_item_mpls_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_mpls);
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	/*
	 * Parse for more labels.
	 * This sets lflags and pst->last_pattern correctly.
	 */
	rc = flow_parse_mpls_label_stack(pst, &lflags);
	if (rc != 0)
		return rc;

	pst->tunnel = 1;
	pst->pattern = pst->last_pattern;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}

/*
 * ICMP, ICMP6, UDP, TCP, SCTP, VXLAN, GRE, NVGRE,
 * GTP, GTPC, GTPU, ESP
 *
 * Note: UDP tunnel protocols are identified by flags.
 *       LPTR for these protocol still points to UDP
 *       header. Need flag based extraction to support
 *       this.
 */
int
otx2_flow_parse_ld(struct otx2_parse_state *pst)
{
	char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	uint32_t gre_key_mask = 0xffffffff;
	struct otx2_flow_item_info info;
	int lid, lt, lflags;
	int rc;

	if (pst->tunnel) {
		/* We have already parsed MPLS or IPv4/v6 followed
		 * by MPLS or IPv4/v6. Subsequent TCP/UDP etc
		 * would be parsed as tunneled versions. Skip
		 * this layer, except for tunneled MPLS. If LC is
		 * MPLS, we have anyway skipped all stacked MPLS
		 * labels.
		 */
		if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
			return otx2_flow_parse_mpls(pst, NPC_LID_LD);
		return 0;
	}

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.def_mask = NULL;
	info.len = 0;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LD;
	lflags = 0;

	otx2_npc_dbg("Pst->pattern->type = %d", pst->pattern->type);
	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_ICMP:
		if (pst->lt[NPC_LID_LC] == NPC_LT_LC_IP6)
			lt = NPC_LT_LD_ICMP6;
		else
			lt = NPC_LT_LD_ICMP;
		info.def_mask = &rte_flow_item_icmp_mask;
		info.len = sizeof(struct rte_flow_item_icmp);
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		lt = NPC_LT_LD_UDP;
		info.def_mask = &rte_flow_item_udp_mask;
		info.len = sizeof(struct rte_flow_item_udp);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		lt = NPC_LT_LD_TCP;
		info.def_mask = &rte_flow_item_tcp_mask;
		info.len = sizeof(struct rte_flow_item_tcp);
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		lt = NPC_LT_LD_SCTP;
		info.def_mask = &rte_flow_item_sctp_mask;
		info.len = sizeof(struct rte_flow_item_sctp);
		break;
	case RTE_FLOW_ITEM_TYPE_ESP:
		lt = NPC_LT_LD_ESP;
		info.def_mask = &rte_flow_item_esp_mask;
		info.len = sizeof(struct rte_flow_item_esp);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		lt = NPC_LT_LD_GRE;
		info.def_mask = &rte_flow_item_gre_mask;
		info.len = sizeof(struct rte_flow_item_gre);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
		lt = NPC_LT_LD_GRE;
		info.def_mask = &gre_key_mask;
		info.len = sizeof(gre_key_mask);
		info.hw_hdr_len = 4;
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		lt = NPC_LT_LD_NVGRE;
		lflags = NPC_F_GRE_NVGRE;
		info.def_mask = &rte_flow_item_nvgre_mask;
		info.len = sizeof(struct rte_flow_item_nvgre);
		/* Further IP/Ethernet are parsed as tunneled */
		pst->tunnel = 1;
		break;
	default:
		return 0;
	}

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}

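/*
 * Note (added for clarity, not part of the original source): in the
 * GRE_KEY case above only the 32-bit key is matched (def_mask/len cover
 * just gre_key_mask), and info.hw_hdr_len = 4 accounts for the 4-byte
 * base GRE header that precedes the key field in the packet.
 */
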
static inline void
flow_check_lc_ip_tunnel(struct otx2_parse_state *pst)
{
	const struct rte_flow_item *pattern = pst->pattern + 1;

	pattern = otx2_flow_skip_void_and_any_items(pattern);
	if (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS ||
	    pattern->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    pattern->type == RTE_FLOW_ITEM_TYPE_IPV6)
		pst->tunnel = 1;
}

/* Outer IPv4, Outer IPv6, MPLS, ARP */
int
otx2_flow_parse_lc(struct otx2_parse_state *pst)
{
	uint8_t hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	struct otx2_flow_item_info info;
	int lid, lt;
	int rc;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
		return otx2_flow_parse_mpls(pst, NPC_LID_LC);

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LC;

	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_IPV4:
		lt = NPC_LT_LC_IP;
		info.def_mask = &rte_flow_item_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_ipv4);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		lid = NPC_LID_LC;
		lt = NPC_LT_LC_IP6;
		info.def_mask = &rte_flow_item_ipv6_mask;
		info.len = sizeof(struct rte_flow_item_ipv6);
		break;
	case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
		lt = NPC_LT_LC_ARP;
		info.def_mask = &rte_flow_item_arp_eth_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_arp_eth_ipv4);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
		lid = NPC_LID_LC;
		lt = NPC_LT_LC_IP6_EXT;
		info.def_mask = &rte_flow_item_ipv6_ext_mask;
		info.len = sizeof(struct rte_flow_item_ipv6_ext);
		info.hw_hdr_len = 40;
		break;
	default:
		/* No match at this layer */
		return 0;
	}

	/* Identify if IP tunnels MPLS or IPv4/v6 */
	flow_check_lc_ip_tunnel(pst);

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}

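/*
 * Illustrative usage (added for clarity, not part of the original source):
 * for an IP-in-IP pattern such as "eth / ipv4 / ipv4 / tcp / end",
 * flow_check_lc_ip_tunnel() sets pst->tunnel, so the second ipv4 item is
 * parsed by otx2_flow_parse_lg() as NPC_LT_LG_TU_IP and the tcp item by
 * otx2_flow_parse_lh() as NPC_LT_LH_TU_TCP.
 */
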
int
otx2_flow_parse_lb(struct otx2_parse_state *pst)
{
	const struct rte_flow_item *pattern = pst->pattern;
	const struct rte_flow_item *last_pattern;
	char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	struct otx2_flow_item_info info;
	int lid, lt, lflags;
	int nr_vlans = 0;
	int rc;

	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = NPC_TPID_LENGTH;

	lid = NPC_LID_LB;
	lflags = 0;
	last_pattern = pattern;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		/* RTE vlan is either 802.1q or 802.1ad,
		 * this maps to either CTAG/STAG. We need to decide
		 * based on number of VLANS present. Matching is
		 * supported on first tag only.
		 */
		info.def_mask = &rte_flow_item_vlan_mask;
		info.hw_mask = NULL;
		info.len = sizeof(struct rte_flow_item_vlan);

		pattern = pst->pattern;
		while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			nr_vlans++;

			/* Basic validation of 2nd/3rd vlan item */
			if (nr_vlans > 1) {
				otx2_npc_dbg("Vlans = %d", nr_vlans);
				rc = otx2_flow_parse_item_basic(pattern, &info,
								pst->error);
				if (rc != 0)
					return rc;
			}
			last_pattern = pattern;
			pattern++;
			pattern = otx2_flow_skip_void_and_any_items(pattern);
		}

		switch (nr_vlans) {
		case 1:
			lt = NPC_LT_LB_CTAG;
			break;
		case 2:
			lt = NPC_LT_LB_STAG_QINQ;
			lflags = NPC_F_STAG_CTAG;
			break;
		case 3:
			lt = NPC_LT_LB_STAG_QINQ;
			lflags = NPC_F_STAG_STAG_CTAG;
			break;
		default:
			rte_flow_error_set(pst->error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   last_pattern,
					   "more than 3 vlans not supported");
			return -rte_errno;
		}
	} else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_E_TAG) {
		/* we can support ETAG and match a subsequent CTAG
		 * without any matching support.
		 */
		lt = NPC_LT_LB_ETAG;
		lflags = 0;

		last_pattern = pst->pattern;
		pattern = otx2_flow_skip_void_and_any_items(pst->pattern + 1);
		if (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			info.def_mask = &rte_flow_item_vlan_mask;
			/* set supported mask to NULL for vlan tag */
			info.hw_mask = NULL;
			info.len = sizeof(struct rte_flow_item_vlan);
			rc = otx2_flow_parse_item_basic(pattern, &info,
							pst->error);
			if (rc != 0)
				return rc;

			lflags = NPC_F_ETAG_CTAG;
			last_pattern = pattern;
		}

		info.def_mask = &rte_flow_item_e_tag_mask;
		info.len = sizeof(struct rte_flow_item_e_tag);
	} else {
		return 0;
	}

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);

	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	/* Point pattern to last item consumed */
	pst->pattern = last_pattern;
	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}

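/*
 * Illustrative usage (added for clarity, not part of the original source):
 * a double-tagged (QinQ) match such as "eth / vlan / vlan / end" is
 * classified as NPC_LT_LB_STAG_QINQ with lflags NPC_F_STAG_CTAG; per the
 * comment above, TCI matching is honoured on the first tag only.
 */
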
int
otx2_flow_parse_la(struct otx2_parse_state *pst)
{
	struct rte_flow_item_eth hw_mask;
	struct otx2_flow_item_info info;
	int lid, lt;
	int rc;

	/* Identify the pattern type into lid, lt */
	if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
		return 0;

	lid = NPC_LID_LA;
	lt = NPC_LT_LA_ETHER;
	info.hw_hdr_len = 0;

	if (pst->flow->nix_intf == NIX_INTF_TX) {
		lt = NPC_LT_LA_IH_NIX_ETHER;
		info.hw_hdr_len = NPC_IH_LENGTH;
		if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
			lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER;
			info.hw_hdr_len += NPC_HIGIG2_LENGTH;
		}
	} else {
		if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
			lt = NPC_LT_LA_HIGIG2_ETHER;
			info.hw_hdr_len = NPC_HIGIG2_LENGTH;
		}
	}

	/* Prepare for parsing the item */
	info.def_mask = &rte_flow_item_eth_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_eth);
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	info.spec = NULL;
	info.mask = NULL;

	/* Basic validation of item parameters */
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc)
		return rc;

	/* Update pst if not validate only? clash check? */
	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}

int
otx2_flow_parse_higig2_hdr(struct otx2_parse_state *pst)
{
	struct rte_flow_item_higig2_hdr hw_mask;
	struct otx2_flow_item_info info;
	int lid, lt;
	int rc;

	/* Identify the pattern type into lid, lt */
	if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_HIGIG2)
		return 0;

	lid = NPC_LID_LA;
	lt = NPC_LT_LA_HIGIG2_ETHER;
	info.hw_hdr_len = 0;

	if (pst->flow->nix_intf == NIX_INTF_TX) {
		lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER;
		info.hw_hdr_len = NPC_IH_LENGTH;
	}

	/* Prepare for parsing the item */
	info.def_mask = &rte_flow_item_higig2_hdr_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_higig2_hdr);
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	info.spec = NULL;
	info.mask = NULL;

	/* Basic validation of item parameters */
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc)
		return rc;

	/* Update pst if not validate only? clash check? */
	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}

static int
parse_rss_action(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_action *act,
		 struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_rss_info *rss_info = &hw->rss_info;
	const struct rte_flow_action_rss *rss;
	uint32_t i;

	rss = (const struct rte_flow_action_rss *)act->conf;

	/* Not supported */
	if (attr->egress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  attr, "No support of RSS in egress");

	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "multi-queue mode is disabled");

	/* Parse RSS related parameters from configuration */
	if (!rss || !rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "no valid queues");

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "non-default RSS hash functions"
					  " are not supported");

	if (rss->key_len && rss->key_len > RTE_DIM(rss_info->key))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "RSS hash key too large");

	if (rss->queue_num > rss_info->rss_size)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "too many queues for RSS context");

	for (i = 0; i < rss->queue_num; i++) {
		if (rss->queue[i] >= dev->data->nb_rx_queues)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  act,
						  "queue id > max number"
						  " of queues");
	}

	return 0;
}

int
otx2_flow_parse_actions(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct rte_flow *flow)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	const struct rte_flow_action_count *act_count;
	const struct rte_flow_action_mark *act_mark;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *vf_act;
	const char *errmsg = NULL;
	int sel_act, req_act = 0;
	uint16_t pf_func, vf_id;
	int errcode = 0;
	int mark = 0;
	int rq = 0;

	/* Initialize actions */
	flow->ctr_id = NPC_COUNTER_NONE;
	pf_func = otx2_pfvf_func(hw->pf, hw->vf);

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		otx2_npc_dbg("Action type = %d", actions->type);

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			act_mark =
			    (const struct rte_flow_action_mark *)actions->conf;

			/* We have only 16 bits. Use highest val for flag */
			if (act_mark->id > (OTX2_FLOW_FLAG_VAL - 2)) {
				errmsg = "mark value must be < 0xfffe";
				errcode = ENOTSUP;
				goto err_exit;
			}
			mark = act_mark->id + 1;
			req_act |= OTX2_FLOW_ACT_MARK;
			rte_atomic32_inc(&npc->mark_actions);
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			mark = OTX2_FLOW_FLAG_VAL;
			req_act |= OTX2_FLOW_ACT_FLAG;
			rte_atomic32_inc(&npc->mark_actions);
			break;

		case RTE_FLOW_ACTION_TYPE_COUNT:
			act_count =
				(const struct rte_flow_action_count *)
				actions->conf;

			if (act_count->shared == 1) {
				errmsg = "Shared Counters not supported";
				errcode = ENOTSUP;
				goto err_exit;
			}
			/* Indicates, need a counter */
			flow->ctr_id = 1;
			req_act |= OTX2_FLOW_ACT_COUNT;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			req_act |= OTX2_FLOW_ACT_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_PF:
			req_act |= OTX2_FLOW_ACT_PF;
			pf_func &= (0xfc00);
			break;

		case RTE_FLOW_ACTION_TYPE_VF:
			vf_act = (const struct rte_flow_action_vf *)
				actions->conf;
			req_act |= OTX2_FLOW_ACT_VF;
			if (vf_act->original == 0) {
				vf_id = vf_act->id & RVU_PFVF_FUNC_MASK;
				if (vf_id >= hw->maxvf) {
					errmsg = "invalid vf specified";
					errcode = EINVAL;
					goto err_exit;
				}
				pf_func &= (0xfc00);
				pf_func = (pf_func | (vf_id + 1));
			}
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			/* Applicable only to ingress flow */
			act_q = (const struct rte_flow_action_queue *)
				actions->conf;
			rq = act_q->index;
			if (rq >= dev->data->nb_rx_queues) {
				errmsg = "invalid queue index";
				errcode = EINVAL;
				goto err_exit;
			}
			req_act |= OTX2_FLOW_ACT_QUEUE;
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			errcode = parse_rss_action(dev, attr, actions, error);
			if (errcode)
				return -rte_errno;

			req_act |= OTX2_FLOW_ACT_RSS;
			break;

		case RTE_FLOW_ACTION_TYPE_SECURITY:
			/* Assumes user has already configured security
			 * session for this flow. Associated conf is
			 * opaque. When RTE security is implemented for otx2,
			 * we need to verify that for specified security
			 * session:
			 *  action_type ==
			 *    RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
			 *  session_protocol ==
			 *    RTE_SECURITY_PROTOCOL_IPSEC
			 *
			 * RSS is not supported with inline ipsec. Get the
			 * rq from associated conf, or make
			 * RTE_FLOW_ACTION_TYPE_QUEUE compulsory with this
			 * action.
			 * Currently, rq = 0 is assumed.
			 */
			req_act |= OTX2_FLOW_ACT_SEC;
			rq = 0;
			break;

		default:
			errmsg = "Unsupported action specified";
			errcode = ENOTSUP;
			goto err_exit;
		}
	}

	/* Check if actions specified are compatible */
	if (attr->egress) {
		/* Only DROP/COUNT is supported */
		if (!(req_act & OTX2_FLOW_ACT_DROP)) {
			errmsg = "DROP is required action for egress";
			errcode = EINVAL;
			goto err_exit;
		} else if (req_act & ~(OTX2_FLOW_ACT_DROP |
				       OTX2_FLOW_ACT_COUNT)) {
			errmsg = "Unsupported action specified";
			errcode = ENOTSUP;
			goto err_exit;
		}
		flow->npc_action = NIX_TX_ACTIONOP_DROP;
		goto set_pf_func;
	}

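	/* Note (added for clarity, not part of the original source): the
	 * checks below rely on the power-of-two test (x & (x - 1)) == 0,
	 * which holds exactly when at most one bit of x is set, i.e. when
	 * at most one terminating action was requested.
	 */
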
	/* We have already verified the attr, this is ingress.
	 * - Exactly one terminating action is supported
	 * - Exactly one of MARK or FLAG is supported
	 * - If terminating action is DROP, only count is valid.
	 */
	sel_act = req_act & OTX2_FLOW_ACT_TERM;
	if ((sel_act & (sel_act - 1)) != 0) {
		errmsg = "Only one terminating action supported";
		errcode = EINVAL;
		goto err_exit;
	}

	if (req_act & OTX2_FLOW_ACT_DROP) {
		sel_act = req_act & ~OTX2_FLOW_ACT_COUNT;
		if ((sel_act & (sel_act - 1)) != 0) {
			errmsg = "Only COUNT action is supported "
				 "with DROP ingress action";
			errcode = ENOTSUP;
			goto err_exit;
		}
	}

	if ((req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) ==
	    (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
		errmsg = "Only one of FLAG or MARK action is supported";
		errcode = ENOTSUP;
		goto err_exit;
	}

	/* Set NIX_RX_ACTIONOP */
	if (req_act & (OTX2_FLOW_ACT_PF | OTX2_FLOW_ACT_VF)) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
		if (req_act & OTX2_FLOW_ACT_QUEUE)
			flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & OTX2_FLOW_ACT_DROP) {
		flow->npc_action = NIX_RX_ACTIONOP_DROP;
	} else if (req_act & OTX2_FLOW_ACT_QUEUE) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
		flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & OTX2_FLOW_ACT_RSS) {
		/* When user added a rule for rss, first we will add the
		 * rule in MCAM and then update the action, once if we have
		 * FLOW_KEY_ALG index. So, till we update the action with
		 * flow_key_alg index, set the action to drop.
		 */
		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
			flow->npc_action = NIX_RX_ACTIONOP_DROP;
		else
			flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else if (req_act & OTX2_FLOW_ACT_SEC) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC;
		flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else if (req_act & OTX2_FLOW_ACT_COUNT) {
		/* Keep OTX2_FLOW_ACT_COUNT always at the end
		 * This is default action, when user specify only
		 * COUNT ACTION
		 */
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else {
		/* Should never reach here */
		errmsg = "Invalid action specified";
		errcode = EINVAL;
		goto err_exit;
	}

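	/* Summary (added for clarity, not part of the original source): as
	 * implied by the shifts used in this function, the action word
	 * carries the ACTIONOP in its low bits, the RQ index at bit 20,
	 * the 16-bit mark/flag value at bit 40 and pf_func at bit 4.
	 */
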
	if (mark)
		flow->npc_action |= (uint64_t)mark << 40;

	if (rte_atomic32_read(&npc->mark_actions) == 1) {
		hw->rx_offload_flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;
		otx2_eth_set_rx_function(dev);
	}

set_pf_func:
	/* Ideally AF must ensure that correct pf_func is set */
	flow->npc_action |= (uint64_t)pf_func << 4;

	return 0;

err_exit:
	rte_flow_error_set(error, errcode,
			   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
			   errmsg);
	return -rte_errno;
}