79b92fda8a4ad88b53c13631e8c1136257ed8b29
[dpdk.git] / drivers / net / octeontx2 / otx2_flow_parse.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2019 Marvell International Ltd.
3  */
4
5 #include "otx2_ethdev.h"
6 #include "otx2_flow.h"
7
8 const struct rte_flow_item *
9 otx2_flow_skip_void_and_any_items(const struct rte_flow_item *pattern)
10 {
11         while ((pattern->type == RTE_FLOW_ITEM_TYPE_VOID) ||
12                (pattern->type == RTE_FLOW_ITEM_TYPE_ANY))
13                 pattern++;
14
15         return pattern;
16 }
17
18 /*
19  * Tunnel+ESP, Tunnel+ICMP4/6, Tunnel+TCP, Tunnel+UDP,
20  * Tunnel+SCTP
21  */
22 int
23 otx2_flow_parse_lh(struct otx2_parse_state *pst)
24 {
25         struct otx2_flow_item_info info;
26         char hw_mask[64];
27         int lid, lt;
28         int rc;
29
30         if (!pst->tunnel)
31                 return 0;
32
33         info.hw_mask = &hw_mask;
34         info.spec = NULL;
35         info.mask = NULL;
36         info.hw_hdr_len = 0;
37         lid = NPC_LID_LH;
38
39         switch (pst->pattern->type) {
40         case RTE_FLOW_ITEM_TYPE_UDP:
41                 lt = NPC_LT_LH_TU_UDP;
42                 info.def_mask = &rte_flow_item_udp_mask;
43                 info.len = sizeof(struct rte_flow_item_udp);
44                 break;
45         case RTE_FLOW_ITEM_TYPE_TCP:
46                 lt = NPC_LT_LH_TU_TCP;
47                 info.def_mask = &rte_flow_item_tcp_mask;
48                 info.len = sizeof(struct rte_flow_item_tcp);
49                 break;
50         case RTE_FLOW_ITEM_TYPE_SCTP:
51                 lt = NPC_LT_LH_TU_SCTP;
52                 info.def_mask = &rte_flow_item_sctp_mask;
53                 info.len = sizeof(struct rte_flow_item_sctp);
54                 break;
55         case RTE_FLOW_ITEM_TYPE_ESP:
56                 lt = NPC_LT_LH_TU_ESP;
57                 info.def_mask = &rte_flow_item_esp_mask;
58                 info.len = sizeof(struct rte_flow_item_esp);
59                 break;
60         default:
61                 return 0;
62         }
63
64         otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
65         rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
66         if (rc != 0)
67                 return rc;
68
69         return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
70 }
71
72 /* Tunnel+IPv4, Tunnel+IPv6 */
73 int
74 otx2_flow_parse_lg(struct otx2_parse_state *pst)
75 {
76         struct otx2_flow_item_info info;
77         char hw_mask[64];
78         int lid, lt;
79         int rc;
80
81         if (!pst->tunnel)
82                 return 0;
83
84         info.hw_mask = &hw_mask;
85         info.spec = NULL;
86         info.mask = NULL;
87         info.hw_hdr_len = 0;
88         lid = NPC_LID_LG;
89
90         if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
91                 lt = NPC_LT_LG_TU_IP;
92                 info.def_mask = &rte_flow_item_ipv4_mask;
93                 info.len = sizeof(struct rte_flow_item_ipv4);
94         } else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV6) {
95                 lt = NPC_LT_LG_TU_IP6;
96                 info.def_mask = &rte_flow_item_ipv6_mask;
97                 info.len = sizeof(struct rte_flow_item_ipv6);
98         } else {
99                 /* There is no tunneled IP header */
100                 return 0;
101         }
102
103         otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
104         rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
105         if (rc != 0)
106                 return rc;
107
108         return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
109 }
110
/* Tunnel+Ether: inner Ethernet header, optionally followed by up to two
 * VLAN tags. VLAN tags can be detected (they select the layer flags) but
 * their fields cannot be matched on.
 */
int
otx2_flow_parse_lf(struct otx2_parse_state *pst)
{
	const struct rte_flow_item *pattern, *last_pattern;
	struct rte_flow_item_eth hw_mask;
	struct otx2_flow_item_info info;
	int lid, lt, lflags;
	int nr_vlans = 0;
	int rc;

	/* We hit this layer if there is a tunneling protocol */
	if (!pst->tunnel)
		return 0;

	if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
		return 0;

	lid = NPC_LID_LF;
	lt = NPC_LT_LF_TU_ETHER;
	lflags = 0;

	/* First pass: validate the VLAN items only; hw_mask stays NULL so
	 * otx2_flow_parse_item_basic() rejects any attempt to match fields.
	 */
	info.def_mask = &rte_flow_item_vlan_mask;
	/* No match support for vlan tags */
	info.hw_mask = NULL;
	info.len = sizeof(struct rte_flow_item_vlan);
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;

	/* Look ahead and find out any VLAN tags. These can be
	 * detected but no data matching is available.
	 */
	last_pattern = pst->pattern;
	pattern = pst->pattern + 1;
	pattern = otx2_flow_skip_void_and_any_items(pattern);
	while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		nr_vlans++;
		rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
		if (rc != 0)
			return rc;
		/* Track the last consumed item so pst->pattern can skip
		 * past the VLAN tags at the end.
		 */
		last_pattern = pattern;
		pattern++;
		pattern = otx2_flow_skip_void_and_any_items(pattern);
	}
	otx2_npc_dbg("Nr_vlans = %d", nr_vlans);
	/* Translate the VLAN count into NPC layer flags */
	switch (nr_vlans) {
	case 0:
		break;
	case 1:
		lflags = NPC_F_TU_ETHER_CTAG;
		break;
	case 2:
		lflags = NPC_F_TU_ETHER_STAG_CTAG;
		break;
	default:
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   last_pattern,
				   "more than 2 vlans with tunneled Ethernet "
				   "not supported");
		return -rte_errno;
	}

	/* Second pass: parse the Ethernet item itself, now with HW mask */
	info.def_mask = &rte_flow_item_eth_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_eth);
	info.hw_hdr_len = 0;
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	info.spec = NULL;
	info.mask = NULL;

	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	/* Advance past all VLAN tags consumed above */
	pst->pattern = last_pattern;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}
191
int
otx2_flow_parse_le(struct otx2_parse_state *pst)
{
	/*
	 * We are positioned at UDP. Scan ahead and look for
	 * UDP encapsulated tunnel protocols. If available,
	 * parse them. In that case handle this:
	 *	- RTE spec assumes we point to tunnel header.
	 *	- NPC parser provides offset from UDP header.
	 */

	/*
	 * Note: Add support to GENEVE, VXLAN_GPE when we
	 * upgrade DPDK
	 *
	 * Note: Better to split flags into two nibbles:
	 *	- Higher nibble can have flags
	 *	- Lower nibble to further enumerate protocols
	 *	  and have flags based extraction
	 */
	const struct rte_flow_item *pattern = pst->pattern;
	struct otx2_flow_item_info info;
	int lid, lt, lflags;
	char hw_mask[64];
	int rc;

	/* Already inside a tunnel: no second level of encapsulation */
	if (pst->tunnel)
		return 0;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
		return otx2_flow_parse_mpls(pst, NPC_LID_LE);

	/* len == 0 and def_mask == NULL so the basic parse below only
	 * verifies that the UDP item itself matches nothing.
	 */
	info.spec = NULL;
	info.mask = NULL;
	info.hw_mask = NULL;
	info.def_mask = NULL;
	info.len = 0;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LE;
	lflags = 0;

	/* Ensure we are not matching anything in UDP */
	rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
	if (rc)
		return rc;

	/* Look past the UDP item for a known tunnel header */
	info.hw_mask = &hw_mask;
	pattern = otx2_flow_skip_void_and_any_items(pattern);
	otx2_npc_dbg("Pattern->type = %d", pattern->type);
	switch (pattern->type) {
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		lflags = NPC_F_UDP_VXLAN;
		info.def_mask = &rte_flow_item_vxlan_mask;
		info.len = sizeof(struct rte_flow_item_vxlan);
		lt = NPC_LT_LE_VXLAN;
		break;
	case RTE_FLOW_ITEM_TYPE_ESP:
		lt = NPC_LT_LE_ESP;
		info.def_mask = &rte_flow_item_esp_mask;
		info.len = sizeof(struct rte_flow_item_esp);
		break;
	case RTE_FLOW_ITEM_TYPE_GTPC:
		lflags = NPC_F_UDP_GTP_GTPC;
		info.def_mask = &rte_flow_item_gtp_mask;
		info.len = sizeof(struct rte_flow_item_gtp);
		lt = NPC_LT_LE_GTPC;
		break;
	case RTE_FLOW_ITEM_TYPE_GTPU:
		lflags = NPC_F_UDP_GTP_GTPU_G_PDU;
		info.def_mask = &rte_flow_item_gtp_mask;
		info.len = sizeof(struct rte_flow_item_gtp);
		lt = NPC_LT_LE_GTPU;
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		lflags = NPC_F_UDP_GENEVE;
		info.def_mask = &rte_flow_item_geneve_mask;
		info.len = sizeof(struct rte_flow_item_geneve);
		lt = NPC_LT_LE_GENEVE;
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		lflags = NPC_F_UDP_VXLANGPE;
		info.def_mask = &rte_flow_item_vxlan_gpe_mask;
		info.len = sizeof(struct rte_flow_item_vxlan_gpe);
		lt = NPC_LT_LE_VXLANGPE;
		break;
	default:
		return 0;
	}

	/* Everything after a UDP tunnel header parses as tunneled */
	pst->tunnel = 1;

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}
290
/* Count the MPLS labels starting at pst->pattern, validate that labels
 * after the first carry no match data, and return the corresponding
 * NPC_F_MPLS_* flag through @flag. Updates pst->last_pattern to the last
 * MPLS item consumed.
 */
static int
flow_parse_mpls_label_stack(struct otx2_parse_state *pst, int *flag)
{
	int nr_labels = 0;
	const struct rte_flow_item *pattern = pst->pattern;
	struct otx2_flow_item_info info;
	int rc;
	/* flag_list[i] is the flag for a stack of (i + 1) labels; a single
	 * label needs no flag.
	 */
	uint8_t flag_list[] = {0, NPC_F_MPLS_2_LABELS,
		NPC_F_MPLS_3_LABELS, NPC_F_MPLS_4_LABELS};

	/*
	 * pst->pattern points to first MPLS label. We only check
	 * that subsequent labels do not have anything to match.
	 */
	info.def_mask = &rte_flow_item_mpls_mask;
	info.hw_mask = NULL;	/* no data matching on stacked labels */
	info.len = sizeof(struct rte_flow_item_mpls);
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;

	while (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS) {
		nr_labels++;

		/* Basic validation of 2nd/3rd/4th mpls item */
		if (nr_labels > 1) {
			rc = otx2_flow_parse_item_basic(pattern, &info,
							pst->error);
			if (rc != 0)
				return rc;
		}
		pst->last_pattern = pattern;
		pattern++;
		pattern = otx2_flow_skip_void_and_any_items(pattern);
	}

	if (nr_labels > 4) {
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pst->last_pattern,
				   "more than 4 mpls labels not supported");
		return -rte_errno;
	}

	/* NOTE(review): assumes the caller positioned pst->pattern on an
	 * MPLS item so nr_labels >= 1 here; otherwise the index would be
	 * out of bounds — confirm against otx2_flow_parse_mpls().
	 */
	*flag = flag_list[nr_labels - 1];
	return 0;
}
338
339 int
340 otx2_flow_parse_mpls(struct otx2_parse_state *pst, int lid)
341 {
342         /* Find number of MPLS labels */
343         struct rte_flow_item_mpls hw_mask;
344         struct otx2_flow_item_info info;
345         int lt, lflags;
346         int rc;
347
348         lflags = 0;
349
350         if (lid == NPC_LID_LC)
351                 lt = NPC_LT_LC_MPLS;
352         else if (lid == NPC_LID_LD)
353                 lt = NPC_LT_LD_TU_MPLS_IN_IP;
354         else
355                 lt = NPC_LT_LE_TU_MPLS_IN_UDP;
356
357         /* Prepare for parsing the first item */
358         info.def_mask = &rte_flow_item_mpls_mask;
359         info.hw_mask = &hw_mask;
360         info.len = sizeof(struct rte_flow_item_mpls);
361         info.spec = NULL;
362         info.mask = NULL;
363         info.hw_hdr_len = 0;
364
365         otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
366         rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
367         if (rc != 0)
368                 return rc;
369
370         /*
371          * Parse for more labels.
372          * This sets lflags and pst->last_pattern correctly.
373          */
374         rc = flow_parse_mpls_label_stack(pst, &lflags);
375         if (rc != 0)
376                 return rc;
377
378         pst->tunnel = 1;
379         pst->pattern = pst->last_pattern;
380
381         return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
382 }
383
384 /*
385  * ICMP, ICMP6, UDP, TCP, SCTP, VXLAN, GRE, NVGRE,
386  * GTP, GTPC, GTPU, ESP
387  *
388  * Note: UDP tunnel protocols are identified by flags.
389  *       LPTR for these protocol still points to UDP
390  *       header. Need flag based extraction to support
391  *       this.
392  */
int
otx2_flow_parse_ld(struct otx2_parse_state *pst)
{
	char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	/* Default mask for a raw 32-bit GRE key item */
	uint32_t gre_key_mask = 0xffffffff;
	struct otx2_flow_item_info info;
	int lid, lt, lflags;
	int rc;

	if (pst->tunnel) {
		/* We have already parsed MPLS or IPv4/v6 followed
		 * by MPLS or IPv4/v6. Subsequent TCP/UDP etc
		 * would be parsed as tunneled versions. Skip
		 * this layer, except for tunneled MPLS. If LC is
		 * MPLS, we have anyway skipped all stacked MPLS
		 * labels.
		 */
		if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
			return otx2_flow_parse_mpls(pst, NPC_LID_LD);
		return 0;
	}
	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.def_mask = NULL;
	info.len = 0;
	info.hw_hdr_len = 0;

	lid = NPC_LID_LD;
	lflags = 0;

	otx2_npc_dbg("Pst->pattern->type = %d", pst->pattern->type);
	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_ICMP:
		/* ICMP layer type depends on the outer IP version (LC) */
		if (pst->lt[NPC_LID_LC] == NPC_LT_LC_IP6)
			lt = NPC_LT_LD_ICMP6;
		else
			lt = NPC_LT_LD_ICMP;
		info.def_mask = &rte_flow_item_icmp_mask;
		info.len = sizeof(struct rte_flow_item_icmp);
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		lt = NPC_LT_LD_UDP;
		info.def_mask = &rte_flow_item_udp_mask;
		info.len = sizeof(struct rte_flow_item_udp);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		lt = NPC_LT_LD_TCP;
		info.def_mask = &rte_flow_item_tcp_mask;
		info.len = sizeof(struct rte_flow_item_tcp);
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		lt = NPC_LT_LD_SCTP;
		info.def_mask = &rte_flow_item_sctp_mask;
		info.len = sizeof(struct rte_flow_item_sctp);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		lt = NPC_LT_LD_GRE;
		info.def_mask = &rte_flow_item_gre_mask;
		info.len = sizeof(struct rte_flow_item_gre);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
		/* Key is matched as raw 32 bits, offset past the 4-byte
		 * GRE base header (hw_hdr_len = 4).
		 */
		lt = NPC_LT_LD_GRE;
		info.def_mask = &gre_key_mask;
		info.len = sizeof(gre_key_mask);
		info.hw_hdr_len = 4;
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		lt = NPC_LT_LD_NVGRE;
		lflags = NPC_F_GRE_NVGRE;
		info.def_mask = &rte_flow_item_nvgre_mask;
		info.len = sizeof(struct rte_flow_item_nvgre);
		/* Further IP/Ethernet are parsed as tunneled */
		pst->tunnel = 1;
		break;
	default:
		return 0;
	}

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}
479
480 static inline void
481 flow_check_lc_ip_tunnel(struct otx2_parse_state *pst)
482 {
483         const struct rte_flow_item *pattern = pst->pattern + 1;
484
485         pattern = otx2_flow_skip_void_and_any_items(pattern);
486         if (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS ||
487             pattern->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
488             pattern->type == RTE_FLOW_ITEM_TYPE_IPV6)
489                 pst->tunnel = 1;
490 }
491
492 static int
493 otx2_flow_raw_item_prepare(const struct rte_flow_item_raw *raw_spec,
494                            const struct rte_flow_item_raw *raw_mask,
495                            struct otx2_flow_item_info *info,
496                            uint8_t *spec_buf, uint8_t *mask_buf)
497 {
498         uint32_t custom_hdr_size = 0;
499
500         memset(spec_buf, 0, NPC_MAX_RAW_ITEM_LEN);
501         memset(mask_buf, 0, NPC_MAX_RAW_ITEM_LEN);
502         custom_hdr_size = raw_spec->offset + raw_spec->length;
503
504         memcpy(spec_buf + raw_spec->offset, raw_spec->pattern,
505                raw_spec->length);
506
507         if (raw_mask->pattern) {
508                 memcpy(mask_buf + raw_spec->offset, raw_mask->pattern,
509                        raw_spec->length);
510         } else {
511                 memset(mask_buf + raw_spec->offset, 0xFF, raw_spec->length);
512         }
513
514         info->len = custom_hdr_size;
515         info->spec = spec_buf;
516         info->mask = mask_buf;
517
518         return 0;
519 }
520
521 /* Outer IPv4, Outer IPv6, MPLS, ARP */
int
otx2_flow_parse_lc(struct otx2_parse_state *pst)
{
	uint8_t raw_spec_buf[NPC_MAX_RAW_ITEM_LEN];
	uint8_t raw_mask_buf[NPC_MAX_RAW_ITEM_LEN];
	uint8_t hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	const struct rte_flow_item_raw *raw_spec;
	struct otx2_flow_item_info info;
	int lid, lt, len;
	int rc;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
		return otx2_flow_parse_mpls(pst, NPC_LID_LC);

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LC;

	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_IPV4:
		lt = NPC_LT_LC_IP;
		info.def_mask = &rte_flow_item_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_ipv4);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		/* lid was already set above; this assignment is redundant
		 * but harmless.
		 */
		lid = NPC_LID_LC;
		lt = NPC_LT_LC_IP6;
		info.def_mask = &rte_flow_item_ipv6_mask;
		info.len = sizeof(struct rte_flow_item_ipv6);
		break;
	case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
		lt = NPC_LT_LC_ARP;
		info.def_mask = &rte_flow_item_arp_eth_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_arp_eth_ipv4);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
		lid = NPC_LID_LC;
		lt = NPC_LT_LC_IP6_EXT;
		info.def_mask = &rte_flow_item_ipv6_ext_mask;
		info.len = sizeof(struct rte_flow_item_ipv6_ext);
		/* Extension header sits after the 40-byte IPv6 header */
		info.hw_hdr_len = 40;
		break;
	case RTE_FLOW_ITEM_TYPE_RAW:
		raw_spec = pst->pattern->spec;
		/* Only relative RAW items are handled at this layer;
		 * absolute ones are handled in LB (exdsa).
		 */
		if (!raw_spec->relative)
			return 0;

		len = raw_spec->length + raw_spec->offset;
		if (len > NPC_MAX_RAW_ITEM_LEN) {
			rte_flow_error_set(pst->error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Spec length too big");
			return -rte_errno;
		}

		otx2_flow_raw_item_prepare((const struct rte_flow_item_raw *)
					   pst->pattern->spec,
					   (const struct rte_flow_item_raw *)
					   pst->pattern->mask, &info,
					   raw_spec_buf, raw_mask_buf);

		lid = NPC_LID_LC;
		lt = NPC_LT_LC_NGIO;
		info.hw_mask = &hw_mask;
		/* NOTE(review): otx2_flow_get_hw_supp_mask() is invoked here
		 * and again after the switch with identical arguments; the
		 * second call looks redundant — confirm before changing.
		 */
		otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
		break;
	default:
		/* No match at this layer */
		return 0;
	}

	/* Identify if IP tunnels MPLS or IPv4/v6 */
	flow_check_lc_ip_tunnel(pst);

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}
605
606 /* VLAN, ETAG */
int
otx2_flow_parse_lb(struct otx2_parse_state *pst)
{
	const struct rte_flow_item *pattern = pst->pattern;
	uint8_t raw_spec_buf[NPC_MAX_RAW_ITEM_LEN];
	uint8_t raw_mask_buf[NPC_MAX_RAW_ITEM_LEN];
	const struct rte_flow_item *last_pattern;
	const struct rte_flow_item_raw *raw_spec;
	char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	struct otx2_flow_item_info info;
	int lid, lt, lflags, len;
	int nr_vlans = 0;
	int rc;

	info.spec = NULL;
	info.mask = NULL;
	/* Skip the TPID bytes preceding the tag in the parsed header */
	info.hw_hdr_len = NPC_TPID_LENGTH;

	lid = NPC_LID_LB;
	lflags = 0;
	last_pattern = pattern;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		/* RTE vlan is either 802.1q or 802.1ad,
		 * this maps to either CTAG/STAG. We need to decide
		 * based on number of VLANS present. Matching is
		 * supported on first tag only.
		 */
		info.def_mask = &rte_flow_item_vlan_mask;
		info.hw_mask = NULL;	/* tags beyond the first: no match */
		info.len = sizeof(struct rte_flow_item_vlan);

		pattern = pst->pattern;
		while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			nr_vlans++;

			/* Basic validation of 2nd/3rd vlan item */
			if (nr_vlans > 1) {
				otx2_npc_dbg("Vlans  = %d", nr_vlans);
				rc = otx2_flow_parse_item_basic(pattern, &info,
								pst->error);
				if (rc != 0)
					return rc;
			}
			/* Remember the last consumed VLAN item */
			last_pattern = pattern;
			pattern++;
			pattern = otx2_flow_skip_void_and_any_items(pattern);
		}

		/* Map the number of tags to layer type and flags */
		switch (nr_vlans) {
		case 1:
			lt = NPC_LT_LB_CTAG;
			break;
		case 2:
			lt = NPC_LT_LB_STAG_QINQ;
			lflags = NPC_F_STAG_CTAG;
			break;
		case 3:
			lt = NPC_LT_LB_STAG_QINQ;
			lflags = NPC_F_STAG_STAG_CTAG;
			break;
		default:
			rte_flow_error_set(pst->error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   last_pattern,
					   "more than 3 vlans not supported");
			return -rte_errno;
		}
	} else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_E_TAG) {
		/* we can support ETAG and match a subsequent CTAG
		 * without any matching support.
		 */
		lt = NPC_LT_LB_ETAG;
		lflags = 0;

		last_pattern = pst->pattern;
		pattern = otx2_flow_skip_void_and_any_items(pst->pattern + 1);
		if (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			info.def_mask = &rte_flow_item_vlan_mask;
			/* set supported mask to NULL for vlan tag */
			info.hw_mask = NULL;
			info.len = sizeof(struct rte_flow_item_vlan);
			rc = otx2_flow_parse_item_basic(pattern, &info,
							pst->error);
			if (rc != 0)
				return rc;

			lflags = NPC_F_ETAG_CTAG;
			last_pattern = pattern;
		}

		/* Switch info back to the E-TAG item for the final parse */
		info.def_mask = &rte_flow_item_e_tag_mask;
		info.len = sizeof(struct rte_flow_item_e_tag);
	} else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_RAW) {
		raw_spec = pst->pattern->spec;
		/* Only absolute RAW items (exdsa headers) belong here */
		if (raw_spec->relative)
			return 0;
		len = raw_spec->length + raw_spec->offset;
		if (len > NPC_MAX_RAW_ITEM_LEN) {
			rte_flow_error_set(pst->error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Spec length too big");
			return -rte_errno;
		}

		/* Layer type follows the port's switch-header configuration */
		if (pst->npc->switch_header_type ==
		    OTX2_PRIV_FLAGS_VLAN_EXDSA) {
			lt = NPC_LT_LB_VLAN_EXDSA;
		} else if (pst->npc->switch_header_type ==
			   OTX2_PRIV_FLAGS_EXDSA) {
			lt = NPC_LT_LB_EXDSA;
		} else {
			rte_flow_error_set(pst->error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "exdsa or vlan_exdsa not enabled on"
					   " port");
			return -rte_errno;
		}

		otx2_flow_raw_item_prepare((const struct rte_flow_item_raw *)
					   pst->pattern->spec,
					   (const struct rte_flow_item_raw *)
					   pst->pattern->mask, &info,
					   raw_spec_buf, raw_mask_buf);

		info.hw_hdr_len = 0;
	} else {
		return 0;
	}

	info.hw_mask = &hw_mask;
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);

	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	/* Point pattern to last item consumed */
	pst->pattern = last_pattern;
	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}
748
749
750 int
751 otx2_flow_parse_la(struct otx2_parse_state *pst)
752 {
753         struct rte_flow_item_eth hw_mask;
754         struct otx2_flow_item_info info;
755         int lid, lt;
756         int rc;
757
758         /* Identify the pattern type into lid, lt */
759         if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
760                 return 0;
761
762         lid = NPC_LID_LA;
763         lt = NPC_LT_LA_ETHER;
764         info.hw_hdr_len = 0;
765
766         if (pst->flow->nix_intf == NIX_INTF_TX) {
767                 lt = NPC_LT_LA_IH_NIX_ETHER;
768                 info.hw_hdr_len = NPC_IH_LENGTH;
769                 if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
770                         lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER;
771                         info.hw_hdr_len += NPC_HIGIG2_LENGTH;
772                 }
773         } else {
774                 if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
775                         lt = NPC_LT_LA_HIGIG2_ETHER;
776                         info.hw_hdr_len = NPC_HIGIG2_LENGTH;
777                 }
778         }
779
780         /* Prepare for parsing the item */
781         info.def_mask = &rte_flow_item_eth_mask;
782         info.hw_mask = &hw_mask;
783         info.len = sizeof(struct rte_flow_item_eth);
784         otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
785         info.spec = NULL;
786         info.mask = NULL;
787
788         /* Basic validation of item parameters */
789         rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
790         if (rc)
791                 return rc;
792
793         /* Update pst if not validate only? clash check? */
794         return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
795 }
796
797 int
798 otx2_flow_parse_higig2_hdr(struct otx2_parse_state *pst)
799 {
800         struct rte_flow_item_higig2_hdr hw_mask;
801         struct otx2_flow_item_info info;
802         int lid, lt;
803         int rc;
804
805         /* Identify the pattern type into lid, lt */
806         if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_HIGIG2)
807                 return 0;
808
809         lid = NPC_LID_LA;
810         lt = NPC_LT_LA_HIGIG2_ETHER;
811         info.hw_hdr_len = 0;
812
813         if (pst->flow->nix_intf == NIX_INTF_TX) {
814                 lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER;
815                 info.hw_hdr_len = NPC_IH_LENGTH;
816         }
817
818         /* Prepare for parsing the item */
819         info.def_mask = &rte_flow_item_higig2_hdr_mask;
820         info.hw_mask = &hw_mask;
821         info.len = sizeof(struct rte_flow_item_higig2_hdr);
822         otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
823         info.spec = NULL;
824         info.mask = NULL;
825
826         /* Basic validation of item parameters */
827         rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
828         if (rc)
829                 return rc;
830
831         /* Update pst if not validate only? clash check? */
832         return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
833 }
834
835 static int
836 parse_rss_action(struct rte_eth_dev *dev,
837                  const struct rte_flow_attr *attr,
838                  const struct rte_flow_action *act,
839                  struct rte_flow_error *error)
840 {
841         struct otx2_eth_dev *hw = dev->data->dev_private;
842         struct otx2_rss_info *rss_info = &hw->rss_info;
843         const struct rte_flow_action_rss *rss;
844         uint32_t i;
845
846         rss = (const struct rte_flow_action_rss *)act->conf;
847
848         /* Not supported */
849         if (attr->egress) {
850                 return rte_flow_error_set(error, EINVAL,
851                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
852                                           attr, "No support of RSS in egress");
853         }
854
855         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
856                 return rte_flow_error_set(error, ENOTSUP,
857                                           RTE_FLOW_ERROR_TYPE_ACTION,
858                                           act, "multi-queue mode is disabled");
859
860         /* Parse RSS related parameters from configuration */
861         if (!rss || !rss->queue_num)
862                 return rte_flow_error_set(error, EINVAL,
863                                           RTE_FLOW_ERROR_TYPE_ACTION,
864                                           act, "no valid queues");
865
866         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
867                 return rte_flow_error_set(error, ENOTSUP,
868                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
869                                           "non-default RSS hash functions"
870                                           " are not supported");
871
872         if (rss->key_len && rss->key_len > RTE_DIM(rss_info->key))
873                 return rte_flow_error_set(error, ENOTSUP,
874                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
875                                           "RSS hash key too large");
876
877         if (rss->queue_num > rss_info->rss_size)
878                 return rte_flow_error_set
879                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
880                          "too many queues for RSS context");
881
882         for (i = 0; i < rss->queue_num; i++) {
883                 if (rss->queue[i] >= dev->data->nb_rx_queues)
884                         return rte_flow_error_set(error, EINVAL,
885                                                   RTE_FLOW_ERROR_TYPE_ACTION,
886                                                   act,
887                                                   "queue id > max number"
888                                                   " of queues");
889         }
890
891         return 0;
892 }
893
/*
 * Translate the rte_flow action list into the NPC action word for @flow.
 *
 * Walks @actions collecting the requested action set into the req_act
 * bitmask, validates that the combination is legal for the direction
 * given by @attr, resolves the destination pf_func for PF/VF/port-id
 * redirection, and encodes the resulting RX/TX ACTIONOP, destination
 * queue, mark value and pf_func into flow->npc_action.
 *
 * Returns 0 on success, -rte_errno on failure with @error populated.
 */
int
otx2_flow_parse_actions(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct rte_flow *flow)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	const struct rte_flow_action_mark *act_mark;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *vf_act;
	uint16_t pf_func, vf_id, port_id, pf_id;
	char if_name[RTE_ETH_NAME_MAX_LEN];
	bool vlan_insert_action = false;
	struct rte_eth_dev *eth_dev;
	const char *errmsg = NULL;
	int sel_act, req_act = 0;
	int errcode = 0;
	int mark = 0;
	int rq = 0;

	/* Initialize actions */
	flow->ctr_id = NPC_COUNTER_NONE;
	/* Default target: this port's own PF/VF function */
	pf_func = otx2_pfvf_func(hw->pf, hw->vf);

	/* First pass: record each requested action in req_act and pick up
	 * its parameters (mark value, queue index, destination pf_func).
	 */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		otx2_npc_dbg("Action type = %d", actions->type);

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			act_mark =
			    (const struct rte_flow_action_mark *)actions->conf;

			/* We have only 16 bits. Use highest val for flag */
			if (act_mark->id > (OTX2_FLOW_FLAG_VAL - 2)) {
				errmsg = "mark value must be < 0xfffe";
				errcode = ENOTSUP;
				goto err_exit;
			}
			/* Stored off-by-one so 0 can mean "no mark" */
			mark = act_mark->id + 1;
			req_act |= OTX2_FLOW_ACT_MARK;
			/* NOTE(review): counter is bumped before full action
			 * validation; rollback on a later err_exit is not
			 * visible here — confirm it happens in the caller.
			 */
			rte_atomic32_inc(&npc->mark_actions);
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			/* FLAG is encoded as the reserved maximum mark value */
			mark = OTX2_FLOW_FLAG_VAL;
			req_act |= OTX2_FLOW_ACT_FLAG;
			rte_atomic32_inc(&npc->mark_actions);
			break;

		case RTE_FLOW_ACTION_TYPE_COUNT:
			/* Indicates, need a counter */
			flow->ctr_id = 1;
			req_act |= OTX2_FLOW_ACT_COUNT;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			req_act |= OTX2_FLOW_ACT_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_PF:
			req_act |= OTX2_FLOW_ACT_PF;
			/* Clear the function bits: target the PF itself */
			pf_func &= (0xfc00);
			break;

		case RTE_FLOW_ACTION_TYPE_VF:
			vf_act = (const struct rte_flow_action_vf *)
				actions->conf;
			req_act |= OTX2_FLOW_ACT_VF;
			if (vf_act->original == 0) {
				vf_id = vf_act->id & RVU_PFVF_FUNC_MASK;
				if (vf_id  >= hw->maxvf) {
					errmsg = "invalid vf specified";
					errcode = EINVAL;
					goto err_exit;
				}
				/* Encode target VF as func = vf_id + 1 */
				pf_func &= (0xfc00);
				pf_func = (pf_func | (vf_id + 1));
			}
			break;

		case RTE_FLOW_ACTION_TYPE_PORT_ID:
		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
			/* Both action types name a destination port; only
			 * the conf layout differs.
			 */
			if (actions->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
				const struct rte_flow_action_port_id *port_act;

				port_act = actions->conf;
				port_id = port_act->id;
			} else {
				const struct rte_flow_action_ethdev *ethdev_act;

				ethdev_act = actions->conf;
				port_id = ethdev_act->port_id;
			}
			if (rte_eth_dev_get_name_by_port(port_id, if_name)) {
				errmsg = "Name not found for output port id";
				errcode = EINVAL;
				goto err_exit;
			}
			eth_dev = rte_eth_dev_allocated(if_name);
			if (!eth_dev) {
				errmsg = "eth_dev not found for output port id";
				errcode = EINVAL;
				goto err_exit;
			}
			if (!otx2_ethdev_is_same_driver(eth_dev)) {
				errmsg = "Output port id unsupported type";
				errcode = ENOTSUP;
				goto err_exit;
			}
			/* Redirection is only supported to a VF under the
			 * same PF as this port.
			 */
			if (!otx2_dev_is_vf(otx2_eth_pmd_priv(eth_dev))) {
				errmsg = "Output port should be VF";
				errcode = ENOTSUP;
				goto err_exit;
			}
			vf_id = otx2_eth_pmd_priv(eth_dev)->vf;
			if (vf_id  >= hw->maxvf) {
				errmsg = "Invalid vf for output port";
				errcode = EINVAL;
				goto err_exit;
			}
			pf_id = otx2_eth_pmd_priv(eth_dev)->pf;
			if (pf_id != hw->pf) {
				errmsg = "Output port unsupported PF";
				errcode = ENOTSUP;
				goto err_exit;
			}
			pf_func &= (0xfc00);
			pf_func = (pf_func | (vf_id + 1));
			req_act |= OTX2_FLOW_ACT_VF;
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			/* Applicable only to ingress flow */
			act_q = (const struct rte_flow_action_queue *)
				actions->conf;
			rq = act_q->index;
			if (rq >= dev->data->nb_rx_queues) {
				errmsg = "invalid queue index";
				errcode = EINVAL;
				goto err_exit;
			}
			req_act |= OTX2_FLOW_ACT_QUEUE;
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			errcode = parse_rss_action(dev, attr, actions, error);
			/* parse_rss_action() already filled in @error */
			if (errcode)
				return -rte_errno;

			req_act |= OTX2_FLOW_ACT_RSS;
			break;

		case RTE_FLOW_ACTION_TYPE_SECURITY:
			/* Assumes user has already configured security
			 * session for this flow. Associated conf is
			 * opaque. When RTE security is implemented for otx2,
			 * we need to verify that for specified security
			 * session:
			 *  action_type ==
			 *    RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
			 *  session_protocol ==
			 *    RTE_SECURITY_PROTOCOL_IPSEC
			 *
			 * RSS is not supported with inline ipsec. Get the
			 * rq from associated conf, or make
			 * RTE_FLOW_ACTION_TYPE_QUEUE compulsory with this
			 * action.
			 * Currently, rq = 0 is assumed.
			 */
			req_act |= OTX2_FLOW_ACT_SEC;
			rq = 0;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			req_act |= OTX2_FLOW_ACT_VLAN_INSERT;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
			req_act |= OTX2_FLOW_ACT_VLAN_STRIP;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
			req_act |= OTX2_FLOW_ACT_VLAN_ETHTYPE_INSERT;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			req_act |= OTX2_FLOW_ACT_VLAN_PCP_INSERT;
			break;
		default:
			errmsg = "Unsupported action specified";
			errcode = ENOTSUP;
			goto err_exit;
		}
	}

	/* Second pass: validate the collected action combination. */
	if (req_act &
	    (OTX2_FLOW_ACT_VLAN_INSERT | OTX2_FLOW_ACT_VLAN_ETHTYPE_INSERT |
	     OTX2_FLOW_ACT_VLAN_PCP_INSERT))
		vlan_insert_action = true;

	/* PCP insert is only meaningful together with VID/ethtype insert */
	if ((req_act &
	     (OTX2_FLOW_ACT_VLAN_INSERT | OTX2_FLOW_ACT_VLAN_ETHTYPE_INSERT |
	      OTX2_FLOW_ACT_VLAN_PCP_INSERT)) ==
	    OTX2_FLOW_ACT_VLAN_PCP_INSERT) {
		errmsg = " PCP insert action can't be supported alone";
		errcode = ENOTSUP;
		goto err_exit;
	}

	/* Both STRIP and INSERT actions are not supported */
	if (vlan_insert_action && (req_act & OTX2_FLOW_ACT_VLAN_STRIP)) {
		errmsg = "Both VLAN insert and strip actions not supported"
			" together";
		errcode = ENOTSUP;
		goto err_exit;
	}

	/* Check if actions specified are compatible */
	if (attr->egress) {
		if (req_act & OTX2_FLOW_ACT_VLAN_STRIP) {
			errmsg = "VLAN pop action is not supported on Egress";
			errcode = ENOTSUP;
			goto err_exit;
		}

		if (req_act & OTX2_FLOW_ACT_DROP) {
			flow->npc_action = NIX_TX_ACTIONOP_DROP;
		} else if ((req_act & OTX2_FLOW_ACT_COUNT) ||
			   vlan_insert_action) {
			flow->npc_action = NIX_TX_ACTIONOP_UCAST_DEFAULT;
		} else {
			errmsg = "Unsupported action for egress";
			errcode = EINVAL;
			goto err_exit;
		}
		goto set_pf_func;
	}

	/* We have already verified the attr, this is ingress.
	 * - Exactly one terminating action is supported
	 * - Exactly one of MARK or FLAG is supported
	 * - If terminating action is DROP, only count is valid.
	 */
	sel_act = req_act & OTX2_FLOW_ACT_TERM;
	/* Power-of-two test: more than one terminating bit set is an error */
	if ((sel_act & (sel_act - 1)) != 0) {
		errmsg = "Only one terminating action supported";
		errcode = EINVAL;
		goto err_exit;
	}

	if (req_act & OTX2_FLOW_ACT_DROP) {
		/* With DROP, the only other permitted action is COUNT */
		sel_act = req_act & ~OTX2_FLOW_ACT_COUNT;
		if ((sel_act & (sel_act - 1)) != 0) {
			errmsg = "Only COUNT action is supported "
				"with DROP ingress action";
			errcode = ENOTSUP;
			goto err_exit;
		}
	}

	if ((req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK))
	    == (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
		errmsg = "Only one of FLAG or MARK action is supported";
		errcode = ENOTSUP;
		goto err_exit;
	}

	if (vlan_insert_action) {
		errmsg = "VLAN push/Insert action is not supported on Ingress";
		errcode = ENOTSUP;
		goto err_exit;
	}

	/* NOTE(review): incremented before the final validation below; the
	 * "Invalid action specified" path does not undo it — confirm the
	 * caller rolls this back on failure.
	 */
	if (req_act & OTX2_FLOW_ACT_VLAN_STRIP)
		npc->vtag_actions++;

	/* Only VLAN action is provided */
	if (req_act == OTX2_FLOW_ACT_VLAN_STRIP)
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	/* Set NIX_RX_ACTIONOP */
	else if (req_act & (OTX2_FLOW_ACT_PF | OTX2_FLOW_ACT_VF)) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
		if (req_act & OTX2_FLOW_ACT_QUEUE)
			/* Destination RQ index lives at bits [39:20] */
			flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & OTX2_FLOW_ACT_DROP) {
		flow->npc_action = NIX_RX_ACTIONOP_DROP;
	} else if (req_act & OTX2_FLOW_ACT_QUEUE) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
		flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & OTX2_FLOW_ACT_RSS) {
		/* When user added a rule for rss, first we will add the
		 *rule in MCAM and then update the action, once if we have
		 *FLOW_KEY_ALG index. So, till we update the action with
		 *flow_key_alg index, set the action to drop.
		 */
		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
			flow->npc_action = NIX_RX_ACTIONOP_DROP;
		else
			flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else if (req_act & OTX2_FLOW_ACT_SEC) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC;
		flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else if (req_act & OTX2_FLOW_ACT_COUNT) {
		/* Keep OTX2_FLOW_ACT_COUNT always at the end
		 * This is default action, when user specify only
		 * COUNT ACTION
		 */
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else {
		/* Should never reach here */
		errmsg = "Invalid action specified";
		errcode = EINVAL;
		goto err_exit;
	}

	/* Mark/flag value occupies bits [55:40] of the action word */
	if (mark)
		flow->npc_action |= (uint64_t)mark << 40;

	/* First mark-using rule: enable the mark-update Rx fast path */
	if (rte_atomic32_read(&npc->mark_actions) == 1) {
		hw->rx_offload_flags |=
			NIX_RX_OFFLOAD_MARK_UPDATE_F;
		otx2_eth_set_rx_function(dev);
	}

	/* First vtag-strip rule: enable the VLAN-strip Rx fast path */
	if (npc->vtag_actions == 1) {
		hw->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
		otx2_eth_set_rx_function(dev);
	}

set_pf_func:
	/* Ideally AF must ensure that correct pf_func is set */
	if (attr->egress)
		flow->npc_action |= (uint64_t)pf_func << 48;
	else
		flow->npc_action |= (uint64_t)pf_func << 4;

	return 0;

err_exit:
	rte_flow_error_set(error, errcode,
			   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
			   errmsg);
	return -rte_errno;
}