net/qede: fix flow director for IPv6 filter
drivers/net/qede/qede_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_errno.h>

#include "qede_ethdev.h"

/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
        uint16_t rte_filter_type;
        enum ecore_filter_ucast_type qede_type;
        enum ecore_tunn_clss qede_tunn_clss;
        const char *string;
} qede_tunn_types[] = {
        {
                ETH_TUNNEL_FILTER_OMAC,
                ECORE_FILTER_MAC,
                ECORE_TUNN_CLSS_MAC_VLAN,
                "outer-mac"
        },
        {
                ETH_TUNNEL_FILTER_TENID,
                ECORE_FILTER_VNI,
                ECORE_TUNN_CLSS_MAC_VNI,
                "vni"
        },
        {
                ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_INNER_MAC,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-mac"
        },
        {
                ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_INNER_VLAN,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
                ECORE_FILTER_MAC_VNI_PAIR,
                ECORE_TUNN_CLSS_MAC_VNI,
                "outer-mac and vni"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-mac and inner-mac"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-mac and inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_INNER_MAC_VNI_PAIR,
                ECORE_TUNN_CLSS_INNER_MAC_VNI,
                "vni and inner-mac",
        },
        {
                ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "vni and inner-vlan",
        },
        {
                ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_INNER_PAIR,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-mac and inner-vlan",
        },
        {
                ETH_TUNNEL_FILTER_OIP,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-IP"
        },
        {
                ETH_TUNNEL_FILTER_IIP,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "inner-IP"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_IVLAN"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_IVLAN_TENID"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_TENID,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_TENID"
        },
        {
                RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "OMAC_TENID_IMAC"
        },
};

#define IP_VERSION                              (0x40)
#define IP_HDRLEN                               (0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL        (IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF           (0x50)
#define QEDE_FDIR_IPV4_DEF_TTL                  (64)
#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW         (0x60000000)
/* Sum of the header lengths of L2, L3 and L4:
 * L2 : ether_hdr (14) + vlan_hdr (4) + vxlan_hdr (8)
 * L3 : ipv6_hdr (40)
 * L4 : tcp_hdr (20)
 * i.e. 14 + 4 + 8 + 40 + 20 = 86 bytes
 */
#define QEDE_MAX_FDIR_PKT_LEN                   (86)

#ifndef IPV6_ADDR_LEN
#define IPV6_ADDR_LEN                           (16)
#endif

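/* Only non-fragmented IPv4/IPv6 TCP and UDP 4-tuple flows can be offloaded */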
static inline bool qede_valid_flow(uint16_t flow_type)
{
        return  ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
                 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
                 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
                 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
}

/* Note: Flowdir support is only partial.
 * For example, drop_queue, FDIR masks and flex_conf are not supported.
 * Parameters like pballoc/status fields are irrelevant here.
 */
int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;

        /* check FDIR modes */
        switch (fdir->mode) {
        case RTE_FDIR_MODE_NONE:
                qdev->fdir_info.arfs.arfs_enable = false;
                DP_INFO(edev, "flowdir is disabled\n");
                break;
        case RTE_FDIR_MODE_PERFECT:
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        qdev->fdir_info.arfs.arfs_enable = false;
                        return -ENOTSUP;
                }
                qdev->fdir_info.arfs.arfs_enable = true;
                DP_INFO(edev, "flowdir is enabled\n");
                break;
        case RTE_FDIR_MODE_PERFECT_TUNNEL:
        case RTE_FDIR_MODE_SIGNATURE:
        case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
                DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
                return -ENOTSUP;
        }

        return 0;
}

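/* Free every flowdir filter entry along with its packet-buffer memzone */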
void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct qede_fdir_entry *tmp = NULL;

        /* Removing entries while iterating with SLIST_FOREACH would touch
         * freed memory, so always unlink from the head instead.
         */
        while (!SLIST_EMPTY(&qdev->fdir_info.fdir_list_head)) {
                tmp = SLIST_FIRST(&qdev->fdir_info.fdir_list_head);
                if (tmp->mz)
                        rte_memzone_free(tmp->mz);
                SLIST_REMOVE_HEAD(&qdev->fdir_info.fdir_list_head, list);
                rte_free(tmp);
        }
}

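/* Common add/delete handler: build the template packet in its own memzone,
 * match it against the current filter list, then program the RFS ntuple
 * filter through ecore.
 */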
static int
qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
                            struct rte_eth_fdir_filter *fdir_filter,
                            bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
        struct qede_fdir_entry *tmp = NULL;
        struct qede_fdir_entry *fdir = NULL;
        const struct rte_memzone *mz;
        struct ecore_hwfn *p_hwfn;
        enum _ecore_status_t rc;
        uint16_t pkt_len;
        void *pkt;

        if (add) {
                if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
                        DP_ERR(edev, "Reached max flowdir filter limit\n");
                        return -EINVAL;
                }
                fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
                                  RTE_CACHE_LINE_SIZE);
                if (!fdir) {
                        DP_ERR(edev, "Did not allocate memory for fdir\n");
                        return -ENOMEM;
                }
        }
        /* soft_id could have been used as the memzone name, but soft_id is
         * not currently used, so it has no significance.
         */
        snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
                 (unsigned long)rte_get_timer_cycles());
        mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
                                         SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
        if (!mz) {
                DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
                       rte_strerror(rte_errno));
                rc = -rte_errno;
                goto err1;
        }

        pkt = mz->addr;
        memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
        pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
                                          &qdev->fdir_info.arfs);
        if (pkt_len == 0) {
                rc = -EINVAL;
                goto err2;
        }
        DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
        if (add) {
                SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
                                DP_INFO(edev, "flowdir filter already exists\n");
                                rc = 0;
                                goto err2;
                        }
                }
        } else {
                SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
                                break;
                }
                if (!tmp) {
                        DP_ERR(edev, "flowdir filter does not exist\n");
                        rc = -ENOENT;
                        goto err2;
                }
        }
        p_hwfn = ECORE_LEADING_HWFN(edev);
        if (add) {
                if (!qdev->fdir_info.arfs.arfs_enable) {
                        /* Force update */
                        eth_dev->data->dev_conf.fdir_conf.mode =
                                                RTE_FDIR_MODE_PERFECT;
                        qdev->fdir_info.arfs.arfs_enable = true;
                        DP_INFO(edev, "Force enable flowdir in perfect mode\n");
                }
                /* Enable ARFS searcher with updated flow_types */
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                          &qdev->fdir_info.arfs);
        }
        /* configure filter with ECORE_SPQ_MODE_EBLOCK */
        rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
                                               (dma_addr_t)mz->iova,
                                               pkt_len,
                                               fdir_filter->action.rx_queue,
                                               0, add);
        if (rc == ECORE_SUCCESS) {
                if (add) {
                        fdir->rx_queue = fdir_filter->action.rx_queue;
                        fdir->pkt_len = pkt_len;
                        fdir->mz = mz;
                        SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
                                          fdir, list);
                        qdev->fdir_info.filter_count++;
                        DP_INFO(edev, "flowdir filter added, count = %d\n",
                                qdev->fdir_info.filter_count);
                } else {
                        rte_memzone_free(tmp->mz);
                        SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
                                     qede_fdir_entry, list);
                        rte_free(tmp); /* free the deleted list node */
                        rte_memzone_free(mz); /* free the temporary pkt buffer */
                        qdev->fdir_info.filter_count--;
                        DP_INFO(edev, "flowdir filter deleted, count = %d\n",
                                qdev->fdir_info.filter_count);
                }
        } else {
                DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
                       rc, qdev->fdir_info.filter_count);
        }

        /* Disable ARFS searcher if there are no more filters */
        if (qdev->fdir_info.filter_count == 0) {
                memset(&qdev->fdir_info.arfs, 0,
                       sizeof(struct ecore_arfs_config_params));
                DP_INFO(edev, "Disabling flowdir\n");
                qdev->fdir_info.arfs.arfs_enable = false;
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                          &qdev->fdir_info.arfs);
        }
        return 0;

err2:
        rte_memzone_free(mz);
err1:
        if (add)
                rte_free(fdir);
        return rc;
}

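/* Validate flow type, Rx queue index and the PF-only restriction before
 * entering the common filter path.
 */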
static int
qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
                     struct rte_eth_fdir_filter *fdir,
                     bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        if (!qede_valid_flow(fdir->input.flow_type)) {
                DP_ERR(edev, "invalid flow_type input\n");
                return -EINVAL;
        }

        if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
                DP_ERR(edev, "invalid queue number %u\n",
                       fdir->action.rx_queue);
                return -EINVAL;
        }

        if (fdir->input.flow_ext.is_vf) {
                DP_ERR(edev, "flowdir is not supported over VF\n");
                return -EINVAL;
        }

        return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
}

/* Fills the L3/L4 headers and returns the actual length of the flowdir packet */
uint16_t
qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
                        struct rte_eth_fdir_filter *fdir,
                        void *buff,
                        struct ecore_arfs_config_params *params)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        uint16_t *ether_type;
        uint8_t *raw_pkt;
        struct rte_eth_fdir_input *input;
        static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
        struct ipv4_hdr *ip;
        struct ipv6_hdr *ip6;
        struct udp_hdr *udp;
        struct tcp_hdr *tcp;
        uint16_t len;
        static const uint8_t next_proto[] = {
                [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
                [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
                [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
                [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
        };

        raw_pkt = (uint8_t *)buff;
        input = &fdir->input;
        DP_INFO(edev, "flow_type %d\n", input->flow_type);

        len = 2 * sizeof(struct ether_addr);
        raw_pkt += 2 * sizeof(struct ether_addr);
        if (input->flow_ext.vlan_tci) {
                DP_INFO(edev, "adding VLAN header\n");
                rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
                rte_memcpy(raw_pkt + sizeof(uint16_t),
                           &input->flow_ext.vlan_tci,
                           sizeof(uint16_t));
                raw_pkt += sizeof(vlan_frame);
                len += sizeof(vlan_frame);
        }
        ether_type = (uint16_t *)raw_pkt;
        raw_pkt += sizeof(uint16_t);
        len += sizeof(uint16_t);

        switch (input->flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
                /* fill the common ip header */
                ip = (struct ipv4_hdr *)raw_pkt;
                *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
                ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
                ip->total_length = sizeof(struct ipv4_hdr);
                ip->next_proto_id = input->flow.ip4_flow.proto ?
                                    input->flow.ip4_flow.proto :
                                    next_proto[input->flow_type];
                ip->time_to_live = input->flow.ip4_flow.ttl ?
                                   input->flow.ip4_flow.ttl :
                                   QEDE_FDIR_IPV4_DEF_TTL;
                ip->type_of_service = input->flow.ip4_flow.tos;
                ip->dst_addr = input->flow.ip4_flow.dst_ip;
                ip->src_addr = input->flow.ip4_flow.src_ip;
                len += sizeof(struct ipv4_hdr);
                params->ipv4 = true;

                raw_pkt = (uint8_t *)buff;
                /* UDP */
                if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
                        udp = (struct udp_hdr *)(raw_pkt + len);
                        udp->dst_port = input->flow.udp4_flow.dst_port;
                        udp->src_port = input->flow.udp4_flow.src_port;
                        udp->dgram_len = sizeof(struct udp_hdr);
                        len += sizeof(struct udp_hdr);
                        /* adjust ip total_length */
                        ip->total_length += sizeof(struct udp_hdr);
                        params->udp = true;
                } else { /* TCP */
                        tcp = (struct tcp_hdr *)(raw_pkt + len);
                        tcp->src_port = input->flow.tcp4_flow.src_port;
                        tcp->dst_port = input->flow.tcp4_flow.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
                        len += sizeof(struct tcp_hdr);
                        /* adjust ip total_length */
                        ip->total_length += sizeof(struct tcp_hdr);
                        params->tcp = true;
                }
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
                ip6 = (struct ipv6_hdr *)raw_pkt;
                *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
                ip6->proto = input->flow.ipv6_flow.proto ?
                                        input->flow.ipv6_flow.proto :
                                        next_proto[input->flow_type];
                ip6->vtc_flow =
                        rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);

                rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.src_ip,
                           IPV6_ADDR_LEN);
                rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.dst_ip,
                           IPV6_ADDR_LEN);
                len += sizeof(struct ipv6_hdr);
                params->ipv6 = true;

                raw_pkt = (uint8_t *)buff;
                /* UDP */
                if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
                        udp = (struct udp_hdr *)(raw_pkt + len);
                        udp->src_port = input->flow.udp6_flow.src_port;
                        udp->dst_port = input->flow.udp6_flow.dst_port;
                        len += sizeof(struct udp_hdr);
                        params->udp = true;
                } else { /* TCP */
                        tcp = (struct tcp_hdr *)(raw_pkt + len);
                        tcp->src_port = input->flow.tcp6_flow.src_port;
                        tcp->dst_port = input->flow.tcp6_flow.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
                        len += sizeof(struct tcp_hdr);
                        params->tcp = true;
                }
                break;
        default:
                DP_ERR(edev, "Unsupported flow_type %u\n",
                       input->flow_type);
                return 0;
        }

        return len;
}

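/* RTE_ETH_FILTER_FDIR handler: NOP queries support, ADD/DELETE program or
 * remove a filter; everything else is unsupported.
 */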
static int
qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
                      enum rte_filter_op filter_op,
                      void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_fdir_filter *fdir;
        int ret;

        fdir = (struct rte_eth_fdir_filter *)arg;
        switch (filter_op) {
        case RTE_ETH_FILTER_NOP:
                /* Typically used to query flowdir support */
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        return -ENOTSUP;
                }
                return 0; /* means supported */
        case RTE_ETH_FILTER_ADD:
                ret = qede_fdir_filter_add(eth_dev, fdir, 1);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = qede_fdir_filter_add(eth_dev, fdir, 0);
                break;
        case RTE_ETH_FILTER_FLUSH:
        case RTE_ETH_FILTER_UPDATE:
        case RTE_ETH_FILTER_INFO:
                return -ENOTSUP;
        default:
                DP_ERR(edev, "unknown operation %u\n", filter_op);
                ret = -EINVAL;
        }

        return ret;
}

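/* RTE_ETH_FILTER_NTUPLE handler: converts a 5-tuple filter into an
 * equivalent fdir entry and reuses the common flowdir path.
 */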
int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
                            enum rte_filter_op filter_op,
                            void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_ntuple_filter *ntuple;
        struct rte_eth_fdir_filter fdir_entry;
        struct rte_eth_tcpv4_flow *tcpv4_flow;
        struct rte_eth_udpv4_flow *udpv4_flow;
        bool add = false;

        switch (filter_op) {
        case RTE_ETH_FILTER_NOP:
                /* Typically used to query fdir support */
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        return -ENOTSUP;
                }
                return 0; /* means supported */
        case RTE_ETH_FILTER_ADD:
                add = true;
                break;
        case RTE_ETH_FILTER_DELETE:
                break;
        case RTE_ETH_FILTER_INFO:
        case RTE_ETH_FILTER_GET:
        case RTE_ETH_FILTER_UPDATE:
        case RTE_ETH_FILTER_FLUSH:
        case RTE_ETH_FILTER_SET:
        case RTE_ETH_FILTER_STATS:
        case RTE_ETH_FILTER_OP_MAX:
                DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
                return -ENOTSUP;
        }
        ntuple = (struct rte_eth_ntuple_filter *)arg;
        /* Internally convert ntuple to fdir entry */
        memset(&fdir_entry, 0, sizeof(fdir_entry));
        if (ntuple->proto == IPPROTO_TCP) {
                fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
                tcpv4_flow->ip.src_ip = ntuple->src_ip;
                tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
                tcpv4_flow->ip.proto = IPPROTO_TCP;
                tcpv4_flow->src_port = ntuple->src_port;
                tcpv4_flow->dst_port = ntuple->dst_port;
        } else {
                fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                udpv4_flow = &fdir_entry.input.flow.udp4_flow;
                udpv4_flow->ip.src_ip = ntuple->src_ip;
                udpv4_flow->ip.dst_ip = ntuple->dst_ip;
                udpv4_flow->ip.proto = IPPROTO_UDP;
                udpv4_flow->src_port = ntuple->src_port;
                udpv4_flow->dst_port = ntuple->dst_port;
        }

        fdir_entry.action.rx_queue = ntuple->queue;

        return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
}

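/* Send a tunnel PF-update ramrod on every hwfn; PFs acquire a PTT window
 * for the duration of the update.
 */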
static int
qede_tunnel_update(struct qede_dev *qdev,
                   struct ecore_tunnel_info *tunn_info)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_hwfn *p_hwfn;
        struct ecore_ptt *p_ptt;
        int i;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                if (IS_PF(edev)) {
                        p_ptt = ecore_ptt_acquire(p_hwfn);
                        if (!p_ptt) {
                                DP_ERR(p_hwfn, "Can't acquire PTT\n");
                                return -EAGAIN;
                        }
                } else {
                        p_ptt = NULL;
                }

                rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
                                tunn_info, ECORE_SPQ_MODE_CB, NULL);
                if (IS_PF(edev))
                        ecore_ptt_release(p_hwfn, p_ptt);

                if (rc != ECORE_SUCCESS)
                        break;
        }

        return rc;
}

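/* Enable or disable VXLAN tunnel offload with the given classification
 * mode; the default UDP port is programmed on enable and cleared on
 * disable.
 */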
static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                  bool enable)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_tunnel_info tunn;

        if (qdev->vxlan.enable == enable)
                return ECORE_SUCCESS;

        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
        tunn.vxlan.b_update_mode = true;
        tunn.vxlan.b_mode_enabled = enable;
        tunn.b_update_rx_cls = true;
        tunn.b_update_tx_cls = true;
        tunn.vxlan.tun_cls = clss;

        tunn.vxlan_port.b_update_port = true;
        tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

        rc = qede_tunnel_update(qdev, &tunn);
        if (rc == ECORE_SUCCESS) {
                qdev->vxlan.enable = enable;
                qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
                DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
                        enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
        } else {
                DP_ERR(edev, "Failed to update tunn_clss %u\n",
                       tunn.vxlan.tun_cls);
        }

        return rc;
}

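/* Enable or disable both L2 and IP GENEVE tunnel modes with the given
 * classification mode, updating the default UDP port accordingly.
 */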
static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                   bool enable)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_tunnel_info tunn;

        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
        tunn.l2_geneve.b_update_mode = true;
        tunn.l2_geneve.b_mode_enabled = enable;
        tunn.ip_geneve.b_update_mode = true;
        tunn.ip_geneve.b_mode_enabled = enable;
        tunn.l2_geneve.tun_cls = clss;
        tunn.ip_geneve.tun_cls = clss;
        tunn.b_update_rx_cls = true;
        tunn.b_update_tx_cls = true;

        tunn.geneve_port.b_update_port = true;
        tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

        rc = qede_tunnel_update(qdev, &tunn);
        if (rc == ECORE_SUCCESS) {
                qdev->geneve.enable = enable;
                qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
                DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
                        enable ? "enabled" : "disabled", qdev->geneve.udp_port);
        } else {
                DP_ERR(edev, "Failed to update tunn_clss %u\n",
                       clss);
        }

        return rc;
}

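/* Enable or disable IP-in-GRE tunnel offload with the given
 * classification mode.
 */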
static int
qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                  bool enable)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_tunnel_info tunn;

        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
        tunn.ip_gre.b_update_mode = true;
        tunn.ip_gre.b_mode_enabled = enable;
        tunn.ip_gre.tun_cls = clss;
        tunn.b_update_rx_cls = true;
        tunn.b_update_tx_cls = true;

        rc = qede_tunnel_update(qdev, &tunn);
        if (rc == ECORE_SUCCESS) {
                qdev->ipgre.enable = enable;
                DP_INFO(edev, "IPGRE is %s\n",
                        enable ? "enabled" : "disabled");
        } else {
                DP_ERR(edev, "Failed to update tunn_clss %u\n",
                       clss);
        }

        return rc;
}

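/* Delete a previously configured VXLAN/GENEVE UDP destination port and,
 * once no filters reference the tunnel, disable the offload entirely.
 */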
int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
                      struct rte_eth_udp_tunnel *tunnel_udp)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_tunnel_info tunn; /* @DPDK */
        uint16_t udp_port;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        memset(&tunn, 0, sizeof(tunn));

        switch (tunnel_udp->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
                        DP_ERR(edev, "UDP port %u doesn't exist\n",
                                tunnel_udp->udp_port);
                        return ECORE_INVAL;
                }
                udp_port = 0;

                tunn.vxlan_port.b_update_port = true;
                tunn.vxlan_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u\n",
                               tunn.vxlan_port.port);
                        return rc;
                }

                qdev->vxlan.udp_port = udp_port;
                /* If the request is to delete the UDP port and the number of
                 * VXLAN filters has reached 0, then VXLAN offload can be
                 * disabled.
                 */
                if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
                        return qede_vxlan_enable(eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN, false);

                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
                        DP_ERR(edev, "UDP port %u doesn't exist\n",
                                tunnel_udp->udp_port);
                        return ECORE_INVAL;
                }

                udp_port = 0;

                tunn.geneve_port.b_update_port = true;
                tunn.geneve_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u\n",
                               tunn.geneve_port.port);
                        return rc;
                }

                qdev->geneve.udp_port = udp_port;
                /* If the request is to delete the UDP port and the number of
                 * GENEVE filters has reached 0, then GENEVE offload can be
                 * disabled.
                 */
                if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
                        return qede_geneve_enable(eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN, false);

                break;

        default:
                return ECORE_INVAL;
        }

        return 0;
}

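/* Add a new VXLAN/GENEVE UDP destination port, first enabling the tunnel
 * offload with default MAC/VLAN classification if it is not active yet.
 *
 * Reached through rte_eth_dev_udp_tunnel_port_add(); illustrative caller
 * sketch (the port number is an example only):
 *
 *      struct rte_eth_udp_tunnel t = {
 *              .udp_port = 4790,
 *              .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *      };
 *      rte_eth_dev_udp_tunnel_port_add(port_id, &t);
 */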
int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
                      struct rte_eth_udp_tunnel *tunnel_udp)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_tunnel_info tunn; /* @DPDK */
        uint16_t udp_port;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        memset(&tunn, 0, sizeof(tunn));

        switch (tunnel_udp->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
                        DP_INFO(edev,
                                "UDP port %u for VXLAN was already configured\n",
                                tunnel_udp->udp_port);
                        return ECORE_SUCCESS;
                }

                /* Enable VXLAN tunnel with default MAC/VLAN classification if
                 * it was not enabled while adding VXLAN filter before UDP port
                 * update.
                 */
                if (!qdev->vxlan.enable) {
                        rc = qede_vxlan_enable(eth_dev,
                                ECORE_TUNN_CLSS_MAC_VLAN, true);
                        if (rc != ECORE_SUCCESS) {
                                DP_ERR(edev, "Failed to enable VXLAN "
                                        "prior to updating UDP port\n");
                                return rc;
                        }
                }
                udp_port = tunnel_udp->udp_port;

                tunn.vxlan_port.b_update_port = true;
                tunn.vxlan_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
                               udp_port);
                        return rc;
                }

                DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);

                qdev->vxlan.udp_port = udp_port;
                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
                        DP_INFO(edev,
                                "UDP port %u for GENEVE was already configured\n",
                                tunnel_udp->udp_port);
                        return ECORE_SUCCESS;
                }

                /* Enable GENEVE tunnel with default MAC/VLAN classification if
                 * it was not enabled while adding GENEVE filter before UDP port
                 * update.
                 */
                if (!qdev->geneve.enable) {
                        rc = qede_geneve_enable(eth_dev,
                                ECORE_TUNN_CLSS_MAC_VLAN, true);
                        if (rc != ECORE_SUCCESS) {
                                DP_ERR(edev, "Failed to enable GENEVE "
                                        "prior to updating UDP port\n");
                                return rc;
                        }
                }
                udp_port = tunnel_udp->udp_port;

                tunn.geneve_port.b_update_port = true;
                tunn.geneve_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
                               udp_port);
                        return rc;
                }

                DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);

                qdev->geneve.udp_port = udp_port;
                break;
        default:
                return ECORE_INVAL;
        }

        return 0;
}

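/* Map an rte tunnel filter type onto the ecore ucast type/classification;
 * *clss stays MAX_ECORE_TUNN_CLSS when the combination is unsupported.
 */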
static void qede_get_ecore_tunn_params(uint32_t filter,
                                       enum ecore_filter_ucast_type *type,
                                       enum ecore_tunn_clss *clss, char *str)
{
        uint16_t j;

        *clss = MAX_ECORE_TUNN_CLSS;

        for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
                if (filter == qede_tunn_types[j].rte_filter_type) {
                        *type = qede_tunn_types[j].qede_type;
                        *clss = qede_tunn_types[j].qede_tunn_clss;
                        strcpy(str, qede_tunn_types[j].string);
                        return;
                }
        }
}

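/* Translate the rte tunnel filter fields into an ecore unicast filter
 * according to the classification type.
 */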
static int
qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
                              const struct rte_eth_tunnel_filter_conf *conf,
                              uint32_t type)
{
        /* Init common ucast params first */
        qede_set_ucast_cmn_params(ucast);

        /* Copy out the required fields based on classification type */
        ucast->type = type;

        switch (type) {
        case ECORE_FILTER_VNI:
                ucast->vni = conf->tenant_id;
                break;
        case ECORE_FILTER_INNER_VLAN:
                ucast->vlan = conf->inner_vlan;
                break;
        case ECORE_FILTER_MAC:
                memcpy(ucast->mac, conf->outer_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                break;
        case ECORE_FILTER_INNER_MAC:
                memcpy(ucast->mac, conf->inner_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                break;
        case ECORE_FILTER_MAC_VNI_PAIR:
                memcpy(ucast->mac, conf->outer_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                ucast->vni = conf->tenant_id;
                break;
        case ECORE_FILTER_INNER_MAC_VNI_PAIR:
                memcpy(ucast->mac, conf->inner_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                ucast->vni = conf->tenant_id;
                break;
        case ECORE_FILTER_INNER_PAIR:
                memcpy(ucast->mac, conf->inner_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                ucast->vlan = conf->inner_vlan;
                break;
        default:
                return -EINVAL;
        }

        return ECORE_SUCCESS;
}

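/* Resolve the classification for the requested filter type and program
 * the corresponding ecore unicast filter.
 */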
static int
_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
                         const struct rte_eth_tunnel_filter_conf *conf,
                         __attribute__((unused)) enum rte_filter_op filter_op,
                         enum ecore_tunn_clss *clss,
                         bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_filter_ucast ucast = {0};
        enum ecore_filter_ucast_type type;
        uint16_t filter_type = 0;
        char str[80];
        int rc;

        filter_type = conf->filter_type;
        /* Determine if the given filter classification is supported */
        qede_get_ecore_tunn_params(filter_type, &type, clss, str);
        if (*clss == MAX_ECORE_TUNN_CLSS) {
                DP_ERR(edev, "Unsupported filter type\n");
                return -EINVAL;
        }
        /* Init tunnel ucast params */
        rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
                       conf->filter_type);
                return rc;
        }
        DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
                str, filter_op, ucast.type);

        ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;

        /* Skip MAC/VLAN if filter is based on VNI */
        if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
                rc = qede_mac_int_ops(eth_dev, &ucast, add);
                if (rc == 0 && add) {
                        /* Enable accept anyvlan */
                        qede_config_accept_any_vlan(qdev, true);
                }
        } else {
                rc = qede_ucast_filter(eth_dev, &ucast, add);
                if (rc == 0)
                        rc = ecore_filter_ucast_cmd(edev, &ucast,
                                            ECORE_SPQ_MODE_CB, NULL);
        }

        return rc;
}

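/* Dispatch tunnel offload enable/disable by tunnel type */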
static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                 enum rte_eth_tunnel_type tunn_type, bool enable)
{
        int rc = -EINVAL;

        switch (tunn_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                rc = qede_vxlan_enable(eth_dev, clss, enable);
                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                rc = qede_geneve_enable(eth_dev, clss, enable);
                break;
        case RTE_TUNNEL_TYPE_IP_IN_GRE:
                rc = qede_ipgre_enable(eth_dev, clss, enable);
                break;
        default:
                rc = -EINVAL;
                break;
        }

        return rc;
}

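/* RTE_ETH_FILTER_TUNNEL handler: program the classification filter (PF
 * only), keep the per-tunnel filter counts and toggle the tunnel offload
 * when the first filter is added or the last one is removed.
 */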
static int
qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
                        enum rte_filter_op filter_op,
                        const struct rte_eth_tunnel_filter_conf *conf)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
        bool add;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                add = true;
                break;
        case RTE_ETH_FILTER_DELETE:
                add = false;
                break;
        default:
                DP_ERR(edev, "Unsupported operation %d\n", filter_op);
                return -EINVAL;
        }

        if (IS_VF(edev))
                return qede_tunn_enable(eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN,
                                        conf->tunnel_type, add);

        rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
        if (rc != ECORE_SUCCESS)
                return rc;

        if (add) {
                if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
                        qdev->vxlan.num_filters++;
                        qdev->vxlan.filter_type = conf->filter_type;
                } else { /* GENEVE */
                        qdev->geneve.num_filters++;
                        qdev->geneve.filter_type = conf->filter_type;
                }

                if (!qdev->vxlan.enable || !qdev->geneve.enable ||
                    !qdev->ipgre.enable)
                        return qede_tunn_enable(eth_dev, clss,
                                                conf->tunnel_type,
                                                true);
        } else {
                if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
                        qdev->vxlan.num_filters--;
                else /* GENEVE */
                        qdev->geneve.num_filters--;

                /* Disable the tunnel offload once its filter count drops
                 * to 0.
                 */
                if (qdev->vxlan.num_filters == 0 ||
                    qdev->geneve.num_filters == 0)
                        return qede_tunn_enable(eth_dev, clss,
                                                conf->tunnel_type,
                                                false);
        }

        return 0;
}

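/* Top-level filter_ctrl entry point: dispatch on the rte filter type.
 *
 * Reached through rte_eth_dev_filter_ctrl(); illustrative fdir caller
 * sketch (field values are examples only, dst_ip_be is a hypothetical
 * big-endian address variable):
 *
 *      struct rte_eth_fdir_filter f = {0};
 *
 *      f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *      f.input.flow.udp4_flow.ip.dst_ip = dst_ip_be;
 *      f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(5000);
 *      f.action.rx_queue = 1;
 *      rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                              RTE_ETH_FILTER_ADD, &f);
 */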
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
                         enum rte_filter_type filter_type,
                         enum rte_filter_op filter_op,
                         void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_tunnel_filter_conf *filter_conf =
                        (struct rte_eth_tunnel_filter_conf *)arg;

        switch (filter_type) {
        case RTE_ETH_FILTER_TUNNEL:
                switch (filter_conf->tunnel_type) {
                case RTE_TUNNEL_TYPE_VXLAN:
                case RTE_TUNNEL_TYPE_GENEVE:
                case RTE_TUNNEL_TYPE_IP_IN_GRE:
                        DP_INFO(edev,
                                "Packet steering to the specified Rx queue"
                                " is not supported with UDP tunneling\n");
                        return qede_tunn_filter_config(eth_dev, filter_op,
                                                       filter_conf);
                case RTE_TUNNEL_TYPE_TEREDO:
                case RTE_TUNNEL_TYPE_NVGRE:
                case RTE_L2_TUNNEL_TYPE_E_TAG:
                        DP_ERR(edev, "Unsupported tunnel type %d\n",
                                filter_conf->tunnel_type);
                        return -EINVAL;
                case RTE_TUNNEL_TYPE_NONE:
                default:
                        return 0;
                }
                break;
        case RTE_ETH_FILTER_FDIR:
                return qede_fdir_filter_conf(eth_dev, filter_op, arg);
        case RTE_ETH_FILTER_NTUPLE:
                return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
        case RTE_ETH_FILTER_MACVLAN:
        case RTE_ETH_FILTER_ETHERTYPE:
        case RTE_ETH_FILTER_FLEXIBLE:
        case RTE_ETH_FILTER_SYN:
        case RTE_ETH_FILTER_HASH:
        case RTE_ETH_FILTER_L2_TUNNEL:
        case RTE_ETH_FILTER_MAX:
        default:
                DP_ERR(edev, "Unsupported filter type %d\n",
                        filter_type);
                return -EINVAL;
        }

        return 0;
}

/* RTE_FLOW */