net/qede: reorganize filter code
drivers/net/qede/qede_filter.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_errno.h>

#include "qede_ethdev.h"

/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
        uint16_t rte_filter_type;
        enum ecore_filter_ucast_type qede_type;
        enum ecore_tunn_clss qede_tunn_clss;
        const char *string;
} qede_tunn_types[] = {
        {
                ETH_TUNNEL_FILTER_OMAC,
                ECORE_FILTER_MAC,
                ECORE_TUNN_CLSS_MAC_VLAN,
                "outer-mac"
        },
        {
                ETH_TUNNEL_FILTER_TENID,
                ECORE_FILTER_VNI,
                ECORE_TUNN_CLSS_MAC_VNI,
                "vni"
        },
        {
                ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_INNER_MAC,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-mac"
        },
        {
                ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_INNER_VLAN,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
                ECORE_FILTER_MAC_VNI_PAIR,
                ECORE_TUNN_CLSS_MAC_VNI,
                "outer-mac and vni"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-mac and inner-mac"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-mac and inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_INNER_MAC_VNI_PAIR,
                ECORE_TUNN_CLSS_INNER_MAC_VNI,
                "vni and inner-mac"
        },
        {
                ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "vni and inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_INNER_PAIR,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-mac and inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_OIP,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-IP"
        },
        {
                ETH_TUNNEL_FILTER_IIP,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "inner-IP"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_IVLAN"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_IVLAN_TENID"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_TENID,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_TENID"
        },
        {
                RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "OMAC_TENID_IMAC"
        },
};

#define IP_VERSION                              (0x40)
#define IP_HDRLEN                               (0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL        (IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF           (0x50)
#define QEDE_FDIR_IPV4_DEF_TTL                  (64)

/* Sum of length of header types of L2, L3, L4.
 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
 * L3 : ipv6_hdr
 * L4 : tcp_hdr
 */
#define QEDE_MAX_FDIR_PKT_LEN                   (86)

#ifndef IPV6_ADDR_LEN
#define IPV6_ADDR_LEN                           (16)
#endif

static inline bool qede_valid_flow(uint16_t flow_type)
{
        return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
                (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
                (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
                (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
}

/* Note: Flowdir support is only partial.
 * For example: drop_queue, FDIR masks, flex_conf are not supported.
 * Parameters like pballoc/status fields are irrelevant here.
 */
int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;

        /* check FDIR modes */
        switch (fdir->mode) {
        case RTE_FDIR_MODE_NONE:
                qdev->fdir_info.arfs.arfs_enable = false;
                DP_INFO(edev, "flowdir is disabled\n");
                break;
        case RTE_FDIR_MODE_PERFECT:
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        qdev->fdir_info.arfs.arfs_enable = false;
                        return -ENOTSUP;
                }
                qdev->fdir_info.arfs.arfs_enable = true;
                DP_INFO(edev, "flowdir is enabled\n");
                break;
        case RTE_FDIR_MODE_PERFECT_TUNNEL:
        case RTE_FDIR_MODE_SIGNATURE:
        case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
                DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
                return -ENOTSUP;
        }

        return 0;
}

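/* Release all flow director entries: free each entry's packet memzone,
 * unlink it from the list and free the node itself.
 */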
void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct qede_fdir_entry *tmp = NULL;

        /* Entries are freed as they are removed, so pop them from the
         * head instead of using SLIST_FOREACH, which would dereference
         * freed nodes.
         */
        while (!SLIST_EMPTY(&qdev->fdir_info.fdir_list_head)) {
                tmp = SLIST_FIRST(&qdev->fdir_info.fdir_list_head);
                if (tmp->mz)
                        rte_memzone_free(tmp->mz);
                SLIST_REMOVE_HEAD(&qdev->fdir_info.fdir_list_head, list);
                rte_free(tmp);
        }
}

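/* Common flow director add/delete path: builds a template packet in a
 * DMA-able memzone, checks the software list for a duplicate (add) or a
 * match (delete), and programs the RFS n-tuple filter through ecore.
 */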
static int
qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
                            struct rte_eth_fdir_filter *fdir_filter,
                            bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
        struct qede_fdir_entry *tmp = NULL;
        struct qede_fdir_entry *fdir = NULL;
        const struct rte_memzone *mz;
        struct ecore_hwfn *p_hwfn;
        enum _ecore_status_t rc;
        uint16_t pkt_len;
        void *pkt;

        if (add) {
                if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
                        DP_ERR(edev, "Reached max flowdir filter limit\n");
                        return -EINVAL;
                }
                fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
                                  RTE_CACHE_LINE_SIZE);
                if (!fdir) {
                        DP_ERR(edev, "Did not allocate memory for fdir\n");
                        return -ENOMEM;
                }
        }
        /* soft_id could have been used as memzone string, but soft_id is
         * not currently used so it has no significance.
         */
        snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
                 (unsigned long)rte_get_timer_cycles());
        mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
                                         SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
        if (!mz) {
                DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
                       rte_strerror(rte_errno));
                rc = -rte_errno;
                goto err1;
        }

        pkt = mz->addr;
        memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
        pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
                                          &qdev->fdir_info.arfs);
        if (pkt_len == 0) {
                rc = -EINVAL;
                goto err2;
        }
        DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
        if (add) {
                SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
                                DP_INFO(edev, "flowdir filter exists\n");
                                rc = 0;
                                goto err2;
                        }
                }
        } else {
                SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
                                break;
                }
                if (!tmp) {
                        DP_ERR(edev, "flowdir filter does not exist\n");
                        rc = -ENOENT;
                        goto err2;
                }
        }
        p_hwfn = ECORE_LEADING_HWFN(edev);
        if (add) {
                if (!qdev->fdir_info.arfs.arfs_enable) {
                        /* Force update */
                        eth_dev->data->dev_conf.fdir_conf.mode =
                                                RTE_FDIR_MODE_PERFECT;
                        qdev->fdir_info.arfs.arfs_enable = true;
                        DP_INFO(edev, "Force enable flowdir in perfect mode\n");
                }
                /* Enable ARFS searcher with updated flow_types */
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                          &qdev->fdir_info.arfs);
        }
        /* configure filter with ECORE_SPQ_MODE_EBLOCK */
        rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
                                               (dma_addr_t)mz->iova,
                                               pkt_len,
                                               fdir_filter->action.rx_queue,
                                               0, add);
        if (rc == ECORE_SUCCESS) {
                if (add) {
                        fdir->rx_queue = fdir_filter->action.rx_queue;
                        fdir->pkt_len = pkt_len;
                        fdir->mz = mz;
                        SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
                                          fdir, list);
                        qdev->fdir_info.filter_count++;
                        DP_INFO(edev, "flowdir filter added, count = %d\n",
                                qdev->fdir_info.filter_count);
                } else {
                        rte_memzone_free(tmp->mz);
                        SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
                                     qede_fdir_entry, list);
                        rte_free(tmp); /* the node deleted */
                        rte_memzone_free(mz); /* temp node allocated */
                        qdev->fdir_info.filter_count--;
                        DP_INFO(edev, "Fdir filter deleted, count = %d\n",
                                qdev->fdir_info.filter_count);
                }
        } else {
                DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
                       rc, qdev->fdir_info.filter_count);
        }

        /* Disable ARFS searcher if there are no more filters */
        if (qdev->fdir_info.filter_count == 0) {
                memset(&qdev->fdir_info.arfs, 0,
                       sizeof(struct ecore_arfs_config_params));
                DP_INFO(edev, "Disabling flowdir\n");
                qdev->fdir_info.arfs.arfs_enable = false;
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                          &qdev->fdir_info.arfs);
        }
        return 0;

err2:
        rte_memzone_free(mz);
err1:
        if (add)
                rte_free(fdir);
        return rc;
}

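/* Validate flow type, Rx queue and VF restrictions before handing the
 * filter to the common flow director path.
 */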
static int
qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
                     struct rte_eth_fdir_filter *fdir,
                     bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        if (!qede_valid_flow(fdir->input.flow_type)) {
                DP_ERR(edev, "invalid flow_type input\n");
                return -EINVAL;
        }

        if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
                DP_ERR(edev, "invalid queue number %u\n",
                       fdir->action.rx_queue);
                return -EINVAL;
        }

        if (fdir->input.flow_ext.is_vf) {
                DP_ERR(edev, "flowdir is not supported over VF\n");
                return -EINVAL;
        }

        return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
}

/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
uint16_t
qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
                        struct rte_eth_fdir_filter *fdir,
                        void *buff,
                        struct ecore_arfs_config_params *params)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        uint16_t *ether_type;
        uint8_t *raw_pkt;
        struct rte_eth_fdir_input *input;
        static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
        struct ipv4_hdr *ip;
        struct ipv6_hdr *ip6;
        struct udp_hdr *udp;
        struct tcp_hdr *tcp;
        uint16_t len;
        static const uint8_t next_proto[] = {
                [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
                [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
                [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
                [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
        };
        raw_pkt = (uint8_t *)buff;
        input = &fdir->input;
        DP_INFO(edev, "flow_type %d\n", input->flow_type);

        len = 2 * sizeof(struct ether_addr);
        raw_pkt += 2 * sizeof(struct ether_addr);
        if (input->flow_ext.vlan_tci) {
                DP_INFO(edev, "adding VLAN header\n");
                rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
                rte_memcpy(raw_pkt + sizeof(uint16_t),
                           &input->flow_ext.vlan_tci,
                           sizeof(uint16_t));
                raw_pkt += sizeof(vlan_frame);
                len += sizeof(vlan_frame);
        }
        ether_type = (uint16_t *)raw_pkt;
        raw_pkt += sizeof(uint16_t);
        len += sizeof(uint16_t);

        switch (input->flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
                /* fill the common ip header */
                ip = (struct ipv4_hdr *)raw_pkt;
                *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
                ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
                ip->total_length = sizeof(struct ipv4_hdr);
                ip->next_proto_id = input->flow.ip4_flow.proto ?
                                    input->flow.ip4_flow.proto :
                                    next_proto[input->flow_type];
                ip->time_to_live = input->flow.ip4_flow.ttl ?
                                   input->flow.ip4_flow.ttl :
                                   QEDE_FDIR_IPV4_DEF_TTL;
                ip->type_of_service = input->flow.ip4_flow.tos;
                ip->dst_addr = input->flow.ip4_flow.dst_ip;
                ip->src_addr = input->flow.ip4_flow.src_ip;
                len += sizeof(struct ipv4_hdr);
                params->ipv4 = true;

                raw_pkt = (uint8_t *)buff;
                /* UDP */
                if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
                        udp = (struct udp_hdr *)(raw_pkt + len);
                        udp->dst_port = input->flow.udp4_flow.dst_port;
                        udp->src_port = input->flow.udp4_flow.src_port;
                        udp->dgram_len = sizeof(struct udp_hdr);
                        len += sizeof(struct udp_hdr);
                        /* adjust ip total_length */
                        ip->total_length += sizeof(struct udp_hdr);
                        params->udp = true;
                } else { /* TCP */
                        tcp = (struct tcp_hdr *)(raw_pkt + len);
                        tcp->src_port = input->flow.tcp4_flow.src_port;
                        tcp->dst_port = input->flow.tcp4_flow.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
                        len += sizeof(struct tcp_hdr);
                        /* adjust ip total_length */
                        ip->total_length += sizeof(struct tcp_hdr);
                        params->tcp = true;
                }
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
                ip6 = (struct ipv6_hdr *)raw_pkt;
                *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
                ip6->proto = input->flow.ipv6_flow.proto ?
                                        input->flow.ipv6_flow.proto :
                                        next_proto[input->flow_type];
                rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.src_ip,
                           IPV6_ADDR_LEN);
                rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.dst_ip,
                           IPV6_ADDR_LEN);
                len += sizeof(struct ipv6_hdr);

                raw_pkt = (uint8_t *)buff;
                /* UDP */
                if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
                        udp = (struct udp_hdr *)(raw_pkt + len);
                        udp->src_port = input->flow.udp6_flow.src_port;
                        udp->dst_port = input->flow.udp6_flow.dst_port;
                        len += sizeof(struct udp_hdr);
                        params->udp = true;
                } else { /* TCP */
                        tcp = (struct tcp_hdr *)(raw_pkt + len);
                        tcp->src_port = input->flow.tcp6_flow.src_port;
                        tcp->dst_port = input->flow.tcp6_flow.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
                        len += sizeof(struct tcp_hdr);
                        params->tcp = true;
                }
                break;
        default:
                DP_ERR(edev, "Unsupported flow_type %u\n",
                       input->flow_type);
                return 0;
        }

        return len;
}

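/* RTE_ETH_FILTER_FDIR operation dispatcher */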
static int
qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
                      enum rte_filter_op filter_op,
                      void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_fdir_filter *fdir;
        int ret;

        fdir = (struct rte_eth_fdir_filter *)arg;
        switch (filter_op) {
        case RTE_ETH_FILTER_NOP:
                /* Typically used to query flowdir support */
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        return -ENOTSUP;
                }
                return 0; /* means supported */
        case RTE_ETH_FILTER_ADD:
                ret = qede_fdir_filter_add(eth_dev, fdir, 1);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = qede_fdir_filter_add(eth_dev, fdir, 0);
                break;
        case RTE_ETH_FILTER_FLUSH:
        case RTE_ETH_FILTER_UPDATE:
        case RTE_ETH_FILTER_INFO:
                return -ENOTSUP;
        default:
                DP_ERR(edev, "unknown operation %u\n", filter_op);
                ret = -EINVAL;
        }

        return ret;
}

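/* RTE_ETH_FILTER_NTUPLE handler: converts the ntuple filter into an
 * equivalent flow director entry and reuses the common fdir path.
 */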
int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
                            enum rte_filter_op filter_op,
                            void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_ntuple_filter *ntuple;
        struct rte_eth_fdir_filter fdir_entry;
        struct rte_eth_tcpv4_flow *tcpv4_flow;
        struct rte_eth_udpv4_flow *udpv4_flow;
        bool add = false;

        switch (filter_op) {
        case RTE_ETH_FILTER_NOP:
                /* Typically used to query fdir support */
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        return -ENOTSUP;
                }
                return 0; /* means supported */
        case RTE_ETH_FILTER_ADD:
                add = true;
                break;
        case RTE_ETH_FILTER_DELETE:
                break;
        case RTE_ETH_FILTER_INFO:
        case RTE_ETH_FILTER_GET:
        case RTE_ETH_FILTER_UPDATE:
        case RTE_ETH_FILTER_FLUSH:
        case RTE_ETH_FILTER_SET:
        case RTE_ETH_FILTER_STATS:
        case RTE_ETH_FILTER_OP_MAX:
                DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
                return -ENOTSUP;
        }
        ntuple = (struct rte_eth_ntuple_filter *)arg;
        /* Internally convert ntuple to fdir entry */
        memset(&fdir_entry, 0, sizeof(fdir_entry));
        if (ntuple->proto == IPPROTO_TCP) {
                fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
                tcpv4_flow->ip.src_ip = ntuple->src_ip;
                tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
                tcpv4_flow->ip.proto = IPPROTO_TCP;
                tcpv4_flow->src_port = ntuple->src_port;
                tcpv4_flow->dst_port = ntuple->dst_port;
        } else {
                fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                udpv4_flow = &fdir_entry.input.flow.udp4_flow;
                udpv4_flow->ip.src_ip = ntuple->src_ip;
                udpv4_flow->ip.dst_ip = ntuple->dst_ip;
                udpv4_flow->ip.proto = IPPROTO_UDP;
                udpv4_flow->src_port = ntuple->src_port;
                udpv4_flow->dst_port = ntuple->dst_port;
        }

        fdir_entry.action.rx_queue = ntuple->queue;

        return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
}

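/* Push a tunnel configuration update to every hwfn of the device */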
static int
qede_tunnel_update(struct qede_dev *qdev,
                   struct ecore_tunnel_info *tunn_info)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_hwfn *p_hwfn;
        struct ecore_ptt *p_ptt;
        int i;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                if (IS_PF(edev)) {
                        p_ptt = ecore_ptt_acquire(p_hwfn);
                        if (!p_ptt) {
                                DP_ERR(p_hwfn, "Can't acquire PTT\n");
                                return -EAGAIN;
                        }
                } else {
                        p_ptt = NULL;
                }

                rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
                                tunn_info, ECORE_SPQ_MODE_CB, NULL);
                if (IS_PF(edev))
                        ecore_ptt_release(p_hwfn, p_ptt);

                if (rc != ECORE_SUCCESS)
                        break;
        }

        return rc;
}

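/* Enable or disable VXLAN tunnel offload, updating the default UDP port
 * and the Rx/Tx classification mode.
 */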
static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                  bool enable)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_tunnel_info tunn;

        if (qdev->vxlan.enable == enable)
                return ECORE_SUCCESS;

        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
        tunn.vxlan.b_update_mode = true;
        tunn.vxlan.b_mode_enabled = enable;
        tunn.b_update_rx_cls = true;
        tunn.b_update_tx_cls = true;
        tunn.vxlan.tun_cls = clss;

        tunn.vxlan_port.b_update_port = true;
        tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

        rc = qede_tunnel_update(qdev, &tunn);
        if (rc == ECORE_SUCCESS) {
                qdev->vxlan.enable = enable;
                qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
                DP_INFO(edev, "VXLAN is %s, UDP port = %d\n",
                        enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
        } else {
                DP_ERR(edev, "Failed to update tunn_clss %u\n",
                       tunn.vxlan.tun_cls);
        }

        return rc;
}

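/* Enable or disable L2/IP GENEVE tunnel offload along with its default
 * UDP port.
 */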
static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                   bool enable)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_tunnel_info tunn;

        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
        tunn.l2_geneve.b_update_mode = true;
        tunn.l2_geneve.b_mode_enabled = enable;
        tunn.ip_geneve.b_update_mode = true;
        tunn.ip_geneve.b_mode_enabled = enable;
        tunn.l2_geneve.tun_cls = clss;
        tunn.ip_geneve.tun_cls = clss;
        tunn.b_update_rx_cls = true;
        tunn.b_update_tx_cls = true;

        tunn.geneve_port.b_update_port = true;
        tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

        rc = qede_tunnel_update(qdev, &tunn);
        if (rc == ECORE_SUCCESS) {
                qdev->geneve.enable = enable;
                qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
                DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
                        enable ? "enabled" : "disabled", qdev->geneve.udp_port);
        } else {
                DP_ERR(edev, "Failed to update tunn_clss %u\n",
                       clss);
        }

        return rc;
}

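/* Enable or disable IP-in-GRE tunnel offload */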
static int
qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                  bool enable)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_tunnel_info tunn;

        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
        tunn.ip_gre.b_update_mode = true;
        tunn.ip_gre.b_mode_enabled = enable;
        tunn.ip_gre.tun_cls = clss;
        tunn.b_update_rx_cls = true;
        tunn.b_update_tx_cls = true;

        rc = qede_tunnel_update(qdev, &tunn);
        if (rc == ECORE_SUCCESS) {
                qdev->ipgre.enable = enable;
                DP_INFO(edev, "IPGRE is %s\n",
                        enable ? "enabled" : "disabled");
        } else {
                DP_ERR(edev, "Failed to update tunn_clss %u\n",
                       clss);
        }

        return rc;
}

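/* Delete a previously configured VXLAN/GENEVE UDP destination port and
 * turn the offload off once no filters reference it.
 */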
int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
                      struct rte_eth_udp_tunnel *tunnel_udp)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_tunnel_info tunn; /* @DPDK */
        uint16_t udp_port;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        memset(&tunn, 0, sizeof(tunn));

        switch (tunnel_udp->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
                        DP_ERR(edev, "UDP port %u doesn't exist\n",
                                tunnel_udp->udp_port);
                        return ECORE_INVAL;
                }
                udp_port = 0;

                tunn.vxlan_port.b_update_port = true;
                tunn.vxlan_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u\n",
                               tunn.vxlan_port.port);
                        return rc;
                }

                qdev->vxlan.udp_port = udp_port;
                /* If the request is to delete UDP port and if the number of
                 * VXLAN filters has reached 0 then VxLAN offload can be
                 * disabled.
                 */
                if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
                        return qede_vxlan_enable(eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN, false);

                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
                        DP_ERR(edev, "UDP port %u doesn't exist\n",
                                tunnel_udp->udp_port);
                        return ECORE_INVAL;
                }

                udp_port = 0;

                tunn.geneve_port.b_update_port = true;
                tunn.geneve_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u\n",
                               tunn.geneve_port.port);
                        return rc;
                }

                qdev->geneve.udp_port = udp_port;
                /* If the request is to delete UDP port and if the number of
                 * GENEVE filters has reached 0 then GENEVE offload can be
                 * disabled.
                 */
                if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
                        return qede_geneve_enable(eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN, false);

                break;

        default:
                return ECORE_INVAL;
        }

        return 0;
}

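/* Configure a UDP destination port for VXLAN/GENEVE tunnel traffic,
 * enabling the tunnel offload first if needed.
 */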
int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
                      struct rte_eth_udp_tunnel *tunnel_udp)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_tunnel_info tunn; /* @DPDK */
        uint16_t udp_port;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        memset(&tunn, 0, sizeof(tunn));

        switch (tunnel_udp->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
                        DP_INFO(edev,
                                "UDP port %u for VXLAN was already configured\n",
                                tunnel_udp->udp_port);
                        return ECORE_SUCCESS;
                }

                /* Enable VxLAN tunnel with default MAC/VLAN classification if
                 * it was not enabled while adding VXLAN filter before UDP port
                 * update.
                 */
                if (!qdev->vxlan.enable) {
                        rc = qede_vxlan_enable(eth_dev,
                                ECORE_TUNN_CLSS_MAC_VLAN, true);
                        if (rc != ECORE_SUCCESS) {
                                DP_ERR(edev, "Failed to enable VXLAN "
                                        "prior to updating UDP port\n");
                                return rc;
                        }
                }
                udp_port = tunnel_udp->udp_port;

                tunn.vxlan_port.b_update_port = true;
                tunn.vxlan_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
                               udp_port);
                        return rc;
                }

                DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);

                qdev->vxlan.udp_port = udp_port;
                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
                        DP_INFO(edev,
                                "UDP port %u for GENEVE was already configured\n",
                                tunnel_udp->udp_port);
                        return ECORE_SUCCESS;
                }

                /* Enable GENEVE tunnel with default MAC/VLAN classification if
                 * it was not enabled while adding GENEVE filter before UDP port
                 * update.
                 */
                if (!qdev->geneve.enable) {
                        rc = qede_geneve_enable(eth_dev,
                                ECORE_TUNN_CLSS_MAC_VLAN, true);
                        if (rc != ECORE_SUCCESS) {
                                DP_ERR(edev, "Failed to enable GENEVE "
                                        "prior to updating UDP port\n");
                                return rc;
                        }
                }
                udp_port = tunnel_udp->udp_port;

                tunn.geneve_port.b_update_port = true;
                tunn.geneve_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
                               udp_port);
                        return rc;
                }

                DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);

                qdev->geneve.udp_port = udp_port;
                break;
        default:
                return ECORE_INVAL;
        }

        return 0;
}

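/* Map an RTE tunnel filter type to the ecore ucast type and tunnel
 * classification. *clss remains MAX_ECORE_TUNN_CLSS (and *type is left
 * untouched) when no match is found.
 */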
static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
                                       uint32_t *clss, char *str)
{
        uint16_t j;

        *clss = MAX_ECORE_TUNN_CLSS;

        for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
                if (filter == qede_tunn_types[j].rte_filter_type) {
                        *type = qede_tunn_types[j].qede_type;
                        *clss = qede_tunn_types[j].qede_tunn_clss;
                        strcpy(str, qede_tunn_types[j].string);
                        return;
                }
        }
}

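/* Fill an ecore unicast filter from the tunnel filter configuration,
 * copying only the fields relevant to the given classification type.
 */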
static int
qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
                              const struct rte_eth_tunnel_filter_conf *conf,
                              uint32_t type)
{
        /* Init common ucast params first */
        qede_set_ucast_cmn_params(ucast);

        /* Copy out the required fields based on classification type */
        ucast->type = type;

        switch (type) {
        case ECORE_FILTER_VNI:
                ucast->vni = conf->tenant_id;
                break;
        case ECORE_FILTER_INNER_VLAN:
                ucast->vlan = conf->inner_vlan;
                break;
        case ECORE_FILTER_MAC:
                memcpy(ucast->mac, conf->outer_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                break;
        case ECORE_FILTER_INNER_MAC:
                memcpy(ucast->mac, conf->inner_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                break;
        case ECORE_FILTER_MAC_VNI_PAIR:
                memcpy(ucast->mac, conf->outer_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                ucast->vni = conf->tenant_id;
                break;
        case ECORE_FILTER_INNER_MAC_VNI_PAIR:
                memcpy(ucast->mac, conf->inner_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                ucast->vni = conf->tenant_id;
                break;
        case ECORE_FILTER_INNER_PAIR:
                memcpy(ucast->mac, conf->inner_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                ucast->vlan = conf->inner_vlan;
                break;
        default:
                return -EINVAL;
        }

        return ECORE_SUCCESS;
}

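/* Program a single tunnel classification rule via the unicast filter
 * interface; VNI-keyed rules skip the MAC/VLAN interior ops and are
 * issued directly as a ucast command.
 */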
static int
_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
                         const struct rte_eth_tunnel_filter_conf *conf,
                         __attribute__((unused)) enum rte_filter_op filter_op,
                         enum ecore_tunn_clss *clss,
                         bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_filter_ucast ucast = {0};
        enum ecore_filter_ucast_type type;
        uint16_t filter_type = 0;
        char str[80];
        int rc;

        filter_type = conf->filter_type;
        /* Determine if the given filter classification is supported */
        qede_get_ecore_tunn_params(filter_type, &type, clss, str);
        if (*clss == MAX_ECORE_TUNN_CLSS) {
                DP_ERR(edev, "Unsupported filter type\n");
                return -EINVAL;
        }
        /* Init tunnel ucast params */
        rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
                       conf->filter_type);
                return rc;
        }
        DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
                str, filter_op, ucast.type);

        ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;

        /* Skip MAC/VLAN if filter is based on VNI */
        if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
                rc = qede_mac_int_ops(eth_dev, &ucast, add);
                if (rc == 0 && add) {
                        /* Enable accept anyvlan */
                        qede_config_accept_any_vlan(qdev, true);
                }
        } else {
                rc = qede_ucast_filter(eth_dev, &ucast, add);
                if (rc == 0)
                        rc = ecore_filter_ucast_cmd(edev, &ucast,
                                            ECORE_SPQ_MODE_CB, NULL);
        }

        return rc;
}

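/* Dispatch tunnel offload enable/disable by tunnel type */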
static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                 enum rte_eth_tunnel_type tunn_type, bool enable)
{
        int rc = -EINVAL;

        switch (tunn_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                rc = qede_vxlan_enable(eth_dev, clss, enable);
                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                rc = qede_geneve_enable(eth_dev, clss, enable);
                break;
        case RTE_TUNNEL_TYPE_IP_IN_GRE:
                rc = qede_ipgre_enable(eth_dev, clss, enable);
                break;
        default:
                rc = -EINVAL;
                break;
        }

        return rc;
}

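/* Add or delete a tunnel filter and maintain the per-tunnel filter
 * counts so the offload is enabled on first use and disabled when the
 * last filter goes away.
 */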
static int
qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
                        enum rte_filter_op filter_op,
                        const struct rte_eth_tunnel_filter_conf *conf)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
        bool add;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                add = true;
                break;
        case RTE_ETH_FILTER_DELETE:
                add = false;
                break;
        default:
                DP_ERR(edev, "Unsupported operation %d\n", filter_op);
                return -EINVAL;
        }

        if (IS_VF(edev))
                return qede_tunn_enable(eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN,
                                        conf->tunnel_type, add);

        rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
        if (rc != ECORE_SUCCESS)
                return rc;

        if (add) {
                if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
                        qdev->vxlan.num_filters++;
                        qdev->vxlan.filter_type = conf->filter_type;
                } else { /* GENEVE */
                        qdev->geneve.num_filters++;
                        qdev->geneve.filter_type = conf->filter_type;
                }

                if (!qdev->vxlan.enable || !qdev->geneve.enable ||
                    !qdev->ipgre.enable)
                        return qede_tunn_enable(eth_dev, clss,
                                                conf->tunnel_type,
                                                true);
        } else {
                if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
                        qdev->vxlan.num_filters--;
                else /* GENEVE */
                        qdev->geneve.num_filters--;

                /* Disable the tunnel offload if its filter count drops to 0 */
                if (qdev->vxlan.num_filters == 0 ||
                    qdev->geneve.num_filters == 0)
                        return qede_tunn_enable(eth_dev, clss,
                                                conf->tunnel_type,
                                                false);
        }

        return 0;
}

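/* Top-level .filter_ctrl handler: dispatch to the tunnel, flow director
 * or ntuple paths based on filter type.
 */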
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
                         enum rte_filter_type filter_type,
                         enum rte_filter_op filter_op,
                         void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_tunnel_filter_conf *filter_conf =
                        (struct rte_eth_tunnel_filter_conf *)arg;

        switch (filter_type) {
        case RTE_ETH_FILTER_TUNNEL:
                switch (filter_conf->tunnel_type) {
                case RTE_TUNNEL_TYPE_VXLAN:
                case RTE_TUNNEL_TYPE_GENEVE:
                case RTE_TUNNEL_TYPE_IP_IN_GRE:
                        DP_INFO(edev,
                                "Packet steering to the specified Rx queue"
                                " is not supported with UDP tunneling\n");
                        return qede_tunn_filter_config(eth_dev, filter_op,
                                                       filter_conf);
                case RTE_TUNNEL_TYPE_TEREDO:
                case RTE_TUNNEL_TYPE_NVGRE:
                case RTE_L2_TUNNEL_TYPE_E_TAG:
                        DP_ERR(edev, "Unsupported tunnel type %d\n",
                                filter_conf->tunnel_type);
                        return -EINVAL;
                case RTE_TUNNEL_TYPE_NONE:
                default:
                        return 0;
                }
                break;
        case RTE_ETH_FILTER_FDIR:
                return qede_fdir_filter_conf(eth_dev, filter_op, arg);
        case RTE_ETH_FILTER_NTUPLE:
                return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
        case RTE_ETH_FILTER_MACVLAN:
        case RTE_ETH_FILTER_ETHERTYPE:
        case RTE_ETH_FILTER_FLEXIBLE:
        case RTE_ETH_FILTER_SYN:
        case RTE_ETH_FILTER_HASH:
        case RTE_ETH_FILTER_L2_TUNNEL:
        case RTE_ETH_FILTER_MAX:
        default:
                DP_ERR(edev, "Unsupported filter type %d\n",
                        filter_type);
                return -EINVAL;
        }

        return 0;
}

/* RTE_FLOW */