/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_errno.h>

#include "qede_ethdev.h"

/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{
		ETH_TUNNEL_FILTER_OMAC,
		ECORE_FILTER_MAC,
		ECORE_TUNN_CLSS_MAC_VLAN,
		"outer-mac"
	},
	{
		ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_VNI,
		ECORE_TUNN_CLSS_MAC_VNI,
		"vni"
	},
	{
		ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_VLAN,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_MAC_VNI,
		"outer-mac and vni"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VNI,
		"vni and inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"vni and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_OIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-IP"
	},
	{
		ETH_TUNNEL_FILTER_IIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"inner-IP"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN_TENID"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_TENID"
	},
	{
		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"OMAC_TENID_IMAC"
	},
};

#define IP_VERSION				(0x40)
#define IP_HDRLEN				(0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL	(IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF		(0x50)
#define QEDE_FDIR_IPV4_DEF_TTL			(64)
#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW		(0x60000000)
/* Sum of the lengths of the largest L2, L3 and L4 headers the flowdir
 * packet buffer may need to hold:
 * L2 : ether_hdr (14) + vlan_hdr (4) + vxlan_hdr (8)
 * L3 : ipv6_hdr (40)
 * L4 : tcp_hdr (20)
 * 14 + 4 + 8 + 40 + 20 = 86 bytes
 */
#define QEDE_MAX_FDIR_PKT_LEN			(86)

static inline bool qede_valid_flow(uint16_t flow_type)
{
	return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
		(flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
		(flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
		(flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
}

static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			void *buff,
			struct ecore_arfs_config_params *params);

/* Note: Flowdir support is only partial.
 * For example: drop_queue, FDIR masks and flex_conf are not supported.
 * Parameters like pballoc/status fields are irrelevant here.
 */
int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;

	/* check FDIR modes */
	switch (fdir->mode) {
	case RTE_FDIR_MODE_NONE:
		qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
		DP_INFO(edev, "flowdir is disabled\n");
		break;
	case RTE_FDIR_MODE_PERFECT:
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			qdev->arfs_info.arfs.mode =
				ECORE_FILTER_CONFIG_MODE_DISABLE;
			return -ENOTSUP;
		}
		qdev->arfs_info.arfs.mode =
				ECORE_FILTER_CONFIG_MODE_5_TUPLE;
		DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
		break;
	case RTE_FDIR_MODE_PERFECT_TUNNEL:
	case RTE_FDIR_MODE_SIGNATURE:
	case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
		DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
		return -ENOTSUP;
	}

	return 0;
}
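
/* Illustrative usage sketch (not part of the driver): how an application
 * would enable perfect-mode flow director through the port configuration
 * that qede_check_fdir_support() validates at configure time. The function
 * name and the port_id/queue-count parameters are placeholder assumptions.
 */
static __rte_unused int
qede_example_enable_fdir(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf port_conf;

	memset(&port_conf, 0, sizeof(port_conf));
	port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;

	/* qede_check_fdir_support() reads dev_conf.fdir_conf.mode */
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
}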

void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct qede_arfs_entry *tmp = NULL;

	/* Pop entries one at a time; SLIST_FOREACH must not be used here
	 * because each node is freed inside the loop.
	 */
	while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
		tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
		SLIST_REMOVE_HEAD(&qdev->arfs_info.arfs_list_head, list);
		if (tmp->mz)
			rte_memzone_free(tmp->mz);
		rte_free(tmp);
	}
}

static int
qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
			 struct rte_eth_fdir_filter *fdir,
			 struct qede_arfs_entry *arfs)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_fdir_input *input;

	static const uint8_t next_proto[] = {
		[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
	};

	input = &fdir->input;

	DP_INFO(edev, "flow_type %d\n", input->flow_type);

	switch (input->flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		/* fill the common ip header */
		arfs->tuple.eth_proto = ETHER_TYPE_IPv4;
		arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
		arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
		arfs->tuple.ip_proto = next_proto[input->flow_type];

		/* UDP */
		if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
			arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
			arfs->tuple.src_port = input->flow.udp4_flow.src_port;
		} else { /* TCP */
			arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
			arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
		}
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		arfs->tuple.eth_proto = ETHER_TYPE_IPv6;
		arfs->tuple.ip_proto = next_proto[input->flow_type];
		rte_memcpy(arfs->tuple.dst_ipv6,
			   &input->flow.ipv6_flow.dst_ip,
			   IPV6_ADDR_LEN);
		rte_memcpy(arfs->tuple.src_ipv6,
			   &input->flow.ipv6_flow.src_ip,
			   IPV6_ADDR_LEN);

		/* UDP */
		if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
			arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
			arfs->tuple.src_port = input->flow.udp6_flow.src_port;
		} else { /* TCP */
			arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
			arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
		}
		break;
	default:
		DP_ERR(edev, "Unsupported flow_type %u\n",
		       input->flow_type);
		return -ENOTSUP;
	}

	arfs->rx_queue = fdir->action.rx_queue;
	return 0;
}

static int
qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
	struct qede_arfs_entry *tmp = NULL;
	const struct rte_memzone *mz;
	struct ecore_hwfn *p_hwfn;
	enum _ecore_status_t rc;
	uint16_t pkt_len;
	void *pkt;

	if (add) {
		if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
			DP_ERR(edev, "Reached max flowdir filter limit\n");
			return -EINVAL;
		}
	}

	/* soft_id could have been used as the memzone name, but soft_id is
	 * not currently used, so it has no significance.
	 */
	snprintf(mz_name, sizeof(mz_name), "%lx",
		 (unsigned long)rte_get_timer_cycles());
	mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
					 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
	if (!mz) {
		DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
		       rte_strerror(rte_errno));
		return -rte_errno;
	}

	pkt = mz->addr;
	memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
	pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
					  &qdev->arfs_info.arfs);
	if (pkt_len == 0) {
		rc = -EINVAL;
		goto err1;
	}

	DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
	if (add) {
		SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
			if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
				DP_INFO(edev, "flowdir filter exists\n");
				rc = -EEXIST;
				goto err1;
			}
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
			if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
				break;
		}
		if (!tmp) {
			DP_ERR(edev, "flowdir filter does not exist\n");
			rc = -ENOENT;
			goto err1;
		}
	}
	p_hwfn = ECORE_LEADING_HWFN(edev);
	if (add) {
		if (qdev->arfs_info.arfs.mode ==
			ECORE_FILTER_CONFIG_MODE_DISABLE) {
			/* Force update */
			eth_dev->data->dev_conf.fdir_conf.mode =
						RTE_FDIR_MODE_PERFECT;
			qdev->arfs_info.arfs.mode =
					ECORE_FILTER_CONFIG_MODE_5_TUPLE;
			DP_INFO(edev, "Force enable flowdir in perfect mode\n");
		}
		/* Enable ARFS searcher with updated flow_types */
		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
					  &qdev->arfs_info.arfs);
	}
	/* configure filter with ECORE_SPQ_MODE_EBLOCK */
	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
					       (dma_addr_t)mz->iova,
					       pkt_len,
					       arfs->rx_queue,
					       0, add);
	if (rc == ECORE_SUCCESS) {
		if (add) {
			arfs->pkt_len = pkt_len;
			arfs->mz = mz;
			SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
					  arfs, list);
			qdev->arfs_info.filter_count++;
			DP_INFO(edev, "flowdir filter added, count = %d\n",
				qdev->arfs_info.filter_count);
		} else {
			rte_memzone_free(tmp->mz);
			SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
				     qede_arfs_entry, list);
			rte_free(tmp); /* the deleted node */
			rte_memzone_free(mz); /* temp buffer used for lookup */
			qdev->arfs_info.filter_count--;
			DP_INFO(edev, "Fdir filter deleted, count = %d\n",
				qdev->arfs_info.filter_count);
		}
	} else {
		DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
		       rc, qdev->arfs_info.filter_count);
		rte_memzone_free(mz);
	}

	/* Disable ARFS searcher if there are no more filters */
	if (qdev->arfs_info.filter_count == 0) {
		memset(&qdev->arfs_info.arfs, 0,
		       sizeof(struct ecore_arfs_config_params));
		DP_INFO(edev, "Disabling flowdir\n");
		qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
					  &qdev->arfs_info.arfs);
	}
	return rc;

err1:
	rte_memzone_free(mz);
	return rc;
}

static int
qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
			    struct rte_eth_fdir_filter *fdir_filter,
			    bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_arfs_entry *arfs = NULL;
	int rc = 0;

	arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
			  RTE_CACHE_LINE_SIZE);
	if (!arfs) {
		DP_ERR(edev, "Did not allocate memory for arfs\n");
		return -ENOMEM;
	}

	rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
	if (rc < 0) {
		rte_free(arfs);
		return rc;
	}

	rc = qede_config_arfs_filter(eth_dev, arfs, add);
	/* Only a successfully added entry is kept on the arfs list;
	 * on failure or delete, the temporary entry must be freed.
	 */
	if (rc < 0 || !add)
		rte_free(arfs);

	return rc;
}

static int
qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
		     struct rte_eth_fdir_filter *fdir,
		     bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (!qede_valid_flow(fdir->input.flow_type)) {
		DP_ERR(edev, "invalid flow_type input\n");
		return -EINVAL;
	}

	if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
		DP_ERR(edev, "invalid queue number %u\n",
		       fdir->action.rx_queue);
		return -EINVAL;
	}

	if (fdir->input.flow_ext.is_vf) {
		DP_ERR(edev, "flowdir is not supported over VF\n");
		return -EINVAL;
	}

	return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
}
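
/* Illustrative usage sketch (not part of the driver): adding an IPv4/UDP
 * flow director filter through the legacy filter-ctrl API, which reaches
 * qede_fdir_filter_add() above. The function name, addresses and ports
 * are placeholder assumptions.
 */
static __rte_unused int
qede_example_add_udp4_fdir_filter(uint16_t port_id, uint16_t rx_queue)
{
	struct rte_eth_fdir_filter fdir;

	memset(&fdir, 0, sizeof(fdir));
	fdir.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	fdir.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(0x0a000001); /* 10.0.0.1 */
	fdir.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(0x0a000002); /* 10.0.0.2 */
	fdir.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(4000);
	fdir.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(5000);
	fdir.action.rx_queue = rx_queue;

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, &fdir);
}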

/* Fills the L3/L4 headers and returns the actual length of the flowdir packet */
static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			void *buff,
			struct ecore_arfs_config_params *params)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint16_t *ether_type;
	uint8_t *raw_pkt;
	struct ipv4_hdr *ip;
	struct ipv6_hdr *ip6;
	struct udp_hdr *udp;
	struct tcp_hdr *tcp;
	uint16_t len;

	raw_pkt = (uint8_t *)buff;

	len = 2 * sizeof(struct ether_addr);
	raw_pkt += 2 * sizeof(struct ether_addr);
	ether_type = (uint16_t *)raw_pkt;
	raw_pkt += sizeof(uint16_t);
	len += sizeof(uint16_t);

	*ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
	switch (arfs->tuple.eth_proto) {
	case ETHER_TYPE_IPv4:
		ip = (struct ipv4_hdr *)raw_pkt;
		ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
		ip->total_length = sizeof(struct ipv4_hdr);
		ip->next_proto_id = arfs->tuple.ip_proto;
		ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
		ip->dst_addr = arfs->tuple.dst_ipv4;
		ip->src_addr = arfs->tuple.src_ipv4;
		len += sizeof(struct ipv4_hdr);
		params->ipv4 = true;

		raw_pkt = (uint8_t *)buff;
		/* UDP */
		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
			udp = (struct udp_hdr *)(raw_pkt + len);
			udp->dst_port = arfs->tuple.dst_port;
			udp->src_port = arfs->tuple.src_port;
			udp->dgram_len = sizeof(struct udp_hdr);
			len += sizeof(struct udp_hdr);
			/* adjust ip total_length */
			ip->total_length += sizeof(struct udp_hdr);
			params->udp = true;
		} else { /* TCP */
			tcp = (struct tcp_hdr *)(raw_pkt + len);
			tcp->src_port = arfs->tuple.src_port;
			tcp->dst_port = arfs->tuple.dst_port;
			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
			len += sizeof(struct tcp_hdr);
			/* adjust ip total_length */
			ip->total_length += sizeof(struct tcp_hdr);
			params->tcp = true;
		}
		break;
	case ETHER_TYPE_IPv6:
		ip6 = (struct ipv6_hdr *)raw_pkt;
		ip6->proto = arfs->tuple.ip_proto;
		ip6->vtc_flow =
			rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);

		rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
			   IPV6_ADDR_LEN);
		rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
			   IPV6_ADDR_LEN);
		len += sizeof(struct ipv6_hdr);
		params->ipv6 = true;

		raw_pkt = (uint8_t *)buff;
		/* UDP */
		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
			udp = (struct udp_hdr *)(raw_pkt + len);
			udp->src_port = arfs->tuple.src_port;
			udp->dst_port = arfs->tuple.dst_port;
			len += sizeof(struct udp_hdr);
			params->udp = true;
		} else { /* TCP */
			tcp = (struct tcp_hdr *)(raw_pkt + len);
			tcp->src_port = arfs->tuple.src_port;
			tcp->dst_port = arfs->tuple.dst_port;
			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
			len += sizeof(struct tcp_hdr);
			params->tcp = true;
		}
		break;
	default:
		DP_ERR(edev, "Unsupported eth_proto %u\n",
		       arfs->tuple.eth_proto);
		return 0;
	}

	return len;
}

static int
qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_fdir_filter *fdir;
	int ret;

	fdir = (struct rte_eth_fdir_filter *)arg;
	switch (filter_op) {
	case RTE_ETH_FILTER_NOP:
		/* Typically used to query flowdir support */
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			return -ENOTSUP;
		}
		return 0; /* means supported */
	case RTE_ETH_FILTER_ADD:
		ret = qede_fdir_filter_add(eth_dev, fdir, true);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = qede_fdir_filter_add(eth_dev, fdir, false);
		break;
	case RTE_ETH_FILTER_FLUSH:
	case RTE_ETH_FILTER_UPDATE:
	case RTE_ETH_FILTER_INFO:
		return -ENOTSUP;
	default:
		DP_ERR(edev, "unknown operation %u\n", filter_op);
		ret = -EINVAL;
	}

	return ret;
}

int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
			    enum rte_filter_op filter_op,
			    void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_ntuple_filter *ntuple;
	struct rte_eth_fdir_filter fdir_entry;
	struct rte_eth_tcpv4_flow *tcpv4_flow;
	struct rte_eth_udpv4_flow *udpv4_flow;
	bool add = false;

	switch (filter_op) {
	case RTE_ETH_FILTER_NOP:
		/* Typically used to query fdir support */
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			return -ENOTSUP;
		}
		return 0; /* means supported */
	case RTE_ETH_FILTER_ADD:
		add = true;
		break;
	case RTE_ETH_FILTER_DELETE:
		break;
	case RTE_ETH_FILTER_INFO:
	case RTE_ETH_FILTER_GET:
	case RTE_ETH_FILTER_UPDATE:
	case RTE_ETH_FILTER_FLUSH:
	case RTE_ETH_FILTER_SET:
	case RTE_ETH_FILTER_STATS:
	case RTE_ETH_FILTER_OP_MAX:
		DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
		return -ENOTSUP;
	}
	ntuple = (struct rte_eth_ntuple_filter *)arg;
	/* Internally convert ntuple to fdir entry */
	memset(&fdir_entry, 0, sizeof(fdir_entry));
	if (ntuple->proto == IPPROTO_TCP) {
		fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
		tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
		tcpv4_flow->ip.src_ip = ntuple->src_ip;
		tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
		tcpv4_flow->ip.proto = IPPROTO_TCP;
		tcpv4_flow->src_port = ntuple->src_port;
		tcpv4_flow->dst_port = ntuple->dst_port;
	} else {
		fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
		udpv4_flow = &fdir_entry.input.flow.udp4_flow;
		udpv4_flow->ip.src_ip = ntuple->src_ip;
		udpv4_flow->ip.dst_ip = ntuple->dst_ip;
		udpv4_flow->ip.proto = IPPROTO_UDP;
		udpv4_flow->src_port = ntuple->src_port;
		udpv4_flow->dst_port = ntuple->dst_port;
	}

	fdir_entry.action.rx_queue = ntuple->queue;

	return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
}
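
/* Illustrative usage sketch (not part of the driver): an ntuple filter
 * submitted through the legacy filter-ctrl API; qede_ntuple_filter_conf()
 * above converts it to an fdir entry internally. The function name,
 * addresses and ports are placeholder assumptions.
 */
static __rte_unused int
qede_example_add_tcp4_ntuple_filter(uint16_t port_id, uint16_t rx_queue)
{
	struct rte_eth_ntuple_filter ntuple;

	memset(&ntuple, 0, sizeof(ntuple));
	ntuple.flags = RTE_5TUPLE_FLAGS;
	ntuple.proto = IPPROTO_TCP;
	ntuple.src_ip = rte_cpu_to_be_32(0x0a000001); /* 10.0.0.1 */
	ntuple.dst_ip = rte_cpu_to_be_32(0x0a000002); /* 10.0.0.2 */
	ntuple.src_port = rte_cpu_to_be_16(4000);
	ntuple.dst_port = rte_cpu_to_be_16(5000);
	ntuple.queue = rx_queue;

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
				       RTE_ETH_FILTER_ADD, &ntuple);
}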

static int
qede_tunnel_update(struct qede_dev *qdev,
		   struct ecore_tunnel_info *tunn_info)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		if (IS_PF(edev)) {
			p_ptt = ecore_ptt_acquire(p_hwfn);
			if (!p_ptt) {
				DP_ERR(p_hwfn, "Can't acquire PTT\n");
				return -EAGAIN;
			}
		} else {
			p_ptt = NULL;
		}

		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
				tunn_info, ECORE_SPQ_MODE_CB, NULL);
		if (IS_PF(edev))
			ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	if (qdev->vxlan.enable == enable)
		return ECORE_SUCCESS;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.vxlan.b_update_mode = true;
	tunn.vxlan.b_mode_enabled = enable;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;
	tunn.vxlan.tun_cls = clss;

	tunn.vxlan_port.b_update_port = true;
	tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->vxlan.enable = enable;
		qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
		DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.vxlan.tun_cls);
	}

	return rc;
}

static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		   bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.l2_geneve.b_update_mode = true;
	tunn.l2_geneve.b_mode_enabled = enable;
	tunn.ip_geneve.b_update_mode = true;
	tunn.ip_geneve.b_mode_enabled = enable;
	tunn.l2_geneve.tun_cls = clss;
	tunn.ip_geneve.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	tunn.geneve_port.b_update_port = true;
	tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->geneve.enable = enable;
		qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
		DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->geneve.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       clss);
	}

	return rc;
}

static int
qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.ip_gre.b_update_mode = true;
	tunn.ip_gre.b_mode_enabled = enable;
	tunn.ip_gre.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->ipgre.enable = enable;
		DP_INFO(edev, "IPGRE is %s\n",
			enable ? "enabled" : "disabled");
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       clss);
	}

	return rc;
}

int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}
		udp_port = 0;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.vxlan_port.port);
			return rc;
		}

		qdev->vxlan.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * VXLAN filters has reached 0, then VXLAN offload can be
		 * disabled.
		 */
		if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
			return qede_vxlan_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}

		udp_port = 0;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.geneve_port.port);
			return rc;
		}

		qdev->geneve.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * GENEVE filters has reached 0, then GENEVE offload can be
		 * disabled.
		 */
		if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
			return qede_geneve_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;

	default:
		return ECORE_INVAL;
	}

	return 0;
}

int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for VXLAN was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable VXLAN tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding VXLAN filter before UDP port
		 * update.
		 */
		if (!qdev->vxlan.enable) {
			rc = qede_vxlan_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable VXLAN "
					"prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);

		qdev->vxlan.udp_port = udp_port;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for GENEVE was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable GENEVE tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding GENEVE filter before UDP port
		 * update.
		 */
		if (!qdev->geneve.enable) {
			rc = qede_geneve_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable GENEVE "
					"prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);

		qdev->geneve.udp_port = udp_port;
		break;
	default:
		return ECORE_INVAL;
	}

	return 0;
}
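
/* Illustrative usage sketch (not part of the driver): registering a VXLAN
 * UDP port via the generic ethdev API, which lands in
 * qede_udp_dst_port_add() above through the device ops table. The function
 * name and the udp_port value are placeholder assumptions.
 */
static __rte_unused int
qede_example_add_vxlan_port(uint16_t port_id, uint16_t udp_port)
{
	struct rte_eth_udp_tunnel tunnel_udp;

	memset(&tunnel_udp, 0, sizeof(tunnel_udp));
	tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
	tunnel_udp.udp_port = udp_port;

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
}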

static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
				       uint32_t *clss, char *str)
{
	uint16_t j;

	*clss = MAX_ECORE_TUNN_CLSS;

	for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
		if (filter == qede_tunn_types[j].rte_filter_type) {
			*type = qede_tunn_types[j].qede_type;
			*clss = qede_tunn_types[j].qede_tunn_clss;
			strcpy(str, qede_tunn_types[j].string);
			return;
		}
	}
}

static int
qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
			      const struct rte_eth_tunnel_filter_conf *conf,
			      uint32_t type)
{
	/* Init common ucast params first */
	qede_set_ucast_cmn_params(ucast);

	/* Copy out the required fields based on classification type */
	ucast->type = type;

	switch (type) {
	case ECORE_FILTER_VNI:
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_VLAN:
		ucast->vlan = conf->inner_vlan;
		break;
	case ECORE_FILTER_MAC:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_INNER_MAC:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vlan = conf->inner_vlan;
		break;
	default:
		return -EINVAL;
	}

	return ECORE_SUCCESS;
}

static int
_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
			 const struct rte_eth_tunnel_filter_conf *conf,
			 __attribute__((unused)) enum rte_filter_op filter_op,
			 enum ecore_tunn_clss *clss,
			 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_ucast ucast = {0};
	enum ecore_filter_ucast_type type;
	uint16_t filter_type = 0;
	char str[80];
	int rc;

	filter_type = conf->filter_type;
	/* Determine if the given filter classification is supported */
	qede_get_ecore_tunn_params(filter_type, &type, clss, str);
	if (*clss == MAX_ECORE_TUNN_CLSS) {
		DP_ERR(edev, "Unsupported filter type\n");
		return -EINVAL;
	}
	/* Init tunnel ucast params */
	rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
		       conf->filter_type);
		return rc;
	}
	DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
		str, filter_op, ucast.type);

	ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;

	/* Skip MAC/VLAN if filter is based on VNI */
	if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
		rc = qede_mac_int_ops(eth_dev, &ucast, add);
		if (rc == 0 && add) {
			/* Enable accept anyvlan */
			qede_config_accept_any_vlan(qdev, true);
		}
	} else {
		rc = qede_ucast_filter(eth_dev, &ucast, add);
		if (rc == 0)
			rc = ecore_filter_ucast_cmd(edev, &ucast,
						    ECORE_SPQ_MODE_CB, NULL);
	}

	return rc;
}

static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		 enum rte_eth_tunnel_type tunn_type, bool enable)
{
	int rc = -EINVAL;

	switch (tunn_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		rc = qede_vxlan_enable(eth_dev, clss, enable);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		rc = qede_geneve_enable(eth_dev, clss, enable);
		break;
	case RTE_TUNNEL_TYPE_IP_IN_GRE:
		rc = qede_ipgre_enable(eth_dev, clss, enable);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

static int
qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op,
			const struct rte_eth_tunnel_filter_conf *conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
	bool add;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		add = true;
		break;
	case RTE_ETH_FILTER_DELETE:
		add = false;
		break;
	default:
		DP_ERR(edev, "Unsupported operation %d\n", filter_op);
		return -EINVAL;
	}

	if (IS_VF(edev))
		return qede_tunn_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN,
					conf->tunnel_type, add);

	rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (add) {
		if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
			qdev->vxlan.num_filters++;
			qdev->vxlan.filter_type = conf->filter_type;
		} else { /* GENEVE */
			qdev->geneve.num_filters++;
			qdev->geneve.filter_type = conf->filter_type;
		}

		if (!qdev->vxlan.enable || !qdev->geneve.enable ||
		    !qdev->ipgre.enable)
			return qede_tunn_enable(eth_dev, clss,
						conf->tunnel_type,
						true);
	} else {
		if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
			qdev->vxlan.num_filters--;
		else /* GENEVE */
			qdev->geneve.num_filters--;

		/* Disable the offload once the filter count for the tunnel
		 * type being deleted drops to zero.
		 */
		if ((conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN &&
		     qdev->vxlan.num_filters == 0) ||
		    (conf->tunnel_type == RTE_TUNNEL_TYPE_GENEVE &&
		     qdev->geneve.num_filters == 0))
			return qede_tunn_enable(eth_dev, clss,
						conf->tunnel_type,
						false);
	}

	return 0;
}

int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op,
			 void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_tunnel_filter_conf *filter_conf =
			(struct rte_eth_tunnel_filter_conf *)arg;

	switch (filter_type) {
	case RTE_ETH_FILTER_TUNNEL:
		switch (filter_conf->tunnel_type) {
		case RTE_TUNNEL_TYPE_VXLAN:
		case RTE_TUNNEL_TYPE_GENEVE:
		case RTE_TUNNEL_TYPE_IP_IN_GRE:
			DP_INFO(edev,
				"Packet steering to the specified Rx queue"
				" is not supported with UDP tunneling\n");
			return qede_tunn_filter_config(eth_dev, filter_op,
						       filter_conf);
		case RTE_TUNNEL_TYPE_TEREDO:
		case RTE_TUNNEL_TYPE_NVGRE:
		case RTE_L2_TUNNEL_TYPE_E_TAG:
			DP_ERR(edev, "Unsupported tunnel type %d\n",
			       filter_conf->tunnel_type);
			return -EINVAL;
		case RTE_TUNNEL_TYPE_NONE:
		default:
			return 0;
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		return qede_fdir_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_NTUPLE:
		return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_MACVLAN:
	case RTE_ETH_FILTER_ETHERTYPE:
	case RTE_ETH_FILTER_FLEXIBLE:
	case RTE_ETH_FILTER_SYN:
	case RTE_ETH_FILTER_HASH:
	case RTE_ETH_FILTER_L2_TUNNEL:
	case RTE_ETH_FILTER_MAX:
	default:
		DP_ERR(edev, "Unsupported filter type %d\n",
		       filter_type);
		return -EINVAL;
	}

	return 0;
}
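
/* Illustrative usage sketch (not part of the driver): a VNI-based VXLAN
 * tunnel filter dispatched through qede_dev_filter_ctrl() above. The
 * function name and the vni parameter are placeholder assumptions.
 */
static __rte_unused int
qede_example_add_vxlan_vni_filter(uint16_t port_id, uint32_t vni)
{
	struct rte_eth_tunnel_filter_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
	conf.filter_type = ETH_TUNNEL_FILTER_TENID;
	conf.tenant_id = vni;

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
				       RTE_ETH_FILTER_ADD, &conf);
}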

/* RTE_FLOW */