/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_errno.h>
#include <rte_flow_driver.h>

#include "qede_ethdev.h"

/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
        uint16_t rte_filter_type;
        enum ecore_filter_ucast_type qede_type;
        enum ecore_tunn_clss qede_tunn_clss;
        const char *string;
} qede_tunn_types[] = {
        {
                ETH_TUNNEL_FILTER_OMAC,
                ECORE_FILTER_MAC,
                ECORE_TUNN_CLSS_MAC_VLAN,
                "outer-mac"
        },
        {
                ETH_TUNNEL_FILTER_TENID,
                ECORE_FILTER_VNI,
                ECORE_TUNN_CLSS_MAC_VNI,
                "vni"
        },
        {
                ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_INNER_MAC,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-mac"
        },
        {
                ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_INNER_VLAN,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
                ECORE_FILTER_MAC_VNI_PAIR,
                ECORE_TUNN_CLSS_MAC_VNI,
                "outer-mac and vni"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-mac and inner-mac"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-mac and inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_INNER_MAC_VNI_PAIR,
                ECORE_TUNN_CLSS_INNER_MAC_VNI,
                "vni and inner-mac",
        },
        {
                ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "vni and inner-vlan",
        },
        {
                ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_INNER_PAIR,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-mac and inner-vlan",
        },
        {
                ETH_TUNNEL_FILTER_OIP,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-IP"
        },
        {
                ETH_TUNNEL_FILTER_IIP,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "inner-IP"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_IVLAN"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_IVLAN_TENID"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_TENID,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_TENID"
        },
        {
                RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "OMAC_TENID_IMAC"
        },
};

#define IP_VERSION                              (0x40)
#define IP_HDRLEN                               (0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL        (IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF           (0x50)
#define QEDE_FDIR_IPV4_DEF_TTL                  (64)
#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW         (0x60000000)
/* Sum of the lengths of the L2, L3 and L4 header types:
 * L2 : ether_hdr (14) + vlan_hdr (4) + vxlan_hdr (8)
 * L3 : ipv6_hdr (40)
 * L4 : tcp_hdr (20)
 * i.e. 14 + 4 + 8 + 40 + 20 = 86 bytes
 */
#define QEDE_MAX_FDIR_PKT_LEN                   (86)

static inline bool qede_valid_flow(uint16_t flow_type)
{
        return  ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
                 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
                 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
                 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
}

static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
                        struct qede_arfs_entry *arfs,
                        void *buff,
                        struct ecore_arfs_config_params *params);

/* Note: Flowdir support is only partial; for example, drop_queue,
 * FDIR masks and flex_conf are not supported. Parameters such as
 * pballoc/status fields are irrelevant here.
 */
int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;

        /* check FDIR modes */
        switch (fdir->mode) {
        case RTE_FDIR_MODE_NONE:
                qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
                DP_INFO(edev, "flowdir is disabled\n");
                break;
        case RTE_FDIR_MODE_PERFECT:
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        qdev->arfs_info.arfs.mode =
                                ECORE_FILTER_CONFIG_MODE_DISABLE;
                        return -ENOTSUP;
                }
                qdev->arfs_info.arfs.mode =
                                ECORE_FILTER_CONFIG_MODE_5_TUPLE;
                DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
                break;
        case RTE_FDIR_MODE_PERFECT_TUNNEL:
        case RTE_FDIR_MODE_SIGNATURE:
        case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
                DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
                return -ENOTSUP;
        }

        return 0;
}
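
/* Usage sketch (illustrative, not part of this driver): an application
 * opts into perfect-mode flow director through the ethdev configuration,
 * which is what the switch above inspects at configure time. port_id,
 * nb_rxq and nb_txq are placeholders.
 *
 *	struct rte_eth_conf port_conf;
 *
 *	memset(&port_conf, 0, sizeof(port_conf));
 *	port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */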

void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct qede_arfs_entry *tmp = NULL;

        /* Pop entries one by one: SLIST_FOREACH cannot be used here since
         * each node is freed before the iterator would advance to the next.
         */
        while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
                tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
                if (tmp->mz)
                        rte_memzone_free(tmp->mz);
                SLIST_REMOVE_HEAD(&qdev->arfs_info.arfs_list_head, list);
                rte_free(tmp);
        }
}

static int
qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
                         struct rte_eth_fdir_filter *fdir,
                         struct qede_arfs_entry *arfs)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_fdir_input *input;

        static const uint8_t next_proto[] = {
                [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
                [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
                [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
                [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
        };

        input = &fdir->input;

        DP_INFO(edev, "flow_type %d\n", input->flow_type);

        switch (input->flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
                /* fill the common ip header */
                arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV4;
                arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
                arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
                arfs->tuple.ip_proto = next_proto[input->flow_type];

                /* UDP */
                if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
                        arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
                        arfs->tuple.src_port = input->flow.udp4_flow.src_port;
                } else { /* TCP */
                        arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
                        arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
                }
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
                arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV6;
                arfs->tuple.ip_proto = next_proto[input->flow_type];
                rte_memcpy(arfs->tuple.dst_ipv6,
                           &input->flow.ipv6_flow.dst_ip,
                           IPV6_ADDR_LEN);
                rte_memcpy(arfs->tuple.src_ipv6,
                           &input->flow.ipv6_flow.src_ip,
                           IPV6_ADDR_LEN);

                /* UDP */
                if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
                        arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
                        arfs->tuple.src_port = input->flow.udp6_flow.src_port;
                } else { /* TCP */
                        arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
                        arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
                }
                break;
        default:
                DP_ERR(edev, "Unsupported flow_type %u\n",
                       input->flow_type);
                return -ENOTSUP;
        }

        arfs->rx_queue = fdir->action.rx_queue;
        return 0;
}
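
/* Illustrative input for the translation above (values are arbitrary
 * examples; addresses and ports are expected in network byte order):
 *
 *	struct rte_eth_fdir_filter fdir;
 *
 *	memset(&fdir, 0, sizeof(fdir));
 *	fdir.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *	fdir.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(RTE_IPV4(1, 1, 1, 1));
 *	fdir.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(RTE_IPV4(2, 2, 2, 2));
 *	fdir.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(1024);
 *	fdir.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(2048);
 *	fdir.action.rx_queue = 1;
 */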

static int
qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
                        struct qede_arfs_entry *arfs,
                        bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_ntuple_filter_params params;
        char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
        struct qede_arfs_entry *tmp = NULL;
        const struct rte_memzone *mz;
        struct ecore_hwfn *p_hwfn;
        enum _ecore_status_t rc;
        uint16_t pkt_len;
        void *pkt;

        if (add) {
                if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
                        DP_ERR(edev, "Reached max flowdir filter limit\n");
                        return -EINVAL;
                }
        }

        /* soft_id could have been used as the memzone name, but soft_id is
         * not currently used, so it has no significance here.
         */
        snprintf(mz_name, sizeof(mz_name), "%lx",
                 (unsigned long)rte_get_timer_cycles());
        mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
                                         SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
        if (!mz) {
                DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
                       rte_strerror(rte_errno));
                return -rte_errno;
        }

        pkt = mz->addr;
        memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
        pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
                                          &qdev->arfs_info.arfs);
        if (pkt_len == 0) {
                rc = -EINVAL;
                goto err1;
        }

        DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
        if (add) {
                SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
                                DP_INFO(edev, "flowdir filter already exists\n");
                                rc = -EEXIST;
                                goto err1;
                        }
                }
        } else {
                SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
                                break;
                }
                if (!tmp) {
                        DP_ERR(edev, "flowdir filter does not exist\n");
                        rc = -ENOENT;
                        goto err1;
                }
        }
        p_hwfn = ECORE_LEADING_HWFN(edev);
        if (add) {
                if (qdev->arfs_info.arfs.mode ==
                        ECORE_FILTER_CONFIG_MODE_DISABLE) {
                        /* Force update */
                        eth_dev->data->dev_conf.fdir_conf.mode =
                                                RTE_FDIR_MODE_PERFECT;
                        qdev->arfs_info.arfs.mode =
                                        ECORE_FILTER_CONFIG_MODE_5_TUPLE;
                        DP_INFO(edev, "Force enable flowdir in perfect mode\n");
                }
                /* Enable ARFS searcher with updated flow_types */
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                          &qdev->arfs_info.arfs);
        }

        memset(&params, 0, sizeof(params));
        params.addr = (dma_addr_t)mz->iova;
        params.length = pkt_len;
        params.qid = arfs->rx_queue;
        params.vport_id = 0;
        params.b_is_add = add;
        params.b_is_drop = arfs->is_drop;

        /* configure filter with ECORE_SPQ_MODE_EBLOCK */
        rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
                                               &params);
        if (rc == ECORE_SUCCESS) {
                if (add) {
                        arfs->pkt_len = pkt_len;
                        arfs->mz = mz;
                        SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
                                          arfs, list);
                        qdev->arfs_info.filter_count++;
                        DP_INFO(edev, "flowdir filter added, count = %d\n",
                                qdev->arfs_info.filter_count);
                } else {
                        rte_memzone_free(tmp->mz);
                        SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
                                     qede_arfs_entry, list);
                        rte_free(tmp); /* free the deleted list node */
                        rte_memzone_free(mz); /* free the temporary memzone */
                        qdev->arfs_info.filter_count--;
                        DP_INFO(edev, "flowdir filter deleted, count = %d\n",
                                qdev->arfs_info.filter_count);
                }
        } else {
                DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
                       rc, qdev->arfs_info.filter_count);
        }

        /* Disable ARFS searcher if there are no more filters */
        if (qdev->arfs_info.filter_count == 0) {
                memset(&qdev->arfs_info.arfs, 0,
                       sizeof(struct ecore_arfs_config_params));
                DP_INFO(edev, "Disabling flowdir\n");
                qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                          &qdev->arfs_info.arfs);
        }
        /* do not mask an ecore configuration failure from the caller */
        return rc;

err1:
        rte_memzone_free(mz);
        return rc;
}

static int
qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
                            struct rte_eth_fdir_filter *fdir_filter,
                            bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_arfs_entry *arfs = NULL;
        int rc = 0;

        arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
                          RTE_CACHE_LINE_SIZE);
        if (!arfs) {
                DP_ERR(edev, "Failed to allocate memory for arfs\n");
                return -ENOMEM;
        }

        rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
        if (rc < 0) {
                rte_free(arfs); /* not queued anywhere yet, free it here */
                return rc;
        }

        rc = qede_config_arfs_filter(eth_dev, arfs, add);
        /* On failure the entry was never queued; on delete it only served
         * as a lookup template. In both cases it must be freed here.
         */
        if (rc < 0 || !add)
                rte_free(arfs);

        return rc;
}

static int
qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
                     struct rte_eth_fdir_filter *fdir,
                     bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        if (!qede_valid_flow(fdir->input.flow_type)) {
                DP_ERR(edev, "invalid flow_type input\n");
                return -EINVAL;
        }

        if (fdir->action.rx_queue >= QEDE_RSS_COUNT(eth_dev)) {
                DP_ERR(edev, "invalid queue number %u\n",
                       fdir->action.rx_queue);
                return -EINVAL;
        }

        if (fdir->input.flow_ext.is_vf) {
                DP_ERR(edev, "flowdir is not supported over VF\n");
                return -EINVAL;
        }

        return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
}

/* Fills the L3/L4 headers and returns the actual length of the flowdir
 * packet.
 */
static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
                        struct qede_arfs_entry *arfs,
                        void *buff,
                        struct ecore_arfs_config_params *params)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        uint16_t *ether_type;
        uint8_t *raw_pkt;
        struct rte_ipv4_hdr *ip;
        struct rte_ipv6_hdr *ip6;
        struct rte_udp_hdr *udp;
        struct rte_tcp_hdr *tcp;
        uint16_t len;

        raw_pkt = (uint8_t *)buff;

        len = 2 * sizeof(struct rte_ether_addr);
        raw_pkt += 2 * sizeof(struct rte_ether_addr);
        ether_type = (uint16_t *)raw_pkt;
        raw_pkt += sizeof(uint16_t);
        len += sizeof(uint16_t);

        *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
        switch (arfs->tuple.eth_proto) {
        case RTE_ETHER_TYPE_IPV4:
                ip = (struct rte_ipv4_hdr *)raw_pkt;
                ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
                ip->total_length = sizeof(struct rte_ipv4_hdr);
                ip->next_proto_id = arfs->tuple.ip_proto;
                ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
                ip->dst_addr = arfs->tuple.dst_ipv4;
                ip->src_addr = arfs->tuple.src_ipv4;
                len += sizeof(struct rte_ipv4_hdr);
                params->ipv4 = true;

                raw_pkt = (uint8_t *)buff;
                /* UDP */
                if (arfs->tuple.ip_proto == IPPROTO_UDP) {
                        udp = (struct rte_udp_hdr *)(raw_pkt + len);
                        udp->dst_port = arfs->tuple.dst_port;
                        udp->src_port = arfs->tuple.src_port;
                        udp->dgram_len = sizeof(struct rte_udp_hdr);
                        len += sizeof(struct rte_udp_hdr);
                        /* adjust ip total_length */
                        ip->total_length += sizeof(struct rte_udp_hdr);
                        params->udp = true;
                } else { /* TCP */
                        tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
                        tcp->src_port = arfs->tuple.src_port;
                        tcp->dst_port = arfs->tuple.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
                        len += sizeof(struct rte_tcp_hdr);
                        /* adjust ip total_length */
                        ip->total_length += sizeof(struct rte_tcp_hdr);
                        params->tcp = true;
                }
                break;
        case RTE_ETHER_TYPE_IPV6:
                ip6 = (struct rte_ipv6_hdr *)raw_pkt;
                ip6->proto = arfs->tuple.ip_proto;
                ip6->vtc_flow =
                        rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);

                rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
                           IPV6_ADDR_LEN);
                rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
                           IPV6_ADDR_LEN);
                len += sizeof(struct rte_ipv6_hdr);
                params->ipv6 = true;

                raw_pkt = (uint8_t *)buff;
                /* UDP */
                if (arfs->tuple.ip_proto == IPPROTO_UDP) {
                        udp = (struct rte_udp_hdr *)(raw_pkt + len);
                        udp->src_port = arfs->tuple.src_port;
                        udp->dst_port = arfs->tuple.dst_port;
                        len += sizeof(struct rte_udp_hdr);
                        params->udp = true;
                } else { /* TCP */
                        tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
                        tcp->src_port = arfs->tuple.src_port;
                        tcp->dst_port = arfs->tuple.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
                        len += sizeof(struct rte_tcp_hdr);
                        params->tcp = true;
                }
                break;
        default:
                DP_ERR(edev, "Unsupported eth_proto %u\n",
                       arfs->tuple.eth_proto);
                return 0;
        }

        return len;
}
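
/* Resulting template layout for the IPv4/TCP case built above (the L2
 * addresses are left zeroed; only fields relevant for matching are set):
 *
 *	offset:  0           12        14             34            54
 *	        +-------------+---------+--------------+-------------+
 *	        | zeroed MACs | e-type  | rte_ipv4_hdr | rte_tcp_hdr |
 *	        +-------------+---------+--------------+-------------+
 *
 * i.e. len = 12 + 2 + 20 + 20 = 54 bytes, within QEDE_MAX_FDIR_PKT_LEN (86).
 */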

static int
qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
                      enum rte_filter_op filter_op,
                      void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_fdir_filter *fdir;
        int ret;

        fdir = (struct rte_eth_fdir_filter *)arg;
        switch (filter_op) {
        case RTE_ETH_FILTER_NOP:
                /* Typically used to query flowdir support */
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        return -ENOTSUP;
                }
                return 0; /* means supported */
        case RTE_ETH_FILTER_ADD:
                ret = qede_fdir_filter_add(eth_dev, fdir, 1);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = qede_fdir_filter_add(eth_dev, fdir, 0);
                break;
        case RTE_ETH_FILTER_FLUSH:
        case RTE_ETH_FILTER_UPDATE:
        case RTE_ETH_FILTER_INFO:
                return -ENOTSUP;
        default:
                DP_ERR(edev, "unknown operation %u\n", filter_op);
                ret = -EINVAL;
        }

        return ret;
}
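
/* Usage sketch (illustrative): the legacy filter API reaches the handler
 * above through rte_eth_dev_filter_ctrl(), e.g. probing for support and
 * then adding a filter (fdir as filled in the example further up):
 *
 *	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				    RTE_ETH_FILTER_NOP, NULL) == 0)
 *		rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *					RTE_ETH_FILTER_ADD, &fdir);
 */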

static int
qede_tunnel_update(struct qede_dev *qdev,
                   struct ecore_tunnel_info *tunn_info)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_hwfn *p_hwfn;
        struct ecore_ptt *p_ptt;
        int i;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                if (IS_PF(edev)) {
                        p_ptt = ecore_ptt_acquire(p_hwfn);
                        if (!p_ptt) {
                                DP_ERR(p_hwfn, "Can't acquire PTT\n");
                                return -EAGAIN;
                        }
                } else {
                        p_ptt = NULL;
                }

                rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
                                tunn_info, ECORE_SPQ_MODE_CB, NULL);
                if (IS_PF(edev))
                        ecore_ptt_release(p_hwfn, p_ptt);

                if (rc != ECORE_SUCCESS)
                        break;
        }

        return rc;
}

static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                  bool enable)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_tunnel_info tunn;

        if (qdev->vxlan.enable == enable)
                return ECORE_SUCCESS;

        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
        tunn.vxlan.b_update_mode = true;
        tunn.vxlan.b_mode_enabled = enable;
        tunn.b_update_rx_cls = true;
        tunn.b_update_tx_cls = true;
        tunn.vxlan.tun_cls = clss;

        tunn.vxlan_port.b_update_port = true;
        tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

        rc = qede_tunnel_update(qdev, &tunn);
        if (rc == ECORE_SUCCESS) {
                qdev->vxlan.enable = enable;
                qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
                DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
                        enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
        } else {
                DP_ERR(edev, "Failed to update tunn_clss %u\n",
                       tunn.vxlan.tun_cls);
        }

        return rc;
}

static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                   bool enable)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_tunnel_info tunn;

        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
        tunn.l2_geneve.b_update_mode = true;
        tunn.l2_geneve.b_mode_enabled = enable;
        tunn.ip_geneve.b_update_mode = true;
        tunn.ip_geneve.b_mode_enabled = enable;
        tunn.l2_geneve.tun_cls = clss;
        tunn.ip_geneve.tun_cls = clss;
        tunn.b_update_rx_cls = true;
        tunn.b_update_tx_cls = true;

        tunn.geneve_port.b_update_port = true;
        tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

        rc = qede_tunnel_update(qdev, &tunn);
        if (rc == ECORE_SUCCESS) {
                qdev->geneve.enable = enable;
                qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
                DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
                        enable ? "enabled" : "disabled", qdev->geneve.udp_port);
        } else {
                DP_ERR(edev, "Failed to update tunn_clss %u\n",
                       clss);
        }

        return rc;
}

int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
                      struct rte_eth_udp_tunnel *tunnel_udp)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_tunnel_info tunn; /* @DPDK */
        uint16_t udp_port;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        memset(&tunn, 0, sizeof(tunn));

        switch (tunnel_udp->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
                        DP_ERR(edev, "UDP port %u doesn't exist\n",
                                tunnel_udp->udp_port);
                        return ECORE_INVAL;
                }
                udp_port = 0;

                tunn.vxlan_port.b_update_port = true;
                tunn.vxlan_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u\n",
                               tunn.vxlan_port.port);
                        return rc;
                }

                qdev->vxlan.udp_port = udp_port;
                /* If the request is to delete the UDP port and the number of
                 * VXLAN filters has reached 0, then VXLAN offload can be
                 * disabled.
                 */
                if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
                        return qede_vxlan_enable(eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN, false);

                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
                        DP_ERR(edev, "UDP port %u doesn't exist\n",
                                tunnel_udp->udp_port);
                        return ECORE_INVAL;
                }

                udp_port = 0;

                tunn.geneve_port.b_update_port = true;
                tunn.geneve_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u\n",
                               tunn.geneve_port.port);
                        return rc;
                }

                qdev->geneve.udp_port = udp_port;
                /* If the request is to delete the UDP port and the number of
                 * GENEVE filters has reached 0, then GENEVE offload can be
                 * disabled.
                 */
                if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
                        return qede_geneve_enable(eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN, false);

                break;

        default:
                return ECORE_INVAL;
        }

        return 0;
}

int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
                      struct rte_eth_udp_tunnel *tunnel_udp)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_tunnel_info tunn; /* @DPDK */
        uint16_t udp_port;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        memset(&tunn, 0, sizeof(tunn));

        switch (tunnel_udp->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
                        DP_INFO(edev,
                                "UDP port %u for VXLAN was already configured\n",
                                tunnel_udp->udp_port);
                        return ECORE_SUCCESS;
                }

                /* Enable VXLAN tunnel with default MAC/VLAN classification if
                 * it was not enabled while adding a VXLAN filter before the
                 * UDP port update.
                 */
                if (!qdev->vxlan.enable) {
                        rc = qede_vxlan_enable(eth_dev,
                                ECORE_TUNN_CLSS_MAC_VLAN, true);
                        if (rc != ECORE_SUCCESS) {
                                DP_ERR(edev, "Failed to enable VXLAN "
                                        "prior to updating UDP port\n");
                                return rc;
                        }
                }
                udp_port = tunnel_udp->udp_port;

                tunn.vxlan_port.b_update_port = true;
                tunn.vxlan_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
                               udp_port);
                        return rc;
                }

                DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);

                qdev->vxlan.udp_port = udp_port;
                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
                        DP_INFO(edev,
                                "UDP port %u for GENEVE was already configured\n",
                                tunnel_udp->udp_port);
                        return ECORE_SUCCESS;
                }

                /* Enable GENEVE tunnel with default MAC/VLAN classification if
                 * it was not enabled while adding a GENEVE filter before the
                 * UDP port update.
                 */
                if (!qdev->geneve.enable) {
                        rc = qede_geneve_enable(eth_dev,
                                ECORE_TUNN_CLSS_MAC_VLAN, true);
                        if (rc != ECORE_SUCCESS) {
                                DP_ERR(edev, "Failed to enable GENEVE "
                                        "prior to updating UDP port\n");
                                return rc;
                        }
                }
                udp_port = tunnel_udp->udp_port;

                tunn.geneve_port.b_update_port = true;
                tunn.geneve_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
                               udp_port);
                        return rc;
                }

                DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);

                qdev->geneve.udp_port = udp_port;
                break;
        default:
                return ECORE_INVAL;
        }

        return 0;
}
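
/* Usage sketch (illustrative): both handlers above sit behind the
 * generic UDP tunnel port API; e.g. to register a non-default VXLAN
 * port and remove it again later:
 *
 *	struct rte_eth_udp_tunnel tunnel_udp = {
 *		.udp_port = 4790,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
 *	rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel_udp);
 */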

static int
qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        struct rte_flow_error *error)
{
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }

        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }

        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }

        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }

        if (attr->transfer != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
                                   "Transfer is not supported");
                return -rte_errno;
        }

        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        return 0;
}

static int
qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
                        const struct rte_flow_item pattern[],
                        struct rte_flow_error *error,
                        struct rte_flow *flow)
{
        bool l3 = false, l4 = false;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                if (!pattern->spec) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           pattern,
                                           "Item spec not defined");
                        return -rte_errno;
                }

                if (pattern->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           pattern,
                                           "Item last not supported");
                        return -rte_errno;
                }

                if (pattern->mask) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           pattern,
                                           "Item mask not supported");
                        return -rte_errno;
                }

                /* The validation below is only for a 4-tuple flow
                 * (GFT_PROFILE_TYPE_4_TUPLE):
                 * - src and dst L3 address (IPv4 or IPv6)
                 * - src and dst L4 port (TCP or UDP)
                 */

                switch (pattern->type) {
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = true;

                        if (flow) {
                                const struct rte_flow_item_ipv4 *spec;

                                spec = pattern->spec;
                                flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
                                flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
                                flow->entry.tuple.eth_proto =
                                        RTE_ETHER_TYPE_IPV4;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = true;

                        if (flow) {
                                const struct rte_flow_item_ipv6 *spec;

                                spec = pattern->spec;
                                rte_memcpy(flow->entry.tuple.src_ipv6,
                                           spec->hdr.src_addr,
                                           IPV6_ADDR_LEN);
                                rte_memcpy(flow->entry.tuple.dst_ipv6,
                                           spec->hdr.dst_addr,
                                           IPV6_ADDR_LEN);
                                flow->entry.tuple.eth_proto =
                                        RTE_ETHER_TYPE_IPV6;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        l4 = true;

                        if (flow) {
                                const struct rte_flow_item_udp *spec;

                                spec = pattern->spec;
                                flow->entry.tuple.src_port =
                                                spec->hdr.src_port;
                                flow->entry.tuple.dst_port =
                                                spec->hdr.dst_port;
                                flow->entry.tuple.ip_proto = IPPROTO_UDP;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        l4 = true;

                        if (flow) {
                                const struct rte_flow_item_tcp *spec;

                                spec = pattern->spec;
                                flow->entry.tuple.src_port =
                                                spec->hdr.src_port;
                                flow->entry.tuple.dst_port =
                                                spec->hdr.dst_port;
                                flow->entry.tuple.ip_proto = IPPROTO_TCP;
                        }
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           pattern,
                                           "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
                        return -rte_errno;
                }
        }

        if (!(l3 && l4)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   pattern,
                                   "Item types need to have both L3 and L4 protocols");
                return -rte_errno;
        }

        return 0;
}
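
/* A pattern accepted by the parser above (illustrative, arbitrary
 * values): spec-only items, no mask/last, covering both L3 and L4:
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(1, 1, 1, 1)),
 *		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(2, 2, 2, 2)),
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(1024),
 *		.hdr.dst_port = rte_cpu_to_be_16(2048),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,  .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */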

static int
qede_flow_parse_actions(struct rte_eth_dev *dev,
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error,
                        struct rte_flow *flow)
{
        const struct rte_flow_action_queue *queue;

        if (actions == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
                                   "NULL actions");
                return -rte_errno;
        }

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        queue = actions->conf;

                        if (queue->index >= QEDE_RSS_COUNT(dev)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Bad QUEUE action");
                                return -rte_errno;
                        }

                        if (flow)
                                flow->entry.rx_queue = queue->index;

                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        if (flow)
                                flow->entry.is_drop = true;
                        break;
                default:
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           actions,
                                           "Action is not supported - only ACTION_TYPE_QUEUE and ACTION_TYPE_DROP supported");
                        return -rte_errno;
                }
        }

        return 0;
}

static int
qede_flow_parse(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item patterns[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error,
                struct rte_flow *flow)
{
        int rc = 0;

        rc = qede_flow_validate_attr(dev, attr, error);
        if (rc)
                return rc;

        /* Parse and validate the item pattern and actions. The given item
         * list and actions will be translated to the qede PMD-specific
         * arfs structure.
         */
        rc = qede_flow_parse_pattern(dev, patterns, error, flow);
        if (rc)
                return rc;

        rc = qede_flow_parse_actions(dev, actions, error, flow);

        return rc;
}

static int
qede_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item patterns[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
}

static struct rte_flow *
qede_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct rte_flow *flow = NULL;
        int rc;

        flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
        if (flow == NULL) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Failed to allocate memory");
                return NULL;
        }

        rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
        if (rc < 0) {
                rte_free(flow);
                return NULL;
        }

        rc = qede_config_arfs_filter(dev, &flow->entry, true);
        if (rc < 0) {
                /* rte_flow_error_set() expects a positive errno value */
                rte_flow_error_set(error, -rc,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to configure flow filter");
                rte_free(flow);
                return NULL;
        }

        return flow;
}

static int
qede_flow_destroy(struct rte_eth_dev *eth_dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        int rc = 0;

        rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
        if (rc < 0) {
                rte_flow_error_set(error, -rc,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to delete flow filter");
                /* keep the handle: the filter is still installed */
                return rc;
        }

        /* the filter is gone, release the rte_flow handle as well */
        rte_free(flow);
        return 0;
}

static int
qede_flow_flush(struct rte_eth_dev *eth_dev,
                struct rte_flow_error *error)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct qede_arfs_entry *tmp = NULL;
        int rc = 0;

        while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
                tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);

                rc = qede_config_arfs_filter(eth_dev, tmp, false);
                if (rc < 0) {
                        rte_flow_error_set(error, -rc,
                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                           "Failed to flush flow filter");
                        /* the entry stays on the list; bail out instead of
                         * spinning on it forever
                         */
                        break;
                }
        }

        return rc;
}

const struct rte_flow_ops qede_flow_ops = {
        .validate = qede_flow_validate,
        .create = qede_flow_create,
        .destroy = qede_flow_destroy,
        .flush = qede_flow_flush,
};
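
/* End-to-end usage sketch (illustrative): combined with the pattern
 * example above, a 4-tuple filter steering to Rx queue 1 is installed
 * through the generic flow API:
 *
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (flow == NULL)
 *		printf("flow create failed: %s\n",
 *		       err.message ? err.message : "(no message)");
 */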

int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
                         enum rte_filter_type filter_type,
                         enum rte_filter_op filter_op,
                         void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        switch (filter_type) {
        case RTE_ETH_FILTER_FDIR:
                return qede_fdir_filter_conf(eth_dev, filter_op, arg);
        case RTE_ETH_FILTER_GENERIC:
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        return -ENOTSUP;
                }

                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;

                *(const void **)arg = &qede_flow_ops;
                return 0;
        case RTE_ETH_FILTER_MAX:
        default:
                DP_ERR(edev, "Unsupported filter type %d\n",
                        filter_type);
                return -EINVAL;
        }

        return 0;
}