drivers/net/qede/qede_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2017 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6
7 #include <rte_udp.h>
8 #include <rte_tcp.h>
9 #include <rte_sctp.h>
10 #include <rte_errno.h>
11 #include <rte_flow_driver.h>
12
13 #include "qede_ethdev.h"
14
15 /* VXLAN tunnel classification mapping */
16 const struct _qede_udp_tunn_types {
17         uint16_t rte_filter_type;
18         enum ecore_filter_ucast_type qede_type;
19         enum ecore_tunn_clss qede_tunn_clss;
20         const char *string;
21 } qede_tunn_types[] = {
22         {
23                 ETH_TUNNEL_FILTER_OMAC,
24                 ECORE_FILTER_MAC,
25                 ECORE_TUNN_CLSS_MAC_VLAN,
26                 "outer-mac"
27         },
28         {
29                 ETH_TUNNEL_FILTER_TENID,
30                 ECORE_FILTER_VNI,
31                 ECORE_TUNN_CLSS_MAC_VNI,
32                 "vni"
33         },
34         {
35                 ETH_TUNNEL_FILTER_IMAC,
36                 ECORE_FILTER_INNER_MAC,
37                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
38                 "inner-mac"
39         },
40         {
41                 ETH_TUNNEL_FILTER_IVLAN,
42                 ECORE_FILTER_INNER_VLAN,
43                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
44                 "inner-vlan"
45         },
46         {
47                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
48                 ECORE_FILTER_MAC_VNI_PAIR,
49                 ECORE_TUNN_CLSS_MAC_VNI,
50                 "outer-mac and vni"
51         },
52         {
53                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
54                 ECORE_FILTER_UNUSED,
55                 MAX_ECORE_TUNN_CLSS,
56                 "outer-mac and inner-mac"
57         },
58         {
59                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
60                 ECORE_FILTER_UNUSED,
61                 MAX_ECORE_TUNN_CLSS,
62                 "outer-mac and inner-vlan"
63         },
64         {
65                 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
66                 ECORE_FILTER_INNER_MAC_VNI_PAIR,
67                 ECORE_TUNN_CLSS_INNER_MAC_VNI,
68                 "vni and inner-mac",
69         },
70         {
71                 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
72                 ECORE_FILTER_UNUSED,
73                 MAX_ECORE_TUNN_CLSS,
74                 "vni and inner-vlan",
75         },
76         {
77                 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
78                 ECORE_FILTER_INNER_PAIR,
79                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
80                 "inner-mac and inner-vlan",
81         },
82         {
83                 ETH_TUNNEL_FILTER_OIP,
84                 ECORE_FILTER_UNUSED,
85                 MAX_ECORE_TUNN_CLSS,
86                 "outer-IP"
87         },
88         {
89                 ETH_TUNNEL_FILTER_IIP,
90                 ECORE_FILTER_UNUSED,
91                 MAX_ECORE_TUNN_CLSS,
92                 "inner-IP"
93         },
94         {
95                 RTE_TUNNEL_FILTER_IMAC_IVLAN,
96                 ECORE_FILTER_UNUSED,
97                 MAX_ECORE_TUNN_CLSS,
98                 "IMAC_IVLAN"
99         },
100         {
101                 RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
102                 ECORE_FILTER_UNUSED,
103                 MAX_ECORE_TUNN_CLSS,
104                 "IMAC_IVLAN_TENID"
105         },
106         {
107                 RTE_TUNNEL_FILTER_IMAC_TENID,
108                 ECORE_FILTER_UNUSED,
109                 MAX_ECORE_TUNN_CLSS,
110                 "IMAC_TENID"
111         },
112         {
113                 RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
114                 ECORE_FILTER_UNUSED,
115                 MAX_ECORE_TUNN_CLSS,
116                 "OMAC_TENID_IMAC"
117         },
118 };
119
120 #define IP_VERSION                              (0x40)
121 #define IP_HDRLEN                               (0x5)
122 #define QEDE_FDIR_IP_DEFAULT_VERSION_IHL        (IP_VERSION | IP_HDRLEN)
123 #define QEDE_FDIR_TCP_DEFAULT_DATAOFF           (0x50)
124 #define QEDE_FDIR_IPV4_DEF_TTL                  (64)
125 #define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW         (0x60000000)
126 /* Sum of the lengths of the L2, L3 and L4 headers:
127  * L2 : ether_hdr + vlan_hdr + vxlan_hdr
128  * L3 : ipv6_hdr
129  * L4 : tcp_hdr
130  */
131 #define QEDE_MAX_FDIR_PKT_LEN                   (86)
132
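/* Only non-fragmented IPv4/IPv6 TCP and UDP flows can be programmed as
 * flowdir/ARFS filters.
 */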
133 static inline bool qede_valid_flow(uint16_t flow_type)
134 {
135         return  ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
136                  (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
137                  (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
138                  (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
139 }
140
141 static uint16_t
142 qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
143                         struct qede_arfs_entry *arfs,
144                         void *buff,
145                         struct ecore_arfs_config_params *params);
146
147 /* Note: Flowdir support is only partial.
148  * For example, drop_queue, FDIR masks and flex_conf are not supported.
149  * Parameters like pballoc/status fields are irrelevant here.
150  */
151 int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
152 {
153         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
154         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
155         struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
156
157         /* check FDIR modes */
158         switch (fdir->mode) {
159         case RTE_FDIR_MODE_NONE:
160                 qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
161                 DP_INFO(edev, "flowdir is disabled\n");
162         break;
163         case RTE_FDIR_MODE_PERFECT:
164                 if (ECORE_IS_CMT(edev)) {
165                         DP_ERR(edev, "flowdir is not supported in 100G mode\n");
166                         qdev->arfs_info.arfs.mode =
167                                 ECORE_FILTER_CONFIG_MODE_DISABLE;
168                         return -ENOTSUP;
169                 }
170                 qdev->arfs_info.arfs.mode =
171                                 ECORE_FILTER_CONFIG_MODE_5_TUPLE;
172                 DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
173         break;
174         case RTE_FDIR_MODE_PERFECT_TUNNEL:
175         case RTE_FDIR_MODE_SIGNATURE:
176         case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
177                 DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
178                 return -ENOTSUP;
179         }
180
181         return 0;
182 }
183
184 void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
185 {
186         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
187         struct qede_arfs_entry *tmp = NULL;
188
189         /* Drain from the list head; SLIST_FOREACH would keep dereferencing
190          * nodes that have already been freed. */
191         while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
192                 tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
193                 SLIST_REMOVE_HEAD(&qdev->arfs_info.arfs_list_head, list);
194                 if (tmp->mz)
195                         rte_memzone_free(tmp->mz);
196                 rte_free(tmp);
197         }
198 }
199
200 static int
201 qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
202                          struct rte_eth_fdir_filter *fdir,
203                          struct qede_arfs_entry *arfs)
204 {
205         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
206         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
207         struct rte_eth_fdir_input *input;
208
209         static const uint8_t next_proto[] = {
210                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
211                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
212                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
213                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
214         };
215
216         input = &fdir->input;
217
218         DP_INFO(edev, "flow_type %d\n", input->flow_type);
219
220         switch (input->flow_type) {
221         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
222         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
223                 /* fill the common ip header */
224                 arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV4;
225                 arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
226                 arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
227                 arfs->tuple.ip_proto = next_proto[input->flow_type];
228
229                 /* UDP */
230                 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
231                         arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
232                         arfs->tuple.src_port = input->flow.udp4_flow.src_port;
233                 } else { /* TCP */
234                         arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
235                         arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
236                 }
237                 break;
238         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
239         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
240                 arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV6;
241                 arfs->tuple.ip_proto = next_proto[input->flow_type];
242                 rte_memcpy(arfs->tuple.dst_ipv6,
243                            &input->flow.ipv6_flow.dst_ip,
244                            IPV6_ADDR_LEN);
245                 rte_memcpy(arfs->tuple.src_ipv6,
246                            &input->flow.ipv6_flow.src_ip,
247                            IPV6_ADDR_LEN);
248
249                 /* UDP */
250                 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
251                         arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
252                         arfs->tuple.src_port = input->flow.udp6_flow.src_port;
253                 } else { /* TCP */
254                         arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
255                         arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
256                 }
257                 break;
258         default:
259                 DP_ERR(edev, "Unsupported flow_type %u\n",
260                        input->flow_type);
261                 return -ENOTSUP;
262         }
263
264         arfs->rx_queue = fdir->action.rx_queue;
265         return 0;
266 }
267
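/* Builds a template packet for the 5-tuple in a freshly reserved memzone,
 * programs (or removes) the matching ntuple filter through ecore and keeps
 * arfs_list_head/filter_count in sync. The ARFS searcher is enabled with the
 * first filter and disabled again once the last one is removed.
 */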
268 static int
269 qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
270                         struct qede_arfs_entry *arfs,
271                         bool add)
272 {
273         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
274         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
275         struct ecore_ntuple_filter_params params;
276         char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
277         struct qede_arfs_entry *tmp = NULL;
278         const struct rte_memzone *mz;
279         struct ecore_hwfn *p_hwfn;
280         enum _ecore_status_t rc;
281         uint16_t pkt_len;
282         void *pkt;
283
284         if (add) {
285                 if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
286                         DP_ERR(edev, "Reached max flowdir filter limit\n");
287                         return -EINVAL;
288                 }
289         }
290
291         /* soft_id could have been used as the memzone name, but soft_id is
292          * not currently used, so it has no significance here.
293          */
294         snprintf(mz_name, sizeof(mz_name), "%lx",
295                  (unsigned long)rte_get_timer_cycles());
296         mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
297                                          SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
298         if (!mz) {
299                 DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
300                        rte_strerror(rte_errno));
301                 return -rte_errno;
302         }
303
304         pkt = mz->addr;
305         memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
306         pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
307                                           &qdev->arfs_info.arfs);
308         if (pkt_len == 0) {
309                 rc = -EINVAL;
310                 goto err1;
311         }
312
313         DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
314         if (add) {
315                 SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
316                         if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
317                                 DP_INFO(edev, "flowdir filter already exists\n");
318                                 rc = -EEXIST;
319                                 goto err1;
320                         }
321                 }
322         } else {
323                 SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
324                         if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
325                                 break;
326                 }
327                 if (!tmp) {
328                         DP_ERR(edev, "flowdir filter does not exist\n");
329                         rc = -ENOENT;
330                         goto err1;
331                 }
332         }
333         p_hwfn = ECORE_LEADING_HWFN(edev);
334         if (add) {
335                 if (qdev->arfs_info.arfs.mode ==
336                         ECORE_FILTER_CONFIG_MODE_DISABLE) {
337                         /* Force update */
338                         eth_dev->data->dev_conf.fdir_conf.mode =
339                                                 RTE_FDIR_MODE_PERFECT;
340                         qdev->arfs_info.arfs.mode =
341                                         ECORE_FILTER_CONFIG_MODE_5_TUPLE;
342                         DP_INFO(edev, "Force enable flowdir in perfect mode\n");
343                 }
344                 /* Enable ARFS searcher with updated flow_types */
345                 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
346                                           &qdev->arfs_info.arfs);
347         }
348
349         memset(&params, 0, sizeof(params));
350         params.addr = (dma_addr_t)mz->iova;
351         params.length = pkt_len;
352         params.qid = arfs->rx_queue;
353         params.vport_id = 0;
354         params.b_is_add = add;
355         params.b_is_drop = arfs->is_drop;
356
357         /* configure filter with ECORE_SPQ_MODE_EBLOCK */
358         rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
359                                                &params);
360         if (rc == ECORE_SUCCESS) {
361                 if (add) {
362                         arfs->pkt_len = pkt_len;
363                         arfs->mz = mz;
364                         SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
365                                           arfs, list);
366                         qdev->arfs_info.filter_count++;
367                         DP_INFO(edev, "flowdir filter added, count = %d\n",
368                                 qdev->arfs_info.filter_count);
369                 } else {
370                         rte_memzone_free(tmp->mz);
371                         SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
372                                      qede_arfs_entry, list);
373                         rte_free(tmp); /* free the deleted list node */
374                         rte_memzone_free(mz); /* free the temporary memzone */
375                         qdev->arfs_info.filter_count--;
376                         DP_INFO(edev, "Fdir filter deleted, count = %d\n",
377                                 qdev->arfs_info.filter_count);
378                 }
379         } else {
380                 DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
381                        rc, qdev->arfs_info.filter_count);
382         }
383
384         /* Disable ARFS searcher if there are no more filters */
385         if (qdev->arfs_info.filter_count == 0) {
386                 memset(&qdev->arfs_info.arfs, 0,
387                        sizeof(struct ecore_arfs_config_params));
388                 DP_INFO(edev, "Disabling flowdir\n");
389                 qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
390                 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
391                                           &qdev->arfs_info.arfs);
392         }
393         return 0;
394
395 err1:
396         rte_memzone_free(mz);
397         return rc;
398 }
399
400 static int
401 qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
402                             struct rte_eth_fdir_filter *fdir_filter,
403                             bool add)
404 {
405         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
406         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
407         struct qede_arfs_entry *arfs = NULL;
408         int rc = 0;
409
410         arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
411                                   RTE_CACHE_LINE_SIZE);
412         if (!arfs) {
413                 DP_ERR(edev, "Failed to allocate memory for arfs entry\n");
414                 return -ENOMEM;
415         }
416
417         rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
418         if (rc < 0) {
419                 rte_free(arfs); /* do not leak the entry on conversion failure */
420                 return rc;
421         }
420
421         rc = qede_config_arfs_filter(eth_dev, arfs, add);
422         if (rc < 0)
423                 rte_free(arfs);
424
425         return rc;
426 }
427
428 static int
429 qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
430                      struct rte_eth_fdir_filter *fdir,
431                      bool add)
432 {
433         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
434         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
435
436         if (!qede_valid_flow(fdir->input.flow_type)) {
437                 DP_ERR(edev, "invalid flow_type input\n");
438                 return -EINVAL;
439         }
440
441         if (fdir->action.rx_queue >= QEDE_RSS_COUNT(eth_dev)) {
442                 DP_ERR(edev, "invalid queue number %u\n",
443                        fdir->action.rx_queue);
444                 return -EINVAL;
445         }
446
447         if (fdir->input.flow_ext.is_vf) {
448                 DP_ERR(edev, "flowdir is not supported over VF\n");
449                 return -EINVAL;
450         }
451
452         return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
453 }
454
455 /* Fills the L3/L4 headers and returns the actual length of the flowdir packet */
456 static uint16_t
457 qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
458                         struct qede_arfs_entry *arfs,
459                         void *buff,
460                         struct ecore_arfs_config_params *params)
461
462 {
463         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
464         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
465         uint16_t *ether_type;
466         uint8_t *raw_pkt;
467         struct rte_ipv4_hdr *ip;
468         struct rte_ipv6_hdr *ip6;
469         struct rte_udp_hdr *udp;
470         struct rte_tcp_hdr *tcp;
471         uint16_t len;
472
473         raw_pkt = (uint8_t *)buff;
474
475         len =  2 * sizeof(struct rte_ether_addr);
476         raw_pkt += 2 * sizeof(struct rte_ether_addr);
477         ether_type = (uint16_t *)raw_pkt;
478         raw_pkt += sizeof(uint16_t);
479         len += sizeof(uint16_t);
480
481         *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
482         switch (arfs->tuple.eth_proto) {
483         case RTE_ETHER_TYPE_IPV4:
484                 ip = (struct rte_ipv4_hdr *)raw_pkt;
485                 ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
486                 ip->total_length = sizeof(struct rte_ipv4_hdr);
487                 ip->next_proto_id = arfs->tuple.ip_proto;
488                 ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
489                 ip->dst_addr = arfs->tuple.dst_ipv4;
490                 ip->src_addr = arfs->tuple.src_ipv4;
491                 len += sizeof(struct rte_ipv4_hdr);
492                 params->ipv4 = true;
493
494                 raw_pkt = (uint8_t *)buff;
495                 /* UDP */
496                 if (arfs->tuple.ip_proto == IPPROTO_UDP) {
497                         udp = (struct rte_udp_hdr *)(raw_pkt + len);
498                         udp->dst_port = arfs->tuple.dst_port;
499                         udp->src_port = arfs->tuple.src_port;
500                         udp->dgram_len = sizeof(struct rte_udp_hdr);
501                         len += sizeof(struct rte_udp_hdr);
502                         /* adjust ip total_length */
503                         ip->total_length += sizeof(struct rte_udp_hdr);
504                         params->udp = true;
505                 } else { /* TCP */
506                         tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
507                         tcp->src_port = arfs->tuple.src_port;
508                         tcp->dst_port = arfs->tuple.dst_port;
509                         tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
510                         len += sizeof(struct rte_tcp_hdr);
511                         /* adjust ip total_length */
512                         ip->total_length += sizeof(struct rte_tcp_hdr);
513                         params->tcp = true;
514                 }
515                 break;
516         case RTE_ETHER_TYPE_IPV6:
517                 ip6 = (struct rte_ipv6_hdr *)raw_pkt;
518                 ip6->proto = arfs->tuple.ip_proto;
519                 ip6->vtc_flow =
520                         rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
521
522                 rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
523                            IPV6_ADDR_LEN);
524                 rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
525                            IPV6_ADDR_LEN);
526                 len += sizeof(struct rte_ipv6_hdr);
527                 params->ipv6 = true;
528
529                 raw_pkt = (uint8_t *)buff;
530                 /* UDP */
531                 if (arfs->tuple.ip_proto == IPPROTO_UDP) {
532                         udp = (struct rte_udp_hdr *)(raw_pkt + len);
533                         udp->src_port = arfs->tuple.src_port;
534                         udp->dst_port = arfs->tuple.dst_port;
535                         len += sizeof(struct rte_udp_hdr);
536                         params->udp = true;
537                 } else { /* TCP */
538                         tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
539                         tcp->src_port = arfs->tuple.src_port;
540                         tcp->dst_port = arfs->tuple.dst_port;
541                         tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
542                         len += sizeof(struct rte_tcp_hdr);
543                         params->tcp = true;
544                 }
545                 break;
546         default:
547                 DP_ERR(edev, "Unsupported eth_proto %u\n",
548                        arfs->tuple.eth_proto);
549                 return 0;
550         }
551
552         return len;
553 }
554
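/* Legacy flow director control: NOP queries support (unavailable in 100G/CMT
 * mode), ADD/DELETE are routed to qede_fdir_filter_add(), everything else is
 * unsupported.
 */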
555 static int
556 qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
557                       enum rte_filter_op filter_op,
558                       void *arg)
559 {
560         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
561         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
562         struct rte_eth_fdir_filter *fdir;
563         int ret;
564
565         fdir = (struct rte_eth_fdir_filter *)arg;
566         switch (filter_op) {
567         case RTE_ETH_FILTER_NOP:
568                 /* Typically used to query flowdir support */
569                 if (ECORE_IS_CMT(edev)) {
570                         DP_ERR(edev, "flowdir is not supported in 100G mode\n");
571                         return -ENOTSUP;
572                 }
573                 return 0; /* means supported */
574         case RTE_ETH_FILTER_ADD:
575                 ret = qede_fdir_filter_add(eth_dev, fdir, 1);
576         break;
577         case RTE_ETH_FILTER_DELETE:
578                 ret = qede_fdir_filter_add(eth_dev, fdir, 0);
579         break;
580         case RTE_ETH_FILTER_FLUSH:
581         case RTE_ETH_FILTER_UPDATE:
582         case RTE_ETH_FILTER_INFO:
583                 return -ENOTSUP;
584         break;
585         default:
586                 DP_ERR(edev, "unknown operation %u\n", filter_op);
587                 ret = -EINVAL;
588         }
589
590         return ret;
591 }
592
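/* Ntuple filters are converted into an equivalent IPv4 TCP/UDP fdir entry and
 * reuse the common flowdir path.
 */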
593 int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
594                             enum rte_filter_op filter_op,
595                             void *arg)
596 {
597         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
598         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
599         struct rte_eth_ntuple_filter *ntuple;
600         struct rte_eth_fdir_filter fdir_entry;
601         struct rte_eth_tcpv4_flow *tcpv4_flow;
602         struct rte_eth_udpv4_flow *udpv4_flow;
603         bool add = false;
604
605         switch (filter_op) {
606         case RTE_ETH_FILTER_NOP:
607                 /* Typically used to query fdir support */
608                 if (ECORE_IS_CMT(edev)) {
609                         DP_ERR(edev, "flowdir is not supported in 100G mode\n");
610                         return -ENOTSUP;
611                 }
612                 return 0; /* means supported */
613         case RTE_ETH_FILTER_ADD:
614                 add = true;
615         break;
616         case RTE_ETH_FILTER_DELETE:
617         break;
618         case RTE_ETH_FILTER_INFO:
619         case RTE_ETH_FILTER_GET:
620         case RTE_ETH_FILTER_UPDATE:
621         case RTE_ETH_FILTER_FLUSH:
622         case RTE_ETH_FILTER_SET:
623         case RTE_ETH_FILTER_STATS:
624         case RTE_ETH_FILTER_OP_MAX:
625                 DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
626                 return -ENOTSUP;
627         }
628         ntuple = (struct rte_eth_ntuple_filter *)arg;
629         /* Internally convert ntuple to fdir entry */
630         memset(&fdir_entry, 0, sizeof(fdir_entry));
631         if (ntuple->proto == IPPROTO_TCP) {
632                 fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
633                 tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
634                 tcpv4_flow->ip.src_ip = ntuple->src_ip;
635                 tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
636                 tcpv4_flow->ip.proto = IPPROTO_TCP;
637                 tcpv4_flow->src_port = ntuple->src_port;
638                 tcpv4_flow->dst_port = ntuple->dst_port;
639         } else {
640                 fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
641                 udpv4_flow = &fdir_entry.input.flow.udp4_flow;
642                 udpv4_flow->ip.src_ip = ntuple->src_ip;
643                 udpv4_flow->ip.dst_ip = ntuple->dst_ip;
644                 udpv4_flow->ip.proto = IPPROTO_UDP;
645                 udpv4_flow->src_port = ntuple->src_port;
646                 udpv4_flow->dst_port = ntuple->dst_port;
647         }
648
649         fdir_entry.action.rx_queue = ntuple->queue;
650
651         return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
652 }
653
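/* Applies the given tunnel configuration on every hwfn; a PTT is acquired for
 * PFs only, VFs pass a NULL PTT to the SPQ update.
 */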
654 static int
655 qede_tunnel_update(struct qede_dev *qdev,
656                    struct ecore_tunnel_info *tunn_info)
657 {
658         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
659         enum _ecore_status_t rc = ECORE_INVAL;
660         struct ecore_hwfn *p_hwfn;
661         struct ecore_ptt *p_ptt;
662         int i;
663
664         for_each_hwfn(edev, i) {
665                 p_hwfn = &edev->hwfns[i];
666                 if (IS_PF(edev)) {
667                         p_ptt = ecore_ptt_acquire(p_hwfn);
668                         if (!p_ptt) {
669                                 DP_ERR(p_hwfn, "Can't acquire PTT\n");
670                                 return -EAGAIN;
671                         }
672                 } else {
673                         p_ptt = NULL;
674                 }
675
676                 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
677                                 tunn_info, ECORE_SPQ_MODE_CB, NULL);
678                 if (IS_PF(edev))
679                         ecore_ptt_release(p_hwfn, p_ptt);
680
681                 if (rc != ECORE_SUCCESS)
682                         break;
683         }
684
685         return rc;
686 }
687
688 static int
689 qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
690                   bool enable)
691 {
692         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
693         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
694         enum _ecore_status_t rc = ECORE_INVAL;
695         struct ecore_tunnel_info tunn;
696
697         if (qdev->vxlan.enable == enable)
698                 return ECORE_SUCCESS;
699
700         memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
701         tunn.vxlan.b_update_mode = true;
702         tunn.vxlan.b_mode_enabled = enable;
703         tunn.b_update_rx_cls = true;
704         tunn.b_update_tx_cls = true;
705         tunn.vxlan.tun_cls = clss;
706
707         tunn.vxlan_port.b_update_port = true;
708         tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
709
710         rc = qede_tunnel_update(qdev, &tunn);
711         if (rc == ECORE_SUCCESS) {
712                 qdev->vxlan.enable = enable;
713                 qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
714                 DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
715                         enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
716         } else {
717                 DP_ERR(edev, "Failed to update tunn_clss %u\n",
718                        tunn.vxlan.tun_cls);
719         }
720
721         return rc;
722 }
723
724 static int
725 qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
726                   bool enable)
727 {
728         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
729         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
730         enum _ecore_status_t rc = ECORE_INVAL;
731         struct ecore_tunnel_info tunn;
732
733         memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
734         tunn.l2_geneve.b_update_mode = true;
735         tunn.l2_geneve.b_mode_enabled = enable;
736         tunn.ip_geneve.b_update_mode = true;
737         tunn.ip_geneve.b_mode_enabled = enable;
738         tunn.l2_geneve.tun_cls = clss;
739         tunn.ip_geneve.tun_cls = clss;
740         tunn.b_update_rx_cls = true;
741         tunn.b_update_tx_cls = true;
742
743         tunn.geneve_port.b_update_port = true;
744         tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
745
746         rc = qede_tunnel_update(qdev, &tunn);
747         if (rc == ECORE_SUCCESS) {
748                 qdev->geneve.enable = enable;
749                 qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
750                 DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
751                         enable ? "enabled" : "disabled", qdev->geneve.udp_port);
752         } else {
753                 DP_ERR(edev, "Failed to update tunn_clss %u\n",
754                        clss);
755         }
756
757         return rc;
758 }
759
760 static int
761 qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
762                   bool enable)
763 {
764         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
765         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
766         enum _ecore_status_t rc = ECORE_INVAL;
767         struct ecore_tunnel_info tunn;
768
769         memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
770         tunn.ip_gre.b_update_mode = true;
771         tunn.ip_gre.b_mode_enabled = enable;
772         tunn.ip_gre.tun_cls = clss;
774         tunn.b_update_rx_cls = true;
775         tunn.b_update_tx_cls = true;
776
777         rc = qede_tunnel_update(qdev, &tunn);
778         if (rc == ECORE_SUCCESS) {
779                 qdev->ipgre.enable = enable;
780                 DP_INFO(edev, "IPGRE is %s\n",
781                         enable ? "enabled" : "disabled");
782         } else {
783                 DP_ERR(edev, "Failed to update tunn_clss %u\n",
784                        clss);
785         }
786
787         return rc;
788 }
789
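/* Clears the VXLAN/GENEVE UDP destination port. If no filters of that tunnel
 * type remain, the tunnel offload itself is disabled as well.
 */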
790 int
791 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
792                       struct rte_eth_udp_tunnel *tunnel_udp)
793 {
794         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
795         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
796         struct ecore_tunnel_info tunn; /* @DPDK */
797         uint16_t udp_port;
798         int rc;
799
800         PMD_INIT_FUNC_TRACE(edev);
801
802         memset(&tunn, 0, sizeof(tunn));
803
804         switch (tunnel_udp->prot_type) {
805         case RTE_TUNNEL_TYPE_VXLAN:
806                 if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
807                         DP_ERR(edev, "UDP port %u doesn't exist\n",
808                                 tunnel_udp->udp_port);
809                         return ECORE_INVAL;
810                 }
811                 udp_port = 0;
812
813                 tunn.vxlan_port.b_update_port = true;
814                 tunn.vxlan_port.port = udp_port;
815
816                 rc = qede_tunnel_update(qdev, &tunn);
817                 if (rc != ECORE_SUCCESS) {
818                         DP_ERR(edev, "Unable to config UDP port %u\n",
819                                tunn.vxlan_port.port);
820                         return rc;
821                 }
822
823                 qdev->vxlan.udp_port = udp_port;
824                 /* If the request is to delete the UDP port and the number of
825                  * VXLAN filters has reached 0, then VXLAN offload can be
826                  * disabled.
827                  */
828                 if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
829                         return qede_vxlan_enable(eth_dev,
830                                         ECORE_TUNN_CLSS_MAC_VLAN, false);
831
832                 break;
833         case RTE_TUNNEL_TYPE_GENEVE:
834                 if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
835                         DP_ERR(edev, "UDP port %u doesn't exist\n",
836                                 tunnel_udp->udp_port);
837                         return ECORE_INVAL;
838                 }
839
840                 udp_port = 0;
841
842                 tunn.geneve_port.b_update_port = true;
843                 tunn.geneve_port.port = udp_port;
844
845                 rc = qede_tunnel_update(qdev, &tunn);
846                 if (rc != ECORE_SUCCESS) {
847                         DP_ERR(edev, "Unable to config UDP port %u\n",
848                                tunn.geneve_port.port);
849                         return rc;
850                 }
851
852                 qdev->geneve.udp_port = udp_port;
853                 /* If the request is to delete the UDP port and the number of
854                  * GENEVE filters has reached 0, then GENEVE offload can be
855                  * disabled.
856                  */
857                 if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
858                         return qede_geneve_enable(eth_dev,
859                                         ECORE_TUNN_CLSS_MAC_VLAN, false);
860
861                 break;
862
863         default:
864                 return ECORE_INVAL;
865         }
866
867         return 0;
868 }
869
870 int
871 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
872                       struct rte_eth_udp_tunnel *tunnel_udp)
873 {
874         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
875         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
876         struct ecore_tunnel_info tunn; /* @DPDK */
877         uint16_t udp_port;
878         int rc;
879
880         PMD_INIT_FUNC_TRACE(edev);
881
882         memset(&tunn, 0, sizeof(tunn));
883
884         switch (tunnel_udp->prot_type) {
885         case RTE_TUNNEL_TYPE_VXLAN:
886                 if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
887                         DP_INFO(edev,
888                                 "UDP port %u for VXLAN was already configured\n",
889                                 tunnel_udp->udp_port);
890                         return ECORE_SUCCESS;
891                 }
892
893                 /* Enable VxLAN tunnel with default MAC/VLAN classification if
894                  * it was not enabled while adding VXLAN filter before UDP port
895                  * update.
896                  */
897                 if (!qdev->vxlan.enable) {
898                         rc = qede_vxlan_enable(eth_dev,
899                                 ECORE_TUNN_CLSS_MAC_VLAN, true);
900                         if (rc != ECORE_SUCCESS) {
901                                 DP_ERR(edev, "Failed to enable VXLAN "
902                                         "prior to updating UDP port\n");
903                                 return rc;
904                         }
905                 }
906                 udp_port = tunnel_udp->udp_port;
907
908                 tunn.vxlan_port.b_update_port = true;
909                 tunn.vxlan_port.port = udp_port;
910
911                 rc = qede_tunnel_update(qdev, &tunn);
912                 if (rc != ECORE_SUCCESS) {
913                         DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
914                                udp_port);
915                         return rc;
916                 }
917
918                 DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
919
920                 qdev->vxlan.udp_port = udp_port;
921                 break;
922         case RTE_TUNNEL_TYPE_GENEVE:
923                 if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
924                         DP_INFO(edev,
925                                 "UDP port %u for GENEVE was already configured\n",
926                                 tunnel_udp->udp_port);
927                         return ECORE_SUCCESS;
928                 }
929
930                 /* Enable GENEVE tunnel with default MAC/VLAN classification if
931                  * it was not enabled while adding GENEVE filter before UDP port
932                  * update.
933                  */
934                 if (!qdev->geneve.enable) {
935                         rc = qede_geneve_enable(eth_dev,
936                                 ECORE_TUNN_CLSS_MAC_VLAN, true);
937                         if (rc != ECORE_SUCCESS) {
938                                 DP_ERR(edev, "Failed to enable GENEVE "
939                                         "prior to updating UDP port\n");
940                                 return rc;
941                         }
942                 }
943                 udp_port = tunnel_udp->udp_port;
944
945                 tunn.geneve_port.b_update_port = true;
946                 tunn.geneve_port.port = udp_port;
947
948                 rc = qede_tunnel_update(qdev, &tunn);
949                 if (rc != ECORE_SUCCESS) {
950                         DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
951                                udp_port);
952                         return rc;
953                 }
954
955                 DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
956
957                 qdev->geneve.udp_port = udp_port;
958                 break;
959         default:
960                 return ECORE_INVAL;
961         }
962
963         return 0;
964 }
965
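/* Maps an rte tunnel filter type onto the ecore ucast type and tunnel
 * classification; *clss is left at MAX_ECORE_TUNN_CLSS when the combination
 * is not supported.
 */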
966 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
967                                        uint32_t *clss, char *str)
968 {
969         uint16_t j;
970         *clss = MAX_ECORE_TUNN_CLSS;
971
972         for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
973                 if (filter == qede_tunn_types[j].rte_filter_type) {
974                         *type = qede_tunn_types[j].qede_type;
975                         *clss = qede_tunn_types[j].qede_tunn_clss;
976                         strcpy(str, qede_tunn_types[j].string);
977                         return;
978                 }
979         }
980 }
981
982 static int
983 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
984                               const struct rte_eth_tunnel_filter_conf *conf,
985                               uint32_t type)
986 {
987         /* Init common ucast params first */
988         qede_set_ucast_cmn_params(ucast);
989
990         /* Copy out the required fields based on classification type */
991         ucast->type = type;
992
993         switch (type) {
994         case ECORE_FILTER_VNI:
995                 ucast->vni = conf->tenant_id;
996         break;
997         case ECORE_FILTER_INNER_VLAN:
998                 ucast->vlan = conf->inner_vlan;
999         break;
1000         case ECORE_FILTER_MAC:
1001                 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
1002                        RTE_ETHER_ADDR_LEN);
1003         break;
1004         case ECORE_FILTER_INNER_MAC:
1005                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
1006                        RTE_ETHER_ADDR_LEN);
1007         break;
1008         case ECORE_FILTER_MAC_VNI_PAIR:
1009                 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
1010                         RTE_ETHER_ADDR_LEN);
1011                 ucast->vni = conf->tenant_id;
1012         break;
1013         case ECORE_FILTER_INNER_MAC_VNI_PAIR:
1014                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
1015                         RTE_ETHER_ADDR_LEN);
1016                 ucast->vni = conf->tenant_id;
1017         break;
1018         case ECORE_FILTER_INNER_PAIR:
1019                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
1020                         RTE_ETHER_ADDR_LEN);
1021                 ucast->vlan = conf->inner_vlan;
1022         break;
1023         default:
1024                 return -EINVAL;
1025         }
1026
1027         return ECORE_SUCCESS;
1028 }
1029
1030 static int
1031 _qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
1032                          const struct rte_eth_tunnel_filter_conf *conf,
1033                          __attribute__((unused)) enum rte_filter_op filter_op,
1034                          enum ecore_tunn_clss *clss,
1035                          bool add)
1036 {
1037         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1038         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1039         struct ecore_filter_ucast ucast = {0};
1040         enum ecore_filter_ucast_type type;
1041         uint16_t filter_type = 0;
1042         char str[80];
1043         int rc;
1044
1045         filter_type = conf->filter_type;
1046         /* Determine if the given filter classification is supported */
1047         qede_get_ecore_tunn_params(filter_type, &type, clss, str);
1048         if (*clss == MAX_ECORE_TUNN_CLSS) {
1049                 DP_ERR(edev, "Unsupported filter type\n");
1050                 return -EINVAL;
1051         }
1052         /* Init tunnel ucast params */
1053         rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
1054         if (rc != ECORE_SUCCESS) {
1055                 DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
1056                 conf->filter_type);
1057                 return rc;
1058         }
1059         DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
1060                 str, filter_op, ucast.type);
1061
1062         ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
1063
1064         /* Skip MAC/VLAN if filter is based on VNI */
1065         if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
1066                 rc = qede_mac_int_ops(eth_dev, &ucast, add);
1067                 if (rc == 0 && add) {
1068                         /* Enable accept anyvlan */
1069                         qede_config_accept_any_vlan(qdev, true);
1070                 }
1071         } else {
1072                 rc = qede_ucast_filter(eth_dev, &ucast, add);
1073                 if (rc == 0)
1074                         rc = ecore_filter_ucast_cmd(edev, &ucast,
1075                                             ECORE_SPQ_MODE_CB, NULL);
1076         }
1077
1078         return rc;
1079 }
1080
1081 static int
1082 qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
1083                  enum rte_eth_tunnel_type tunn_type, bool enable)
1084 {
1085         int rc = -EINVAL;
1086
1087         switch (tunn_type) {
1088         case RTE_TUNNEL_TYPE_VXLAN:
1089                 rc = qede_vxlan_enable(eth_dev, clss, enable);
1090                 break;
1091         case RTE_TUNNEL_TYPE_GENEVE:
1092                 rc = qede_geneve_enable(eth_dev, clss, enable);
1093                 break;
1094         case RTE_TUNNEL_TYPE_IP_IN_GRE:
1095                 rc = qede_ipgre_enable(eth_dev, clss, enable);
1096                 break;
1097         default:
1098                 rc = -EINVAL;
1099                 break;
1100         }
1101
1102         return rc;
1103 }
1104
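/* Tunnel (VXLAN/GENEVE/IP-GRE) filter add/delete. VFs only toggle the tunnel
 * offload itself; PFs also program the classification rule and track the
 * per-tunnel filter counts.
 */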
1105 static int
1106 qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
1107                         enum rte_filter_op filter_op,
1108                         const struct rte_eth_tunnel_filter_conf *conf)
1109 {
1110         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1111         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1112         enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
1113         bool add;
1114         int rc;
1115
1116         PMD_INIT_FUNC_TRACE(edev);
1117
1118         switch (filter_op) {
1119         case RTE_ETH_FILTER_ADD:
1120                 add = true;
1121                 break;
1122         case RTE_ETH_FILTER_DELETE:
1123                 add = false;
1124                 break;
1125         default:
1126                 DP_ERR(edev, "Unsupported operation %d\n", filter_op);
1127                 return -EINVAL;
1128         }
1129
1130         if (IS_VF(edev))
1131                 return qede_tunn_enable(eth_dev,
1132                                         ECORE_TUNN_CLSS_MAC_VLAN,
1133                                         conf->tunnel_type, add);
1134
1135         rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
1136         if (rc != ECORE_SUCCESS)
1137                 return rc;
1138
1139         if (add) {
1140                 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
1141                         qdev->vxlan.num_filters++;
1142                         qdev->vxlan.filter_type = conf->filter_type;
1143                 } else { /* GENEVE */
1144                         qdev->geneve.num_filters++;
1145                         qdev->geneve.filter_type = conf->filter_type;
1146                 }
1147
1148                 if (!qdev->vxlan.enable || !qdev->geneve.enable ||
1149                     !qdev->ipgre.enable)
1150                         return qede_tunn_enable(eth_dev, clss,
1151                                                 conf->tunnel_type,
1152                                                 true);
1153         } else {
1154                 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
1155                         qdev->vxlan.num_filters--;
1156                 else /*GENEVE*/
1157                         qdev->geneve.num_filters--;
1158
1159                 /* Disable tunnel offload if the filter count drops to 0 */
1160                 if (qdev->vxlan.num_filters == 0 ||
1161                     qdev->geneve.num_filters == 0)
1162                         return qede_tunn_enable(eth_dev, clss,
1163                                                 conf->tunnel_type,
1164                                                 false);
1165         }
1166
1167         return 0;
1168 }
1169
1170 static int
1171 qede_flow_validate_attr(__attribute__((unused))struct rte_eth_dev *dev,
1172                         const struct rte_flow_attr *attr,
1173                         struct rte_flow_error *error)
1174 {
1175         if (attr == NULL) {
1176                 rte_flow_error_set(error, EINVAL,
1177                                    RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1178                                    "NULL attribute");
1179                 return -rte_errno;
1180         }
1181
1182         if (attr->group != 0) {
1183                 rte_flow_error_set(error, ENOTSUP,
1184                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1185                                    "Groups are not supported");
1186                 return -rte_errno;
1187         }
1188
1189         if (attr->priority != 0) {
1190                 rte_flow_error_set(error, ENOTSUP,
1191                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
1192                                    "Priorities are not supported");
1193                 return -rte_errno;
1194         }
1195
1196         if (attr->egress != 0) {
1197                 rte_flow_error_set(error, ENOTSUP,
1198                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1199                                    "Egress is not supported");
1200                 return -rte_errno;
1201         }
1202
1203         if (attr->transfer != 0) {
1204                 rte_flow_error_set(error, ENOTSUP,
1205                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
1206                                    "Transfer is not supported");
1207                 return -rte_errno;
1208         }
1209
1210         if (attr->ingress == 0) {
1211                 rte_flow_error_set(error, ENOTSUP,
1212                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1213                                    "Only ingress is supported");
1214                 return -rte_errno;
1215         }
1216
1217         return 0;
1218 }
1219
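/* Illustrative only: a pattern accepted by this parser is a 4-tuple such as
 *   items:   IPV4 (spec only, no mask/last) -> UDP (spec only) -> END
 *   actions: QUEUE (index < QEDE_RSS_COUNT) or DROP -> END
 * Both an L3 item (IPV4/IPV6) and an L4 item (TCP/UDP) must be present.
 */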
1220 static int
1221 qede_flow_parse_pattern(__attribute__((unused))struct rte_eth_dev *dev,
1222                         const struct rte_flow_item pattern[],
1223                         struct rte_flow_error *error,
1224                         struct rte_flow *flow)
1225 {
1226         bool l3 = false, l4 = false;
1227
1228         if (pattern == NULL) {
1229                 rte_flow_error_set(error, EINVAL,
1230                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1231                                    "NULL pattern");
1232                 return -rte_errno;
1233         }
1234
1235         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1236                 if (!pattern->spec) {
1237                         rte_flow_error_set(error, EINVAL,
1238                                            RTE_FLOW_ERROR_TYPE_ITEM,
1239                                            pattern,
1240                                            "Item spec not defined");
1241                         return -rte_errno;
1242                 }
1243
1244                 if (pattern->last) {
1245                         rte_flow_error_set(error, EINVAL,
1246                                            RTE_FLOW_ERROR_TYPE_ITEM,
1247                                            pattern,
1248                                            "Item last not supported");
1249                         return -rte_errno;
1250                 }
1251
1252                 if (pattern->mask) {
1253                         rte_flow_error_set(error, EINVAL,
1254                                            RTE_FLOW_ERROR_TYPE_ITEM,
1255                                            pattern,
1256                                            "Item mask not supported");
1257                         return -rte_errno;
1258                 }
1259
1260                 /* Below validation is only for 4 tuple flow
1261                  * (GFT_PROFILE_TYPE_4_TUPLE)
1262                  * - src and dst L3 address (IPv4 or IPv6)
1263                  * - src and dst L4 port (TCP or UDP)
1264                  */
1265
1266                 switch (pattern->type) {
1267                 case RTE_FLOW_ITEM_TYPE_IPV4:
1268                         l3 = true;
1269
1270                         if (flow) {
1271                                 const struct rte_flow_item_ipv4 *spec;
1272
1273                                 spec = pattern->spec;
1274                                 flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
1275                                 flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
1276                                 flow->entry.tuple.eth_proto =
1277                                         RTE_ETHER_TYPE_IPV4;
1278                         }
1279                         break;
1280
1281                 case RTE_FLOW_ITEM_TYPE_IPV6:
1282                         l3 = true;
1283
1284                         if (flow) {
1285                                 const struct rte_flow_item_ipv6 *spec;
1286
1287                                 spec = pattern->spec;
1288                                 rte_memcpy(flow->entry.tuple.src_ipv6,
1289                                            spec->hdr.src_addr,
1290                                            IPV6_ADDR_LEN);
1291                                 rte_memcpy(flow->entry.tuple.dst_ipv6,
1292                                            spec->hdr.dst_addr,
1293                                            IPV6_ADDR_LEN);
1294                                 flow->entry.tuple.eth_proto =
1295                                         RTE_ETHER_TYPE_IPV6;
1296                         }
1297                         break;
1298
1299                 case RTE_FLOW_ITEM_TYPE_UDP:
1300                         l4 = true;
1301
1302                         if (flow) {
1303                                 const struct rte_flow_item_udp *spec;
1304
1305                                 spec = pattern->spec;
1306                                 flow->entry.tuple.src_port =
1307                                                 spec->hdr.src_port;
1308                                 flow->entry.tuple.dst_port =
1309                                                 spec->hdr.dst_port;
1310                                 flow->entry.tuple.ip_proto = IPPROTO_UDP;
1311                         }
1312                         break;
1313
1314                 case RTE_FLOW_ITEM_TYPE_TCP:
1315                         l4 = true;
1316
1317                         if (flow) {
1318                                 const struct rte_flow_item_tcp *spec;
1319
1320                                 spec = pattern->spec;
1321                                 flow->entry.tuple.src_port =
1322                                                 spec->hdr.src_port;
1323                                 flow->entry.tuple.dst_port =
1324                                                 spec->hdr.dst_port;
1325                                 flow->entry.tuple.ip_proto = IPPROTO_TCP;
1326                         }
1327
1328                         break;
1329                 default:
1330                         rte_flow_error_set(error, EINVAL,
1331                                            RTE_FLOW_ERROR_TYPE_ITEM,
1332                                            pattern,
1333                                            "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
1334                         return -rte_errno;
1335                 }
1336         }
1337
1338         if (!(l3 && l4)) {
1339                 rte_flow_error_set(error, EINVAL,
1340                                    RTE_FLOW_ERROR_TYPE_ITEM,
1341                                    pattern,
1342                                    "Item types need to have both L3 and L4 protocols");
1343                 return -rte_errno;
1344         }
1345
1346         return 0;
1347 }
1348
1349 static int
1350 qede_flow_parse_actions(struct rte_eth_dev *dev,
1351                         const struct rte_flow_action actions[],
1352                         struct rte_flow_error *error,
1353                         struct rte_flow *flow)
1354 {
1355         const struct rte_flow_action_queue *queue;
1356
1357         if (actions == NULL) {
1358                 rte_flow_error_set(error, EINVAL,
1359                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1360                                    "NULL actions");
1361                 return -rte_errno;
1362         }
1363
1364         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1365                 switch (actions->type) {
1366                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1367                         queue = actions->conf;
1368
1369                         if (queue->index >= QEDE_RSS_COUNT(dev)) {
1370                                 rte_flow_error_set(error, EINVAL,
1371                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1372                                                    actions,
1373                                                    "Bad QUEUE action");
1374                                 return -rte_errno;
1375                         }
1376
1377                         if (flow)
1378                                 flow->entry.rx_queue = queue->index;
1379
1380                         break;
1381                 case RTE_FLOW_ACTION_TYPE_DROP:
1382                         if (flow)
1383                                 flow->entry.is_drop = true;
1384                         break;
1385                 default:
1386                         rte_flow_error_set(error, ENOTSUP,
1387                                            RTE_FLOW_ERROR_TYPE_ACTION,
1388                                            actions,
1389                                            "Action is not supported - only ACTION_TYPE_QUEUE and ACTION_TYPE_DROP supported");
1390                         return -rte_errno;
1391                 }
1392         }
1393
1394         return 0;
1395 }
1396
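/* Validate the flow attributes, then translate the pattern items and actions
 * into the PMD-specific aRFS filter entry. A NULL flow performs validation
 * only, without filling anything in.
 */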
1397 static int
1398 qede_flow_parse(struct rte_eth_dev *dev,
1399                 const struct rte_flow_attr *attr,
1400                 const struct rte_flow_item patterns[],
1401                 const struct rte_flow_action actions[],
1402                 struct rte_flow_error *error,
1403                 struct rte_flow *flow)
1405 {
1406         int rc = 0;
1407
1408         rc = qede_flow_validate_attr(dev, attr, error);
1409         if (rc)
1410                 return rc;
1411
1412         /* Parse and validate the item pattern and actions. The given
1413          * item list and actions are translated into the qede PMD
1414          * specific aRFS filter structure.
1415          */
1416         rc = qede_flow_parse_pattern(dev, patterns, error, flow);
1417         if (rc)
1418                 return rc;
1419
1420         rc = qede_flow_parse_actions(dev, actions, error, flow);
1421
1422         return rc;
1423 }
1424
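/* rte_flow validate callback: run the full parse with a NULL flow so that
 * nothing is recorded or programmed into hardware.
 */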
1425 static int
1426 qede_flow_validate(struct rte_eth_dev *dev,
1427                    const struct rte_flow_attr *attr,
1428                    const struct rte_flow_item patterns[],
1429                    const struct rte_flow_action actions[],
1430                    struct rte_flow_error *error)
1431 {
1432         return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
1433 }
1434
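/* rte_flow create callback: allocate a flow object, parse the attributes,
 * pattern and actions into its aRFS entry and program the filter in hardware.
 * Returns NULL and sets the flow error on any failure.
 */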
1435 static struct rte_flow *
1436 qede_flow_create(struct rte_eth_dev *dev,
1437                  const struct rte_flow_attr *attr,
1438                  const struct rte_flow_item pattern[],
1439                  const struct rte_flow_action actions[],
1440                  struct rte_flow_error *error)
1441 {
1442         struct rte_flow *flow = NULL;
1443         int rc;
1444
1445         flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
1446         if (flow == NULL) {
1447                 rte_flow_error_set(error, ENOMEM,
1448                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1449                                    "Failed to allocate memory");
1450                 return NULL;
1451         }
1452
1453         rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
1454         if (rc < 0) {
1455                 rte_free(flow);
1456                 return NULL;
1457         }
1458
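	/* Program the aRFS filter in hardware ('true' selects addition). On
	 * failure the negated return code is reported as a positive errno via
	 * rte_flow_error_set().
	 */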
1459         rc = qede_config_arfs_filter(dev, &flow->entry, true);
1460         if (rc < 0) {
1461                 rte_flow_error_set(error, -rc,
1462                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1463                                    "Failed to configure flow filter");
1464                 rte_free(flow);
1465                 return NULL;
1466         }
1467
1468         return flow;
1469 }
1470
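/* rte_flow destroy callback: remove the aRFS filter from hardware and release
 * the flow object.
 */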
1471 static int
1472 qede_flow_destroy(struct rte_eth_dev *eth_dev,
1473                   struct rte_flow *flow,
1474                   struct rte_flow_error *error)
1475 {
1476         int rc = 0;
1477
1478         rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
1479         if (rc < 0)
1480                 rte_flow_error_set(error, -rc,
1481                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1482                                    "Failed to delete flow filter");
1483         /* Free the flow even on success so the handle is not leaked */
1484         rte_free(flow);
1485
1486         return rc;
1487 }
1488
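/* Generic flow (rte_flow) callbacks exposed via RTE_ETH_FILTER_GENERIC */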
1489 const struct rte_flow_ops qede_flow_ops = {
1490         .validate = qede_flow_validate,
1491         .create = qede_flow_create,
1492         .destroy = qede_flow_destroy,
1493 };
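/* Illustrative application-side sketch (not part of the driver): steer
 * IPv4/UDP traffic with destination port 4789 to Rx queue 1 through the
 * generic rte_flow API, which reaches the callbacks above. 'port_id' is
 * assumed to be a started qede port; only an exact-match 4-tuple is shown.
 *
 *	struct rte_flow_item_ipv4 ip4 = {
 *		.hdr.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
 *	};
 *	struct rte_flow_item_udp udp = {
 *		.hdr.src_port = RTE_BE16(1000),
 *		.hdr.dst_port = RTE_BE16(4789),
 *	};
 *	struct rte_flow_action_queue q = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,  .spec = &udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */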
1494
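/* Legacy filter_ctrl entry point: dispatch tunnel, flow director and ntuple
 * filter requests to their handlers, and hand back the rte_flow ops table for
 * RTE_ETH_FILTER_GENERIC (not available in 100G CMT mode).
 */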
1495 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
1496                          enum rte_filter_type filter_type,
1497                          enum rte_filter_op filter_op,
1498                          void *arg)
1499 {
1500         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1501         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1502         struct rte_eth_tunnel_filter_conf *filter_conf =
1503                         (struct rte_eth_tunnel_filter_conf *)arg;
1504
1505         switch (filter_type) {
1506         case RTE_ETH_FILTER_TUNNEL:
1507                 switch (filter_conf->tunnel_type) {
1508                 case RTE_TUNNEL_TYPE_VXLAN:
1509                 case RTE_TUNNEL_TYPE_GENEVE:
1510                 case RTE_TUNNEL_TYPE_IP_IN_GRE:
1511                         DP_INFO(edev,
1512                                 "Packet steering to the specified Rx queue"
1513                                 " is not supported with UDP tunneling\n");
1514                         return qede_tunn_filter_config(eth_dev, filter_op,
1515                                                        filter_conf);
1516                 case RTE_TUNNEL_TYPE_TEREDO:
1517                 case RTE_TUNNEL_TYPE_NVGRE:
1518                 case RTE_L2_TUNNEL_TYPE_E_TAG:
1519                         DP_ERR(edev, "Unsupported tunnel type %d\n",
1520                                 filter_conf->tunnel_type);
1521                         return -EINVAL;
1522                 case RTE_TUNNEL_TYPE_NONE:
1523                 default:
1524                         return 0;
1525                 }
1526                 break;
1527         case RTE_ETH_FILTER_FDIR:
1528                 return qede_fdir_filter_conf(eth_dev, filter_op, arg);
1529         case RTE_ETH_FILTER_NTUPLE:
1530                 return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
1531         case RTE_ETH_FILTER_GENERIC:
1532                 if (ECORE_IS_CMT(edev)) {
1533                         DP_ERR(edev, "flowdir is not supported in 100G mode\n");
1534                         return -ENOTSUP;
1535                 }
1536
1537                 if (filter_op != RTE_ETH_FILTER_GET)
1538                         return -EINVAL;
1539
1540                 *(const void **)arg = &qede_flow_ops;
1541                 return 0;
1542         case RTE_ETH_FILTER_MACVLAN:
1543         case RTE_ETH_FILTER_ETHERTYPE:
1544         case RTE_ETH_FILTER_FLEXIBLE:
1545         case RTE_ETH_FILTER_SYN:
1546         case RTE_ETH_FILTER_HASH:
1547         case RTE_ETH_FILTER_L2_TUNNEL:
1548         case RTE_ETH_FILTER_MAX:
1549         default:
1550                 DP_ERR(edev, "Unsupported filter type %d\n",
1551                         filter_type);
1552                 return -EINVAL;
1553         }
1554
1555         return 0;
1556 }