net: add rte prefix to ether structures
drivers/net/qede/qede_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_errno.h>
#include <rte_flow_driver.h>

#include "qede_ethdev.h"

/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
        uint16_t rte_filter_type;
        enum ecore_filter_ucast_type qede_type;
        enum ecore_tunn_clss qede_tunn_clss;
        const char *string;
} qede_tunn_types[] = {
        {
                ETH_TUNNEL_FILTER_OMAC,
                ECORE_FILTER_MAC,
                ECORE_TUNN_CLSS_MAC_VLAN,
                "outer-mac"
        },
        {
                ETH_TUNNEL_FILTER_TENID,
                ECORE_FILTER_VNI,
                ECORE_TUNN_CLSS_MAC_VNI,
                "vni"
        },
        {
                ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_INNER_MAC,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-mac"
        },
        {
                ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_INNER_VLAN,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
                ECORE_FILTER_MAC_VNI_PAIR,
                ECORE_TUNN_CLSS_MAC_VNI,
                "outer-mac and vni"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-mac and inner-mac"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-mac and inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_INNER_MAC_VNI_PAIR,
                ECORE_TUNN_CLSS_INNER_MAC_VNI,
                "vni and inner-mac"
        },
        {
                ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "vni and inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_INNER_PAIR,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-mac and inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_OIP,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-IP"
        },
        {
                ETH_TUNNEL_FILTER_IIP,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "inner-IP"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_IVLAN"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_IVLAN_TENID"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_TENID,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_TENID"
        },
        {
                RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "OMAC_TENID_IMAC"
        },
};

#define IP_VERSION                              (0x40)
#define IP_HDRLEN                               (0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL        (IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF           (0x50)
#define QEDE_FDIR_IPV4_DEF_TTL                  (64)
#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW         (0x60000000)
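/* The defaults above: version_ihl 0x45 is IPv4 with a 5-word (20 byte)
 * header, and data_off 0x50 is a 5-word (20 byte) TCP header, no options.
 */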
/* Sum of the header lengths for L2, L3 and L4.
 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
 * L3 : ipv6_hdr
 * L4 : tcp_hdr
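 * i.e. 14 (ether) + 4 (vlan) + 8 (vxlan) + 40 (ipv6) + 20 (tcp) = 86 bytes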
 */
#define QEDE_MAX_FDIR_PKT_LEN                   (86)

static inline bool qede_valid_flow(uint16_t flow_type)
{
        return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
                (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
                (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
                (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
}

static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
                        struct qede_arfs_entry *arfs,
                        void *buff,
                        struct ecore_arfs_config_params *params);

/* Note: Flowdir support is only partial.
 * For example: drop_queue, FDIR masks and flex_conf are not supported.
 * Parameters like pballoc/status fields are irrelevant here.
 */
int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;

        /* check FDIR modes */
        switch (fdir->mode) {
        case RTE_FDIR_MODE_NONE:
                qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
                DP_INFO(edev, "flowdir is disabled\n");
                break;
        case RTE_FDIR_MODE_PERFECT:
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        qdev->arfs_info.arfs.mode =
                                ECORE_FILTER_CONFIG_MODE_DISABLE;
                        return -ENOTSUP;
                }
                qdev->arfs_info.arfs.mode =
                                ECORE_FILTER_CONFIG_MODE_5_TUPLE;
                DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
                break;
        case RTE_FDIR_MODE_PERFECT_TUNNEL:
        case RTE_FDIR_MODE_SIGNATURE:
        case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
                DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
                return -ENOTSUP;
        }

        return 0;
}
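
/* Usage sketch (application side, not part of this driver): flowdir is
 * selected through the port configuration before rte_eth_dev_configure();
 * port_id, nb_rxq and nb_txq are hypothetical values:
 *
 *        struct rte_eth_conf port_conf;
 *
 *        memset(&port_conf, 0, sizeof(port_conf));
 *        port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
 *        rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */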

void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct qede_arfs_entry *tmp = NULL;

        /* Pop entries one at a time; removing inside SLIST_FOREACH would
         * dereference the node that was just freed.
         */
        while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
                tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
                if (tmp->mz)
                        rte_memzone_free(tmp->mz);
                SLIST_REMOVE_HEAD(&qdev->arfs_info.arfs_list_head, list);
                rte_free(tmp);
        }
}

static int
qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
                         struct rte_eth_fdir_filter *fdir,
                         struct qede_arfs_entry *arfs)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_fdir_input *input;

        static const uint8_t next_proto[] = {
                [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
                [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
                [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
                [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
        };

        input = &fdir->input;

        DP_INFO(edev, "flow_type %d\n", input->flow_type);

        switch (input->flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
                /* fill the common ip header */
                arfs->tuple.eth_proto = ETHER_TYPE_IPv4;
                arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
                arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
                arfs->tuple.ip_proto = next_proto[input->flow_type];

                /* UDP */
                if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
                        arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
                        arfs->tuple.src_port = input->flow.udp4_flow.src_port;
                } else { /* TCP */
                        arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
                        arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
                }
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
                arfs->tuple.eth_proto = ETHER_TYPE_IPv6;
                arfs->tuple.ip_proto = next_proto[input->flow_type];
                rte_memcpy(arfs->tuple.dst_ipv6,
                           &input->flow.ipv6_flow.dst_ip,
                           IPV6_ADDR_LEN);
                rte_memcpy(arfs->tuple.src_ipv6,
                           &input->flow.ipv6_flow.src_ip,
                           IPV6_ADDR_LEN);

                /* UDP */
                if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
                        arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
                        arfs->tuple.src_port = input->flow.udp6_flow.src_port;
                } else { /* TCP */
                        arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
                        arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
                }
                break;
        default:
                DP_ERR(edev, "Unsupported flow_type %u\n",
                       input->flow_type);
                return -ENOTSUP;
        }

        arfs->rx_queue = fdir->action.rx_queue;
        return 0;
}

static int
qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
                        struct qede_arfs_entry *arfs,
                        bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
        struct qede_arfs_entry *tmp = NULL;
        const struct rte_memzone *mz;
        struct ecore_hwfn *p_hwfn;
        enum _ecore_status_t rc;
        uint16_t pkt_len;
        void *pkt;

        if (add) {
                if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
                        DP_ERR(edev, "Reached max flowdir filter limit\n");
                        return -EINVAL;
                }
        }

        /* soft_id could have been used as the memzone name, but soft_id is
         * not currently used, so it has no significance.
         */
        snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
                 (unsigned long)rte_get_timer_cycles());
        mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
                                         SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
        if (!mz) {
                DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
                       rte_strerror(rte_errno));
                return -rte_errno;
        }

        pkt = mz->addr;
        memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
        pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
                                          &qdev->arfs_info.arfs);
        if (pkt_len == 0) {
                rc = -EINVAL;
                goto err1;
        }

        DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
        if (add) {
                SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
                                DP_INFO(edev, "flowdir filter already exists\n");
                                rc = -EEXIST;
                                goto err1;
                        }
                }
        } else {
                SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
                                break;
                }
                if (!tmp) {
                        DP_ERR(edev, "flowdir filter does not exist\n");
                        rc = -ENOENT;
                        goto err1;
                }
        }
        p_hwfn = ECORE_LEADING_HWFN(edev);
        if (add) {
                if (qdev->arfs_info.arfs.mode ==
                        ECORE_FILTER_CONFIG_MODE_DISABLE) {
                        /* Force update */
                        eth_dev->data->dev_conf.fdir_conf.mode =
                                                RTE_FDIR_MODE_PERFECT;
                        qdev->arfs_info.arfs.mode =
                                        ECORE_FILTER_CONFIG_MODE_5_TUPLE;
                        DP_INFO(edev, "Force enable flowdir in perfect mode\n");
                }
                /* Enable ARFS searcher with updated flow_types */
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                          &qdev->arfs_info.arfs);
        }
        /* configure filter with ECORE_SPQ_MODE_EBLOCK */
        rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
                                               (dma_addr_t)mz->iova,
                                               pkt_len,
                                               arfs->rx_queue,
                                               0, add);
        if (rc == ECORE_SUCCESS) {
                if (add) {
                        arfs->pkt_len = pkt_len;
                        arfs->mz = mz;
                        SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
                                          arfs, list);
                        qdev->arfs_info.filter_count++;
                        DP_INFO(edev, "flowdir filter added, count = %d\n",
                                qdev->arfs_info.filter_count);
                } else {
                        rte_memzone_free(tmp->mz);
                        SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
                                     qede_arfs_entry, list);
                        rte_free(tmp); /* free the deleted node */
                        rte_memzone_free(mz); /* free the temporary memzone */
                        qdev->arfs_info.filter_count--;
                        DP_INFO(edev, "flowdir filter deleted, count = %d\n",
                                qdev->arfs_info.filter_count);
                }
        } else {
                DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
                       rc, qdev->arfs_info.filter_count);
        }

        /* Disable ARFS searcher if there are no more filters */
        if (qdev->arfs_info.filter_count == 0) {
                memset(&qdev->arfs_info.arfs, 0,
                       sizeof(struct ecore_arfs_config_params));
                DP_INFO(edev, "Disabling flowdir\n");
                qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                          &qdev->arfs_info.arfs);
        }
        return 0;

err1:
        rte_memzone_free(mz);
        return rc;
}

static int
qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
                            struct rte_eth_fdir_filter *fdir_filter,
                            bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_arfs_entry *arfs = NULL;
        int rc = 0;

        arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
                          RTE_CACHE_LINE_SIZE);
        if (!arfs) {
                DP_ERR(edev, "Failed to allocate memory for arfs\n");
                return -ENOMEM;
        }

        rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
        if (rc < 0) {
                rte_free(arfs);
                return rc;
        }

        rc = qede_config_arfs_filter(eth_dev, arfs, add);
        /* The entry is kept only when it was successfully added to the list */
        if (rc < 0 || !add)
                rte_free(arfs);

        return rc;
}

static int
qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
                     struct rte_eth_fdir_filter *fdir,
                     bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        if (!qede_valid_flow(fdir->input.flow_type)) {
                DP_ERR(edev, "invalid flow_type input\n");
                return -EINVAL;
        }

        if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
                DP_ERR(edev, "invalid queue number %u\n",
                       fdir->action.rx_queue);
                return -EINVAL;
        }

        if (fdir->input.flow_ext.is_vf) {
                DP_ERR(edev, "flowdir is not supported over VF\n");
                return -EINVAL;
        }

        return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
}

/* Fills the L3/L4 headers and returns the actual length of the flowdir packet */
static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
                        struct qede_arfs_entry *arfs,
                        void *buff,
                        struct ecore_arfs_config_params *params)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        uint16_t *ether_type;
        uint8_t *raw_pkt;
        struct ipv4_hdr *ip;
        struct ipv6_hdr *ip6;
        struct udp_hdr *udp;
        struct tcp_hdr *tcp;
        uint16_t len;

        raw_pkt = (uint8_t *)buff;

        len = 2 * sizeof(struct rte_ether_addr);
        raw_pkt += 2 * sizeof(struct rte_ether_addr);
        ether_type = (uint16_t *)raw_pkt;
        raw_pkt += sizeof(uint16_t);
        len += sizeof(uint16_t);

        *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
        switch (arfs->tuple.eth_proto) {
        case ETHER_TYPE_IPv4:
                ip = (struct ipv4_hdr *)raw_pkt;
                ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
                ip->total_length = sizeof(struct ipv4_hdr);
                ip->next_proto_id = arfs->tuple.ip_proto;
                ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
                ip->dst_addr = arfs->tuple.dst_ipv4;
                ip->src_addr = arfs->tuple.src_ipv4;
                len += sizeof(struct ipv4_hdr);
                params->ipv4 = true;

                raw_pkt = (uint8_t *)buff;
                /* UDP */
                if (arfs->tuple.ip_proto == IPPROTO_UDP) {
                        udp = (struct udp_hdr *)(raw_pkt + len);
                        udp->dst_port = arfs->tuple.dst_port;
                        udp->src_port = arfs->tuple.src_port;
                        udp->dgram_len = sizeof(struct udp_hdr);
                        len += sizeof(struct udp_hdr);
                        /* adjust ip total_length */
                        ip->total_length += sizeof(struct udp_hdr);
                        params->udp = true;
                } else { /* TCP */
                        tcp = (struct tcp_hdr *)(raw_pkt + len);
                        tcp->src_port = arfs->tuple.src_port;
                        tcp->dst_port = arfs->tuple.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
                        len += sizeof(struct tcp_hdr);
                        /* adjust ip total_length */
                        ip->total_length += sizeof(struct tcp_hdr);
                        params->tcp = true;
                }
                break;
        case ETHER_TYPE_IPv6:
                ip6 = (struct ipv6_hdr *)raw_pkt;
                ip6->proto = arfs->tuple.ip_proto;
                ip6->vtc_flow =
                        rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);

                rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
                           IPV6_ADDR_LEN);
                rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
                           IPV6_ADDR_LEN);
                len += sizeof(struct ipv6_hdr);
                params->ipv6 = true;

                raw_pkt = (uint8_t *)buff;
                /* UDP */
                if (arfs->tuple.ip_proto == IPPROTO_UDP) {
                        udp = (struct udp_hdr *)(raw_pkt + len);
                        udp->src_port = arfs->tuple.src_port;
                        udp->dst_port = arfs->tuple.dst_port;
                        len += sizeof(struct udp_hdr);
                        params->udp = true;
                } else { /* TCP */
                        tcp = (struct tcp_hdr *)(raw_pkt + len);
                        tcp->src_port = arfs->tuple.src_port;
                        tcp->dst_port = arfs->tuple.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
                        len += sizeof(struct tcp_hdr);
                        params->tcp = true;
                }
                break;
        default:
                DP_ERR(edev, "Unsupported eth_proto %u\n",
                       arfs->tuple.eth_proto);
                return 0;
        }

        return len;
}

static int
qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
                      enum rte_filter_op filter_op,
                      void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_fdir_filter *fdir;
        int ret;

        fdir = (struct rte_eth_fdir_filter *)arg;
        switch (filter_op) {
        case RTE_ETH_FILTER_NOP:
                /* Typically used to query flowdir support */
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        return -ENOTSUP;
                }
                return 0; /* means supported */
        case RTE_ETH_FILTER_ADD:
                ret = qede_fdir_filter_add(eth_dev, fdir, 1);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = qede_fdir_filter_add(eth_dev, fdir, 0);
                break;
        case RTE_ETH_FILTER_FLUSH:
        case RTE_ETH_FILTER_UPDATE:
        case RTE_ETH_FILTER_INFO:
                return -ENOTSUP;
        default:
                DP_ERR(edev, "unknown operation %u\n", filter_op);
                ret = -EINVAL;
        }

        return ret;
}
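
/* Usage sketch (application side): a minimal flowdir add through the legacy
 * filter-ctrl API, which lands in qede_fdir_filter_conf() above. port_id,
 * the addresses, the ports and queue 1 are hypothetical:
 *
 *        struct rte_eth_fdir_filter f;
 *
 *        memset(&f, 0, sizeof(f));
 *        f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *        f.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(IPv4(1, 1, 1, 1));
 *        f.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(2, 2, 2, 2));
 *        f.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(4000);
 *        f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(5000);
 *        f.action.rx_queue = 1;
 *        rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                                RTE_ETH_FILTER_ADD, &f);
 */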

int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
                            enum rte_filter_op filter_op,
                            void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_ntuple_filter *ntuple;
        struct rte_eth_fdir_filter fdir_entry;
        struct rte_eth_tcpv4_flow *tcpv4_flow;
        struct rte_eth_udpv4_flow *udpv4_flow;
        bool add = false;

        switch (filter_op) {
        case RTE_ETH_FILTER_NOP:
                /* Typically used to query fdir support */
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        return -ENOTSUP;
                }
                return 0; /* means supported */
        case RTE_ETH_FILTER_ADD:
                add = true;
                break;
        case RTE_ETH_FILTER_DELETE:
                break;
        case RTE_ETH_FILTER_INFO:
        case RTE_ETH_FILTER_GET:
        case RTE_ETH_FILTER_UPDATE:
        case RTE_ETH_FILTER_FLUSH:
        case RTE_ETH_FILTER_SET:
        case RTE_ETH_FILTER_STATS:
        case RTE_ETH_FILTER_OP_MAX:
                DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
                return -ENOTSUP;
        }
        ntuple = (struct rte_eth_ntuple_filter *)arg;
        /* Internally convert ntuple to fdir entry */
        memset(&fdir_entry, 0, sizeof(fdir_entry));
        if (ntuple->proto == IPPROTO_TCP) {
                fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
                tcpv4_flow->ip.src_ip = ntuple->src_ip;
                tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
                tcpv4_flow->ip.proto = IPPROTO_TCP;
                tcpv4_flow->src_port = ntuple->src_port;
                tcpv4_flow->dst_port = ntuple->dst_port;
        } else {
                fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                udpv4_flow = &fdir_entry.input.flow.udp4_flow;
                udpv4_flow->ip.src_ip = ntuple->src_ip;
                udpv4_flow->ip.dst_ip = ntuple->dst_ip;
                udpv4_flow->ip.proto = IPPROTO_UDP;
                udpv4_flow->src_port = ntuple->src_port;
                udpv4_flow->dst_port = ntuple->dst_port;
        }

        fdir_entry.action.rx_queue = ntuple->queue;

        return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
}
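
/* Usage sketch (application side): the same 5-tuple expressed as an ntuple
 * filter; this driver honours only the 5-tuple fields and treats them as
 * exact matches. Values and port_id are hypothetical:
 *
 *        struct rte_eth_ntuple_filter nt;
 *
 *        memset(&nt, 0, sizeof(nt));
 *        nt.flags = RTE_5TUPLE_FLAGS;
 *        nt.proto = IPPROTO_UDP;
 *        nt.src_ip = rte_cpu_to_be_32(IPv4(1, 1, 1, 1));
 *        nt.dst_ip = rte_cpu_to_be_32(IPv4(2, 2, 2, 2));
 *        nt.src_port = rte_cpu_to_be_16(4000);
 *        nt.dst_port = rte_cpu_to_be_16(5000);
 *        nt.queue = 1;
 *        rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *                                RTE_ETH_FILTER_ADD, &nt);
 */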

static int
qede_tunnel_update(struct qede_dev *qdev,
                   struct ecore_tunnel_info *tunn_info)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_hwfn *p_hwfn;
        struct ecore_ptt *p_ptt;
        int i;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                if (IS_PF(edev)) {
                        p_ptt = ecore_ptt_acquire(p_hwfn);
                        if (!p_ptt) {
                                DP_ERR(p_hwfn, "Can't acquire PTT\n");
                                return -EAGAIN;
                        }
                } else {
                        p_ptt = NULL;
                }

                rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
                                tunn_info, ECORE_SPQ_MODE_CB, NULL);
                if (IS_PF(edev))
                        ecore_ptt_release(p_hwfn, p_ptt);

                if (rc != ECORE_SUCCESS)
                        break;
        }

        return rc;
}

static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                  bool enable)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_tunnel_info tunn;

        if (qdev->vxlan.enable == enable)
                return ECORE_SUCCESS;

        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
        tunn.vxlan.b_update_mode = true;
        tunn.vxlan.b_mode_enabled = enable;
        tunn.b_update_rx_cls = true;
        tunn.b_update_tx_cls = true;
        tunn.vxlan.tun_cls = clss;

        tunn.vxlan_port.b_update_port = true;
        tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

        rc = qede_tunnel_update(qdev, &tunn);
        if (rc == ECORE_SUCCESS) {
                qdev->vxlan.enable = enable;
                qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
                DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
                        enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
        } else {
                DP_ERR(edev, "Failed to update tunn_clss %u\n",
                       tunn.vxlan.tun_cls);
        }

        return rc;
}

static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                   bool enable)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_tunnel_info tunn;

        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
        tunn.l2_geneve.b_update_mode = true;
        tunn.l2_geneve.b_mode_enabled = enable;
        tunn.ip_geneve.b_update_mode = true;
        tunn.ip_geneve.b_mode_enabled = enable;
        tunn.l2_geneve.tun_cls = clss;
        tunn.ip_geneve.tun_cls = clss;
        tunn.b_update_rx_cls = true;
        tunn.b_update_tx_cls = true;

        tunn.geneve_port.b_update_port = true;
        tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

        rc = qede_tunnel_update(qdev, &tunn);
        if (rc == ECORE_SUCCESS) {
                qdev->geneve.enable = enable;
                qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
                DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
                        enable ? "enabled" : "disabled", qdev->geneve.udp_port);
        } else {
                DP_ERR(edev, "Failed to update tunn_clss %u\n",
                       clss);
        }

        return rc;
}

static int
qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                  bool enable)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_tunnel_info tunn;

        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
        tunn.ip_gre.b_update_mode = true;
        tunn.ip_gre.b_mode_enabled = enable;
        tunn.ip_gre.tun_cls = clss;
        tunn.b_update_rx_cls = true;
        tunn.b_update_tx_cls = true;

        rc = qede_tunnel_update(qdev, &tunn);
        if (rc == ECORE_SUCCESS) {
                qdev->ipgre.enable = enable;
                DP_INFO(edev, "IPGRE is %s\n",
                        enable ? "enabled" : "disabled");
        } else {
                DP_ERR(edev, "Failed to update tunn_clss %u\n",
                       clss);
        }

        return rc;
}

int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
                      struct rte_eth_udp_tunnel *tunnel_udp)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_tunnel_info tunn; /* @DPDK */
        uint16_t udp_port;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        memset(&tunn, 0, sizeof(tunn));

        switch (tunnel_udp->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
                        DP_ERR(edev, "UDP port %u doesn't exist\n",
                                tunnel_udp->udp_port);
                        return ECORE_INVAL;
                }
                udp_port = 0;

                tunn.vxlan_port.b_update_port = true;
                tunn.vxlan_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u\n",
                               tunn.vxlan_port.port);
                        return rc;
                }

                qdev->vxlan.udp_port = udp_port;
                /* If the request is to delete the UDP port and the number of
                 * VXLAN filters has reached 0, then VXLAN offload can be
                 * disabled.
                 */
                if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
                        return qede_vxlan_enable(eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN, false);

                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
                        DP_ERR(edev, "UDP port %u doesn't exist\n",
                                tunnel_udp->udp_port);
                        return ECORE_INVAL;
                }

                udp_port = 0;

                tunn.geneve_port.b_update_port = true;
                tunn.geneve_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u\n",
                               tunn.geneve_port.port);
                        return rc;
                }

                qdev->geneve.udp_port = udp_port;
                /* If the request is to delete the UDP port and the number of
                 * GENEVE filters has reached 0, then GENEVE offload can be
                 * disabled.
                 */
                if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
                        return qede_geneve_enable(eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN, false);

                break;

        default:
                return ECORE_INVAL;
        }

        return 0;
}

int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
                      struct rte_eth_udp_tunnel *tunnel_udp)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_tunnel_info tunn; /* @DPDK */
        uint16_t udp_port;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        memset(&tunn, 0, sizeof(tunn));

        switch (tunnel_udp->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
                        DP_INFO(edev,
                                "UDP port %u for VXLAN was already configured\n",
                                tunnel_udp->udp_port);
                        return ECORE_SUCCESS;
                }

                /* Enable the VXLAN tunnel with default MAC/VLAN
                 * classification if it was not already enabled while adding
                 * a VXLAN filter before the UDP port update.
                 */
                if (!qdev->vxlan.enable) {
                        rc = qede_vxlan_enable(eth_dev,
                                ECORE_TUNN_CLSS_MAC_VLAN, true);
                        if (rc != ECORE_SUCCESS) {
                                DP_ERR(edev, "Failed to enable VXLAN "
                                        "prior to updating UDP port\n");
                                return rc;
                        }
                }
                udp_port = tunnel_udp->udp_port;

                tunn.vxlan_port.b_update_port = true;
                tunn.vxlan_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
                               udp_port);
                        return rc;
                }

                DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);

                qdev->vxlan.udp_port = udp_port;
                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
                        DP_INFO(edev,
                                "UDP port %u for GENEVE was already configured\n",
                                tunnel_udp->udp_port);
                        return ECORE_SUCCESS;
                }

                /* Enable the GENEVE tunnel with default MAC/VLAN
                 * classification if it was not already enabled while adding
                 * a GENEVE filter before the UDP port update.
                 */
                if (!qdev->geneve.enable) {
                        rc = qede_geneve_enable(eth_dev,
                                ECORE_TUNN_CLSS_MAC_VLAN, true);
                        if (rc != ECORE_SUCCESS) {
                                DP_ERR(edev, "Failed to enable GENEVE "
                                        "prior to updating UDP port\n");
                                return rc;
                        }
                }
                udp_port = tunnel_udp->udp_port;

                tunn.geneve_port.b_update_port = true;
                tunn.geneve_port.port = udp_port;

                rc = qede_tunnel_update(qdev, &tunn);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
                               udp_port);
                        return rc;
                }

                DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);

                qdev->geneve.udp_port = udp_port;
                break;
        default:
                return ECORE_INVAL;
        }

        return 0;
}
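
/* Usage sketch (application side): replacing the default VXLAN UDP port via
 * the ethdev API, which ends up in qede_udp_dst_port_add() above. port_id
 * and the port number are hypothetical:
 *
 *        struct rte_eth_udp_tunnel tunnel_udp = {
 *                .udp_port = 8472,
 *                .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *        };
 *
 *        rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
 */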

static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
                                       uint32_t *clss, char *str)
{
        uint16_t j;

        *clss = MAX_ECORE_TUNN_CLSS;

        for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
                if (filter == qede_tunn_types[j].rte_filter_type) {
                        *type = qede_tunn_types[j].qede_type;
                        *clss = qede_tunn_types[j].qede_tunn_clss;
                        strcpy(str, qede_tunn_types[j].string);
                        return;
                }
        }
}

static int
qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
                              const struct rte_eth_tunnel_filter_conf *conf,
                              uint32_t type)
{
        /* Init common ucast params first */
        qede_set_ucast_cmn_params(ucast);

        /* Copy out the required fields based on classification type */
        ucast->type = type;

        switch (type) {
        case ECORE_FILTER_VNI:
                ucast->vni = conf->tenant_id;
                break;
        case ECORE_FILTER_INNER_VLAN:
                ucast->vlan = conf->inner_vlan;
                break;
        case ECORE_FILTER_MAC:
                memcpy(ucast->mac, conf->outer_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                break;
        case ECORE_FILTER_INNER_MAC:
                memcpy(ucast->mac, conf->inner_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                break;
        case ECORE_FILTER_MAC_VNI_PAIR:
                memcpy(ucast->mac, conf->outer_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                ucast->vni = conf->tenant_id;
                break;
        case ECORE_FILTER_INNER_MAC_VNI_PAIR:
                memcpy(ucast->mac, conf->inner_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                ucast->vni = conf->tenant_id;
                break;
        case ECORE_FILTER_INNER_PAIR:
                memcpy(ucast->mac, conf->inner_mac.addr_bytes,
                       ETHER_ADDR_LEN);
                ucast->vlan = conf->inner_vlan;
                break;
        default:
                return -EINVAL;
        }

        return ECORE_SUCCESS;
}

static int
_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
                         const struct rte_eth_tunnel_filter_conf *conf,
                         __attribute__((unused)) enum rte_filter_op filter_op,
                         enum ecore_tunn_clss *clss,
                         bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_filter_ucast ucast = {0};
        enum ecore_filter_ucast_type type;
        uint16_t filter_type = 0;
        char str[80];
        int rc;

        filter_type = conf->filter_type;
        /* Determine if the given filter classification is supported */
        qede_get_ecore_tunn_params(filter_type, &type, clss, str);
        if (*clss == MAX_ECORE_TUNN_CLSS) {
                DP_ERR(edev, "Unsupported filter type\n");
                return -EINVAL;
        }
        /* Init tunnel ucast params */
        rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
                       conf->filter_type);
                return rc;
        }
        DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
                str, filter_op, ucast.type);

        ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;

        /* Skip MAC/VLAN if filter is based on VNI */
        if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
                rc = qede_mac_int_ops(eth_dev, &ucast, add);
                if (rc == 0 && add) {
                        /* Enable accept anyvlan */
                        qede_config_accept_any_vlan(qdev, true);
                }
        } else {
                rc = qede_ucast_filter(eth_dev, &ucast, add);
                if (rc == 0)
                        rc = ecore_filter_ucast_cmd(edev, &ucast,
                                                    ECORE_SPQ_MODE_CB, NULL);
        }

        return rc;
}

static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                 enum rte_eth_tunnel_type tunn_type, bool enable)
{
        int rc = -EINVAL;

        switch (tunn_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                rc = qede_vxlan_enable(eth_dev, clss, enable);
                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                rc = qede_geneve_enable(eth_dev, clss, enable);
                break;
        case RTE_TUNNEL_TYPE_IP_IN_GRE:
                rc = qede_ipgre_enable(eth_dev, clss, enable);
                break;
        default:
                rc = -EINVAL;
                break;
        }

        return rc;
}

static int
qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
                        enum rte_filter_op filter_op,
                        const struct rte_eth_tunnel_filter_conf *conf)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
        bool add;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                add = true;
                break;
        case RTE_ETH_FILTER_DELETE:
                add = false;
                break;
        default:
                DP_ERR(edev, "Unsupported operation %d\n", filter_op);
                return -EINVAL;
        }

        if (IS_VF(edev))
                return qede_tunn_enable(eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN,
                                        conf->tunnel_type, add);

        rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
        if (rc != ECORE_SUCCESS)
                return rc;

        if (add) {
                if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
                        qdev->vxlan.num_filters++;
                        qdev->vxlan.filter_type = conf->filter_type;
                } else { /* GENEVE */
                        qdev->geneve.num_filters++;
                        qdev->geneve.filter_type = conf->filter_type;
                }

                if (!qdev->vxlan.enable || !qdev->geneve.enable ||
                    !qdev->ipgre.enable)
                        return qede_tunn_enable(eth_dev, clss,
                                                conf->tunnel_type,
                                                true);
        } else {
                if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
                        qdev->vxlan.num_filters--;
                else /* GENEVE */
                        qdev->geneve.num_filters--;

                /* Disable the tunnel offload if its filter count drops to 0 */
                if (qdev->vxlan.num_filters == 0 ||
                    qdev->geneve.num_filters == 0)
                        return qede_tunn_enable(eth_dev, clss,
                                                conf->tunnel_type,
                                                false);
        }

        return 0;
}

static int
qede_flow_validate_attr(__attribute__((unused)) struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        struct rte_flow_error *error)
{
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }

        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }

        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }

        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }

        if (attr->transfer != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
                                   "Transfer is not supported");
                return -rte_errno;
        }

        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        return 0;
}

static int
qede_flow_parse_pattern(__attribute__((unused)) struct rte_eth_dev *dev,
                        const struct rte_flow_item pattern[],
                        struct rte_flow_error *error,
                        struct rte_flow *flow)
{
        bool l3 = false, l4 = false;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                if (!pattern->spec) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           pattern,
                                           "Item spec not defined");
                        return -rte_errno;
                }

                if (pattern->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           pattern,
                                           "Item last not supported");
                        return -rte_errno;
                }

                if (pattern->mask) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           pattern,
                                           "Item mask not supported");
                        return -rte_errno;
                }

                /* Below validation is only for 4 tuple flow
                 * (GFT_PROFILE_TYPE_4_TUPLE)
                 * - src and dst L3 address (IPv4 or IPv6)
                 * - src and dst L4 port (TCP or UDP)
                 */
                switch (pattern->type) {
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = true;

                        if (flow) {
                                const struct rte_flow_item_ipv4 *spec;

                                spec = pattern->spec;
                                flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
                                flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
                                flow->entry.tuple.eth_proto = ETHER_TYPE_IPv4;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = true;

                        if (flow) {
                                const struct rte_flow_item_ipv6 *spec;

                                spec = pattern->spec;
                                rte_memcpy(flow->entry.tuple.src_ipv6,
                                           spec->hdr.src_addr,
                                           IPV6_ADDR_LEN);
                                rte_memcpy(flow->entry.tuple.dst_ipv6,
                                           spec->hdr.dst_addr,
                                           IPV6_ADDR_LEN);
                                flow->entry.tuple.eth_proto = ETHER_TYPE_IPv6;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        l4 = true;

                        if (flow) {
                                const struct rte_flow_item_udp *spec;

                                spec = pattern->spec;
                                flow->entry.tuple.src_port =
                                                spec->hdr.src_port;
                                flow->entry.tuple.dst_port =
                                                spec->hdr.dst_port;
                                flow->entry.tuple.ip_proto = IPPROTO_UDP;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        l4 = true;

                        if (flow) {
                                const struct rte_flow_item_tcp *spec;

                                spec = pattern->spec;
                                flow->entry.tuple.src_port =
                                                spec->hdr.src_port;
                                flow->entry.tuple.dst_port =
                                                spec->hdr.dst_port;
                                flow->entry.tuple.ip_proto = IPPROTO_TCP;
                        }
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           pattern,
                                           "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
                        return -rte_errno;
                }
        }

        if (!(l3 && l4)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   pattern,
                                   "Item types need to have both L3 and L4 protocols");
                return -rte_errno;
        }

        return 0;
}

static int
qede_flow_parse_actions(struct rte_eth_dev *dev,
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error,
                        struct rte_flow *flow)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
        const struct rte_flow_action_queue *queue;

        if (actions == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
                                   "NULL actions");
                return -rte_errno;
        }

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        queue = actions->conf;

                        if (queue->index >= QEDE_RSS_COUNT(qdev)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Bad QUEUE action");
                                return -rte_errno;
                        }

                        if (flow)
                                flow->entry.rx_queue = queue->index;

                        break;
                default:
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           actions,
                                           "Action is not supported - only ACTION_TYPE_QUEUE supported");
                        return -rte_errno;
                }
        }

        return 0;
}

static int
qede_flow_parse(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item patterns[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error,
                struct rte_flow *flow)
{
        int rc = 0;

        rc = qede_flow_validate_attr(dev, attr, error);
        if (rc)
                return rc;

        /* Parse and validate the item pattern and actions.
         * The given item list and actions will be translated into the
         * qede PMD specific arfs structure.
         */
        rc = qede_flow_parse_pattern(dev, patterns, error, flow);
        if (rc)
                return rc;

        rc = qede_flow_parse_actions(dev, actions, error, flow);

        return rc;
}

static int
qede_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item patterns[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
}

static struct rte_flow *
qede_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct rte_flow *flow = NULL;
        int rc;

        flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
        if (flow == NULL) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Failed to allocate memory");
                return NULL;
        }

        rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
        if (rc < 0) {
                rte_free(flow);
                return NULL;
        }

        rc = qede_config_arfs_filter(dev, &flow->entry, true);
        if (rc < 0) {
                rte_flow_error_set(error, rc,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to configure flow filter");
                rte_free(flow);
                return NULL;
        }

        return flow;
}

static int
qede_flow_destroy(struct rte_eth_dev *eth_dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        int rc = 0;

        rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
        if (rc < 0)
                rte_flow_error_set(error, rc,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to delete flow filter");

        /* The handle is unusable after destroy either way; release it */
        rte_free(flow);

        return rc;
}

const struct rte_flow_ops qede_flow_ops = {
        .validate = qede_flow_validate,
        .create = qede_flow_create,
        .destroy = qede_flow_destroy,
};
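
/* Usage sketch (application side): the rte_flow equivalent of the 4-tuple
 * filter; as enforced above, pattern items must carry a spec but no
 * mask/last, and QUEUE is the only supported action. Spec values and
 * port_id are hypothetical:
 *
 *        struct rte_flow_attr attr = { .ingress = 1 };
 *        struct rte_flow_item_ipv4 ip_spec = {
 *                .hdr.src_addr = rte_cpu_to_be_32(IPv4(1, 1, 1, 1)),
 *                .hdr.dst_addr = rte_cpu_to_be_32(IPv4(2, 2, 2, 2)),
 *        };
 *        struct rte_flow_item_udp udp_spec = {
 *                .hdr.src_port = rte_cpu_to_be_16(4000),
 *                .hdr.dst_port = rte_cpu_to_be_16(5000),
 *        };
 *        struct rte_flow_action_queue queue = { .index = 1 };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
 *                { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 *        struct rte_flow_error err;
 *        struct rte_flow *flow;
 *
 *        flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */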

int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
                         enum rte_filter_type filter_type,
                         enum rte_filter_op filter_op,
                         void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_tunnel_filter_conf *filter_conf =
                        (struct rte_eth_tunnel_filter_conf *)arg;

        switch (filter_type) {
        case RTE_ETH_FILTER_TUNNEL:
                switch (filter_conf->tunnel_type) {
                case RTE_TUNNEL_TYPE_VXLAN:
                case RTE_TUNNEL_TYPE_GENEVE:
                case RTE_TUNNEL_TYPE_IP_IN_GRE:
                        DP_INFO(edev,
                                "Packet steering to the specified Rx queue"
                                " is not supported with UDP tunneling\n");
                        return qede_tunn_filter_config(eth_dev, filter_op,
                                                       filter_conf);
                case RTE_TUNNEL_TYPE_TEREDO:
                case RTE_TUNNEL_TYPE_NVGRE:
                case RTE_L2_TUNNEL_TYPE_E_TAG:
                        DP_ERR(edev, "Unsupported tunnel type %d\n",
                                filter_conf->tunnel_type);
                        return -EINVAL;
                case RTE_TUNNEL_TYPE_NONE:
                default:
                        return 0;
                }
        case RTE_ETH_FILTER_FDIR:
                return qede_fdir_filter_conf(eth_dev, filter_op, arg);
        case RTE_ETH_FILTER_NTUPLE:
                return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
        case RTE_ETH_FILTER_GENERIC:
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        return -ENOTSUP;
                }

                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;

                *(const void **)arg = &qede_flow_ops;
                return 0;
        case RTE_ETH_FILTER_MACVLAN:
        case RTE_ETH_FILTER_ETHERTYPE:
        case RTE_ETH_FILTER_FLEXIBLE:
        case RTE_ETH_FILTER_SYN:
        case RTE_ETH_FILTER_HASH:
        case RTE_ETH_FILTER_L2_TUNNEL:
        case RTE_ETH_FILTER_MAX:
        default:
                DP_ERR(edev, "Unsupported filter type %d\n",
                        filter_type);
                return -EINVAL;
        }

        return 0;
}