ethdev: remove legacy N-tuple filter type support
[dpdk.git] drivers/net/qede/qede_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2017 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6
7 #include <rte_udp.h>
8 #include <rte_tcp.h>
9 #include <rte_sctp.h>
10 #include <rte_errno.h>
11 #include <rte_flow_driver.h>
12
13 #include "qede_ethdev.h"
14
15 /* Tunnel filter type to ecore classification mapping */
16 const struct _qede_udp_tunn_types {
17         uint16_t rte_filter_type;
18         enum ecore_filter_ucast_type qede_type;
19         enum ecore_tunn_clss qede_tunn_clss;
20         const char *string;
21 } qede_tunn_types[] = {
22         {
23                 ETH_TUNNEL_FILTER_OMAC,
24                 ECORE_FILTER_MAC,
25                 ECORE_TUNN_CLSS_MAC_VLAN,
26                 "outer-mac"
27         },
28         {
29                 ETH_TUNNEL_FILTER_TENID,
30                 ECORE_FILTER_VNI,
31                 ECORE_TUNN_CLSS_MAC_VNI,
32                 "vni"
33         },
34         {
35                 ETH_TUNNEL_FILTER_IMAC,
36                 ECORE_FILTER_INNER_MAC,
37                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
38                 "inner-mac"
39         },
40         {
41                 ETH_TUNNEL_FILTER_IVLAN,
42                 ECORE_FILTER_INNER_VLAN,
43                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
44                 "inner-vlan"
45         },
46         {
47                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
48                 ECORE_FILTER_MAC_VNI_PAIR,
49                 ECORE_TUNN_CLSS_MAC_VNI,
50                 "outer-mac and vni"
51         },
52         {
53                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
54                 ECORE_FILTER_UNUSED,
55                 MAX_ECORE_TUNN_CLSS,
56                 "outer-mac and inner-mac"
57         },
58         {
59                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
60                 ECORE_FILTER_UNUSED,
61                 MAX_ECORE_TUNN_CLSS,
62                 "outer-mac and inner-vlan"
63         },
64         {
65                 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
66                 ECORE_FILTER_INNER_MAC_VNI_PAIR,
67                 ECORE_TUNN_CLSS_INNER_MAC_VNI,
68                 "vni and inner-mac",
69         },
70         {
71                 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
72                 ECORE_FILTER_UNUSED,
73                 MAX_ECORE_TUNN_CLSS,
74                 "vni and inner-vlan",
75         },
76         {
77                 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
78                 ECORE_FILTER_INNER_PAIR,
79                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
80                 "inner-mac and inner-vlan",
81         },
82         {
83                 ETH_TUNNEL_FILTER_OIP,
84                 ECORE_FILTER_UNUSED,
85                 MAX_ECORE_TUNN_CLSS,
86                 "outer-IP"
87         },
88         {
89                 ETH_TUNNEL_FILTER_IIP,
90                 ECORE_FILTER_UNUSED,
91                 MAX_ECORE_TUNN_CLSS,
92                 "inner-IP"
93         },
94         {
95                 RTE_TUNNEL_FILTER_IMAC_IVLAN,
96                 ECORE_FILTER_UNUSED,
97                 MAX_ECORE_TUNN_CLSS,
98                 "IMAC_IVLAN"
99         },
100         {
101                 RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
102                 ECORE_FILTER_UNUSED,
103                 MAX_ECORE_TUNN_CLSS,
104                 "IMAC_IVLAN_TENID"
105         },
106         {
107                 RTE_TUNNEL_FILTER_IMAC_TENID,
108                 ECORE_FILTER_UNUSED,
109                 MAX_ECORE_TUNN_CLSS,
110                 "IMAC_TENID"
111         },
112         {
113                 RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
114                 ECORE_FILTER_UNUSED,
115                 MAX_ECORE_TUNN_CLSS,
116                 "OMAC_TENID_IMAC"
117         },
118 };
119
120 #define IP_VERSION                              (0x40)
121 #define IP_HDRLEN                               (0x5)
122 #define QEDE_FDIR_IP_DEFAULT_VERSION_IHL        (IP_VERSION | IP_HDRLEN)
123 #define QEDE_FDIR_TCP_DEFAULT_DATAOFF           (0x50)
124 #define QEDE_FDIR_IPV4_DEF_TTL                  (64)
125 #define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW         (0x60000000)
126 /* Sum of the L2, L3 and L4 header lengths.
127  * L2 : ether_hdr + vlan_hdr + vxlan_hdr
128  * L3 : ipv6_hdr
129  * L4 : tcp_hdr
130  */
131 #define QEDE_MAX_FDIR_PKT_LEN                   (86)
132
133 static inline bool qede_valid_flow(uint16_t flow_type)
134 {
135         return  ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
136                  (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
137                  (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
138                  (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
139 }
140
141 static uint16_t
142 qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
143                         struct qede_arfs_entry *arfs,
144                         void *buff,
145                         struct ecore_arfs_config_params *params);
146
147 /* Note: Flowdir support is only partial.
148  * For example, drop_queue, FDIR masks and flex_conf are not supported.
149  * Parameters like pballoc/status fields are irrelevant here.
150  */
151 int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
152 {
153         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
154         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
155         struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
156
157         /* check FDIR modes */
158         switch (fdir->mode) {
159         case RTE_FDIR_MODE_NONE:
160                 qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
161                 DP_INFO(edev, "flowdir is disabled\n");
162         break;
163         case RTE_FDIR_MODE_PERFECT:
164                 if (ECORE_IS_CMT(edev)) {
165                         DP_ERR(edev, "flowdir is not supported in 100G mode\n");
166                         qdev->arfs_info.arfs.mode =
167                                 ECORE_FILTER_CONFIG_MODE_DISABLE;
168                         return -ENOTSUP;
169                 }
170                 qdev->arfs_info.arfs.mode =
171                                 ECORE_FILTER_CONFIG_MODE_5_TUPLE;
172                 DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
173         break;
174         case RTE_FDIR_MODE_PERFECT_TUNNEL:
175         case RTE_FDIR_MODE_SIGNATURE:
176         case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
177                 DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
178                 return -ENOTSUP;
179         }
180
181         return 0;
182 }
183
184 void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
185 {
186         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
187         struct qede_arfs_entry *tmp = NULL;
188
189         /* Pop entries one by one; SLIST_FOREACH would touch freed memory */
190         while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
191                 tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
192                 if (tmp->mz)
193                         rte_memzone_free(tmp->mz);
194                 SLIST_REMOVE_HEAD(&qdev->arfs_info.arfs_list_head,
195                                   list);
196                 rte_free(tmp);
197         }
198 }
199
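/* Translate a legacy flow director request into the qede aRFS tuple:
 * L3 addresses, L4 ports and IP protocol, plus the target Rx queue.
 */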
200 static int
201 qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
202                          struct rte_eth_fdir_filter *fdir,
203                          struct qede_arfs_entry *arfs)
204 {
205         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
206         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
207         struct rte_eth_fdir_input *input;
208
209         static const uint8_t next_proto[] = {
210                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
211                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
212                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
213                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
214         };
215
216         input = &fdir->input;
217
218         DP_INFO(edev, "flow_type %d\n", input->flow_type);
219
220         switch (input->flow_type) {
221         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
222         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
223                 /* fill the common ip header */
224                 arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV4;
225                 arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
226                 arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
227                 arfs->tuple.ip_proto = next_proto[input->flow_type];
228
229                 /* UDP */
230                 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
231                         arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
232                         arfs->tuple.src_port = input->flow.udp4_flow.src_port;
233                 } else { /* TCP */
234                         arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
235                         arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
236                 }
237                 break;
238         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
239         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
240                 arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV6;
241                 arfs->tuple.ip_proto = next_proto[input->flow_type];
242                 rte_memcpy(arfs->tuple.dst_ipv6,
243                            &input->flow.ipv6_flow.dst_ip,
244                            IPV6_ADDR_LEN);
245                 rte_memcpy(arfs->tuple.src_ipv6,
246                            &input->flow.ipv6_flow.src_ip,
247                            IPV6_ADDR_LEN);
248
249                 /* UDP */
250                 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
251                         arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
252                         arfs->tuple.src_port = input->flow.udp6_flow.src_port;
253                 } else { /* TCP */
254                         arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
255                         arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
256                 }
257                 break;
258         default:
259                 DP_ERR(edev, "Unsupported flow_type %u\n",
260                        input->flow_type);
261                 return -ENOTSUP;
262         }
263
264         arfs->rx_queue = fdir->action.rx_queue;
265         return 0;
266 }
267
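/* Build the filter's template packet in a DMA-able memzone and program the
 * n-tuple filter through ecore. The aRFS searcher is enabled on the first
 * add and disabled again once the last filter has been removed.
 */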
268 static int
269 qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
270                         struct qede_arfs_entry *arfs,
271                         bool add)
272 {
273         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
274         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
275         struct ecore_ntuple_filter_params params;
276         char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
277         struct qede_arfs_entry *tmp = NULL;
278         const struct rte_memzone *mz;
279         struct ecore_hwfn *p_hwfn;
280         enum _ecore_status_t rc;
281         uint16_t pkt_len;
282         void *pkt;
283
284         if (add) {
285                 if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
286                         DP_ERR(edev, "Reached max flowdir filter limit\n");
287                         return -EINVAL;
288                 }
289         }
290
291         /* soft_id could have been used as memzone string, but soft_id is
292          * not currently used so it has no significance.
293          */
294         snprintf(mz_name, sizeof(mz_name), "%lx",
295                  (unsigned long)rte_get_timer_cycles());
296         mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
297                                          SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
298         if (!mz) {
299                 DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
300                        rte_strerror(rte_errno));
301                 return -rte_errno;
302         }
303
304         pkt = mz->addr;
305         memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
306         pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
307                                           &qdev->arfs_info.arfs);
308         if (pkt_len == 0) {
309                 rc = -EINVAL;
310                 goto err1;
311         }
312
313         DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
314         if (add) {
315                 SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
316                         if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
317                                 DP_INFO(edev, "flowdir filter already exists\n");
318                                 rc = -EEXIST;
319                                 goto err1;
320                         }
321                 }
322         } else {
323                 SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
324                         if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
325                                 break;
326                 }
327                 if (!tmp) {
328                         DP_ERR(edev, "flowdir filter does not exist\n");
329                         rc = -ENOENT;
330                         goto err1;
331                 }
332         }
333         p_hwfn = ECORE_LEADING_HWFN(edev);
334         if (add) {
335                 if (qdev->arfs_info.arfs.mode ==
336                         ECORE_FILTER_CONFIG_MODE_DISABLE) {
337                         /* Force update */
338                         eth_dev->data->dev_conf.fdir_conf.mode =
339                                                 RTE_FDIR_MODE_PERFECT;
340                         qdev->arfs_info.arfs.mode =
341                                         ECORE_FILTER_CONFIG_MODE_5_TUPLE;
342                         DP_INFO(edev, "Force enable flowdir in perfect mode\n");
343                 }
344                 /* Enable ARFS searcher with updated flow_types */
345                 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
346                                           &qdev->arfs_info.arfs);
347         }
348
349         memset(&params, 0, sizeof(params));
350         params.addr = (dma_addr_t)mz->iova;
351         params.length = pkt_len;
352         params.qid = arfs->rx_queue;
353         params.vport_id = 0;
354         params.b_is_add = add;
355         params.b_is_drop = arfs->is_drop;
356
357         /* configure filter with ECORE_SPQ_MODE_EBLOCK */
358         rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
359                                                &params);
360         if (rc == ECORE_SUCCESS) {
361                 if (add) {
362                         arfs->pkt_len = pkt_len;
363                         arfs->mz = mz;
364                         SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
365                                           arfs, list);
366                         qdev->arfs_info.filter_count++;
367                         DP_INFO(edev, "flowdir filter added, count = %d\n",
368                                 qdev->arfs_info.filter_count);
369                 } else {
370                         rte_memzone_free(tmp->mz);
371                         SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
372                                      qede_arfs_entry, list);
373                         rte_free(tmp); /* the node deleted */
374                         rte_memzone_free(mz); /* temp node allocated */
375                         qdev->arfs_info.filter_count--;
376                         DP_INFO(edev, "Fdir filter deleted, count = %d\n",
377                                 qdev->arfs_info.filter_count);
378                 }
379         } else {
380                 DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
381                        rc, qdev->arfs_info.filter_count);
382         }
383
384         /* Disable ARFS searcher if there are no more filters */
385         if (qdev->arfs_info.filter_count == 0) {
386                 memset(&qdev->arfs_info.arfs, 0,
387                        sizeof(struct ecore_arfs_config_params));
388                 DP_INFO(edev, "Disabling flowdir\n");
389                 qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
390                 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
391                                           &qdev->arfs_info.arfs);
392         }
393         return 0;
394
395 err1:
396         rte_memzone_free(mz);
397         return rc;
398 }
399
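/* Allocate an aRFS entry, translate the flow director request into it and
 * add or remove the corresponding hardware filter.
 */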
400 static int
401 qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
402                             struct rte_eth_fdir_filter *fdir_filter,
403                             bool add)
404 {
405         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
406         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
407         struct qede_arfs_entry *arfs = NULL;
408         int rc = 0;
409
410         arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
411                                   RTE_CACHE_LINE_SIZE);
412         if (!arfs) {
413                 DP_ERR(edev, "Did not allocate memory for arfs\n");
414                 return -ENOMEM;
415         }
416
417         rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
418         if (rc >= 0)
419                 rc = qede_config_arfs_filter(eth_dev, arfs, add);
420
421         /* On failure the entry was not queued anywhere, so free it here */
422         if (rc < 0)
423                 rte_free(arfs);
424
425         return rc;
426 }
427
428 static int
429 qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
430                      struct rte_eth_fdir_filter *fdir,
431                      bool add)
432 {
433         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
434         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
435
436         if (!qede_valid_flow(fdir->input.flow_type)) {
437                 DP_ERR(edev, "invalid flow_type input\n");
438                 return -EINVAL;
439         }
440
441         if (fdir->action.rx_queue >= QEDE_RSS_COUNT(eth_dev)) {
442                 DP_ERR(edev, "invalid queue number %u\n",
443                        fdir->action.rx_queue);
444                 return -EINVAL;
445         }
446
447         if (fdir->input.flow_ext.is_vf) {
448                 DP_ERR(edev, "flowdir is not supported over VF\n");
449                 return -EINVAL;
450         }
451
452         return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
453 }
454
455 /* Fills the L3/L4 headers and returns the actual length of the flowdir packet */
456 static uint16_t
457 qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
458                         struct qede_arfs_entry *arfs,
459                         void *buff,
460                         struct ecore_arfs_config_params *params)
461
462 {
463         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
464         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
465         uint16_t *ether_type;
466         uint8_t *raw_pkt;
467         struct rte_ipv4_hdr *ip;
468         struct rte_ipv6_hdr *ip6;
469         struct rte_udp_hdr *udp;
470         struct rte_tcp_hdr *tcp;
471         uint16_t len;
472
473         raw_pkt = (uint8_t *)buff;
474
475         len =  2 * sizeof(struct rte_ether_addr);
476         raw_pkt += 2 * sizeof(struct rte_ether_addr);
477         ether_type = (uint16_t *)raw_pkt;
478         raw_pkt += sizeof(uint16_t);
479         len += sizeof(uint16_t);
480
481         *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
482         switch (arfs->tuple.eth_proto) {
483         case RTE_ETHER_TYPE_IPV4:
484                 ip = (struct rte_ipv4_hdr *)raw_pkt;
485                 ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
486                 ip->total_length = sizeof(struct rte_ipv4_hdr);
487                 ip->next_proto_id = arfs->tuple.ip_proto;
488                 ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
489                 ip->dst_addr = arfs->tuple.dst_ipv4;
490                 ip->src_addr = arfs->tuple.src_ipv4;
491                 len += sizeof(struct rte_ipv4_hdr);
492                 params->ipv4 = true;
493
494                 raw_pkt = (uint8_t *)buff;
495                 /* UDP */
496                 if (arfs->tuple.ip_proto == IPPROTO_UDP) {
497                         udp = (struct rte_udp_hdr *)(raw_pkt + len);
498                         udp->dst_port = arfs->tuple.dst_port;
499                         udp->src_port = arfs->tuple.src_port;
500                         udp->dgram_len = sizeof(struct rte_udp_hdr);
501                         len += sizeof(struct rte_udp_hdr);
502                         /* adjust ip total_length */
503                         ip->total_length += sizeof(struct rte_udp_hdr);
504                         params->udp = true;
505                 } else { /* TCP */
506                         tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
507                         tcp->src_port = arfs->tuple.src_port;
508                         tcp->dst_port = arfs->tuple.dst_port;
509                         tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
510                         len += sizeof(struct rte_tcp_hdr);
511                         /* adjust ip total_length */
512                         ip->total_length += sizeof(struct rte_tcp_hdr);
513                         params->tcp = true;
514                 }
515                 break;
516         case RTE_ETHER_TYPE_IPV6:
517                 ip6 = (struct rte_ipv6_hdr *)raw_pkt;
518                 ip6->proto = arfs->tuple.ip_proto;
519                 ip6->vtc_flow =
520                         rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
521
522                 rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
523                            IPV6_ADDR_LEN);
524                 rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
525                            IPV6_ADDR_LEN);
526                 len += sizeof(struct rte_ipv6_hdr);
527                 params->ipv6 = true;
528
529                 raw_pkt = (uint8_t *)buff;
530                 /* UDP */
531                 if (arfs->tuple.ip_proto == IPPROTO_UDP) {
532                         udp = (struct rte_udp_hdr *)(raw_pkt + len);
533                         udp->src_port = arfs->tuple.src_port;
534                         udp->dst_port = arfs->tuple.dst_port;
535                         len += sizeof(struct rte_udp_hdr);
536                         params->udp = true;
537                 } else { /* TCP */
538                         tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
539                         tcp->src_port = arfs->tuple.src_port;
540                         tcp->dst_port = arfs->tuple.dst_port;
541                         tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
542                         len += sizeof(struct rte_tcp_hdr);
543                         params->tcp = true;
544                 }
545                 break;
546         default:
547                 DP_ERR(edev, "Unsupported eth_proto %u\n",
548                        arfs->tuple.eth_proto);
549                 return 0;
550         }
551
552         return len;
553 }
554
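/* Handle legacy RTE_ETH_FILTER_FDIR operations: NOP acts as a capability
 * query, ADD/DELETE program or remove a filter, the rest are unsupported.
 */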
555 static int
556 qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
557                       enum rte_filter_op filter_op,
558                       void *arg)
559 {
560         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
561         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
562         struct rte_eth_fdir_filter *fdir;
563         int ret;
564
565         fdir = (struct rte_eth_fdir_filter *)arg;
566         switch (filter_op) {
567         case RTE_ETH_FILTER_NOP:
568                 /* Typically used to query flowdir support */
569                 if (ECORE_IS_CMT(edev)) {
570                         DP_ERR(edev, "flowdir is not supported in 100G mode\n");
571                         return -ENOTSUP;
572                 }
573                 return 0; /* means supported */
574         case RTE_ETH_FILTER_ADD:
575                 ret = qede_fdir_filter_add(eth_dev, fdir, 1);
576         break;
577         case RTE_ETH_FILTER_DELETE:
578                 ret = qede_fdir_filter_add(eth_dev, fdir, 0);
579         break;
580         case RTE_ETH_FILTER_FLUSH:
581         case RTE_ETH_FILTER_UPDATE:
582         case RTE_ETH_FILTER_INFO:
583                 return -ENOTSUP;
584         break;
585         default:
586                 DP_ERR(edev, "unknown operation %u\n", filter_op);
587                 ret = -EINVAL;
588         }
589
590         return ret;
591 }
592
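/* Push the updated tunnel configuration to every hwfn; on PFs a PTT
 * window is acquired around the configuration call.
 */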
593 static int
594 qede_tunnel_update(struct qede_dev *qdev,
595                    struct ecore_tunnel_info *tunn_info)
596 {
597         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
598         enum _ecore_status_t rc = ECORE_INVAL;
599         struct ecore_hwfn *p_hwfn;
600         struct ecore_ptt *p_ptt;
601         int i;
602
603         for_each_hwfn(edev, i) {
604                 p_hwfn = &edev->hwfns[i];
605                 if (IS_PF(edev)) {
606                         p_ptt = ecore_ptt_acquire(p_hwfn);
607                         if (!p_ptt) {
608                                 DP_ERR(p_hwfn, "Can't acquire PTT\n");
609                                 return -EAGAIN;
610                         }
611                 } else {
612                         p_ptt = NULL;
613                 }
614
615                 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
616                                 tunn_info, ECORE_SPQ_MODE_CB, NULL);
617                 if (IS_PF(edev))
618                         ecore_ptt_release(p_hwfn, p_ptt);
619
620                 if (rc != ECORE_SUCCESS)
621                         break;
622         }
623
624         return rc;
625 }
626
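/* Enable or disable VXLAN tunnel offload with the given classification
 * type and program the default VXLAN UDP port accordingly.
 */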
627 static int
628 qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
629                   bool enable)
630 {
631         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
632         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
633         enum _ecore_status_t rc = ECORE_INVAL;
634         struct ecore_tunnel_info tunn;
635
636         if (qdev->vxlan.enable == enable)
637                 return ECORE_SUCCESS;
638
639         memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
640         tunn.vxlan.b_update_mode = true;
641         tunn.vxlan.b_mode_enabled = enable;
642         tunn.b_update_rx_cls = true;
643         tunn.b_update_tx_cls = true;
644         tunn.vxlan.tun_cls = clss;
645
646         tunn.vxlan_port.b_update_port = true;
647         tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
648
649         rc = qede_tunnel_update(qdev, &tunn);
650         if (rc == ECORE_SUCCESS) {
651                 qdev->vxlan.enable = enable;
652                 qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
653                 DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
654                         enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
655         } else {
656                 DP_ERR(edev, "Failed to update tunn_clss %u\n",
657                        tunn.vxlan.tun_cls);
658         }
659
660         return rc;
661 }
662
663 static int
664 qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
665                   bool enable)
666 {
667         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
668         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
669         enum _ecore_status_t rc = ECORE_INVAL;
670         struct ecore_tunnel_info tunn;
671
672         memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
673         tunn.l2_geneve.b_update_mode = true;
674         tunn.l2_geneve.b_mode_enabled = enable;
675         tunn.ip_geneve.b_update_mode = true;
676         tunn.ip_geneve.b_mode_enabled = enable;
677         tunn.l2_geneve.tun_cls = clss;
678         tunn.ip_geneve.tun_cls = clss;
679         tunn.b_update_rx_cls = true;
680         tunn.b_update_tx_cls = true;
681
682         tunn.geneve_port.b_update_port = true;
683         tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
684
685         rc = qede_tunnel_update(qdev, &tunn);
686         if (rc == ECORE_SUCCESS) {
687                 qdev->geneve.enable = enable;
688                 qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
689                 DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
690                         enable ? "enabled" : "disabled", qdev->geneve.udp_port);
691         } else {
692                 DP_ERR(edev, "Failed to update tunn_clss %u\n",
693                        clss);
694         }
695
696         return rc;
697 }
698
699 static int
700 qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
701                   bool enable)
702 {
703         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
704         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
705         enum _ecore_status_t rc = ECORE_INVAL;
706         struct ecore_tunnel_info tunn;
707
708         memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
709         tunn.ip_gre.b_update_mode = true;
710         tunn.ip_gre.b_mode_enabled = enable;
711         tunn.ip_gre.tun_cls = clss;
713         tunn.b_update_rx_cls = true;
714         tunn.b_update_tx_cls = true;
715
716         rc = qede_tunnel_update(qdev, &tunn);
717         if (rc == ECORE_SUCCESS) {
718                 qdev->ipgre.enable = enable;
719                 DP_INFO(edev, "IPGRE is %s\n",
720                         enable ? "enabled" : "disabled");
721         } else {
722                 DP_ERR(edev, "Failed to update tunn_clss %u\n",
723                        clss);
724         }
725
726         return rc;
727 }
728
729 int
730 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
731                       struct rte_eth_udp_tunnel *tunnel_udp)
732 {
733         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
734         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
735         struct ecore_tunnel_info tunn; /* @DPDK */
736         uint16_t udp_port;
737         int rc;
738
739         PMD_INIT_FUNC_TRACE(edev);
740
741         memset(&tunn, 0, sizeof(tunn));
742
743         switch (tunnel_udp->prot_type) {
744         case RTE_TUNNEL_TYPE_VXLAN:
745                 if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
746                         DP_ERR(edev, "UDP port %u doesn't exist\n",
747                                 tunnel_udp->udp_port);
748                         return ECORE_INVAL;
749                 }
750                 udp_port = 0;
751
752                 tunn.vxlan_port.b_update_port = true;
753                 tunn.vxlan_port.port = udp_port;
754
755                 rc = qede_tunnel_update(qdev, &tunn);
756                 if (rc != ECORE_SUCCESS) {
757                         DP_ERR(edev, "Unable to config UDP port %u\n",
758                                tunn.vxlan_port.port);
759                         return rc;
760                 }
761
762                 qdev->vxlan.udp_port = udp_port;
763                 /* If the request is to delete the UDP port and the number
764                  * of VXLAN filters has reached 0, then VXLAN offload can be
765                  * disabled.
766                  */
767                 if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
768                         return qede_vxlan_enable(eth_dev,
769                                         ECORE_TUNN_CLSS_MAC_VLAN, false);
770
771                 break;
772         case RTE_TUNNEL_TYPE_GENEVE:
773                 if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
774                         DP_ERR(edev, "UDP port %u doesn't exist\n",
775                                 tunnel_udp->udp_port);
776                         return ECORE_INVAL;
777                 }
778
779                 udp_port = 0;
780
781                 tunn.geneve_port.b_update_port = true;
782                 tunn.geneve_port.port = udp_port;
783
784                 rc = qede_tunnel_update(qdev, &tunn);
785                 if (rc != ECORE_SUCCESS) {
786                         DP_ERR(edev, "Unable to config UDP port %u\n",
787                                tunn.geneve_port.port);
788                         return rc;
789                 }
790
791                 qdev->geneve.udp_port = udp_port;
792                 /* If the request is to delete the UDP port and the number
793                  * of GENEVE filters has reached 0, then GENEVE offload can
794                  * be disabled.
795                  */
796                 if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
797                         return qede_geneve_enable(eth_dev,
798                                         ECORE_TUNN_CLSS_MAC_VLAN, false);
799
800                 break;
801
802         default:
803                 return ECORE_INVAL;
804         }
805
806         return 0;
807 }
808
809 int
810 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
811                       struct rte_eth_udp_tunnel *tunnel_udp)
812 {
813         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
814         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
815         struct ecore_tunnel_info tunn; /* @DPDK */
816         uint16_t udp_port;
817         int rc;
818
819         PMD_INIT_FUNC_TRACE(edev);
820
821         memset(&tunn, 0, sizeof(tunn));
822
823         switch (tunnel_udp->prot_type) {
824         case RTE_TUNNEL_TYPE_VXLAN:
825                 if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
826                         DP_INFO(edev,
827                                 "UDP port %u for VXLAN was already configured\n",
828                                 tunnel_udp->udp_port);
829                         return ECORE_SUCCESS;
830                 }
831
832                 /* Enable VxLAN tunnel with default MAC/VLAN classification if
833                  * it was not enabled while adding VXLAN filter before UDP port
834                  * update.
835                  */
836                 if (!qdev->vxlan.enable) {
837                         rc = qede_vxlan_enable(eth_dev,
838                                 ECORE_TUNN_CLSS_MAC_VLAN, true);
839                         if (rc != ECORE_SUCCESS) {
840                                 DP_ERR(edev, "Failed to enable VXLAN "
841                                         "prior to updating UDP port\n");
842                                 return rc;
843                         }
844                 }
845                 udp_port = tunnel_udp->udp_port;
846
847                 tunn.vxlan_port.b_update_port = true;
848                 tunn.vxlan_port.port = udp_port;
849
850                 rc = qede_tunnel_update(qdev, &tunn);
851                 if (rc != ECORE_SUCCESS) {
852                         DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
853                                udp_port);
854                         return rc;
855                 }
856
857                 DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
858
859                 qdev->vxlan.udp_port = udp_port;
860                 break;
861         case RTE_TUNNEL_TYPE_GENEVE:
862                 if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
863                         DP_INFO(edev,
864                                 "UDP port %u for GENEVE was already configured\n",
865                                 tunnel_udp->udp_port);
866                         return ECORE_SUCCESS;
867                 }
868
869                 /* Enable GENEVE tunnel with default MAC/VLAN classification if
870                  * it was not enabled while adding GENEVE filter before UDP port
871                  * update.
872                  */
873                 if (!qdev->geneve.enable) {
874                         rc = qede_geneve_enable(eth_dev,
875                                 ECORE_TUNN_CLSS_MAC_VLAN, true);
876                         if (rc != ECORE_SUCCESS) {
877                                 DP_ERR(edev, "Failed to enable GENEVE "
878                                         "prior to updating UDP port\n");
879                                 return rc;
880                         }
881                 }
882                 udp_port = tunnel_udp->udp_port;
883
884                 tunn.geneve_port.b_update_port = true;
885                 tunn.geneve_port.port = udp_port;
886
887                 rc = qede_tunnel_update(qdev, &tunn);
888                 if (rc != ECORE_SUCCESS) {
889                         DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
890                                udp_port);
891                         return rc;
892                 }
893
894                 DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
895
896                 qdev->geneve.udp_port = udp_port;
897                 break;
898         default:
899                 return ECORE_INVAL;
900         }
901
902         return 0;
903 }
904
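/* Map a legacy tunnel filter type to the matching ecore ucast type and
 * tunnel classification; *clss is left as MAX_ECORE_TUNN_CLSS when no
 * supported mapping exists.
 */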
905 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
906                                        uint32_t *clss, char *str)
907 {
908         uint16_t j;
909         *clss = MAX_ECORE_TUNN_CLSS;
910
911         for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
912                 if (filter == qede_tunn_types[j].rte_filter_type) {
913                         *type = qede_tunn_types[j].qede_type;
914                         *clss = qede_tunn_types[j].qede_tunn_clss;
915                         strcpy(str, qede_tunn_types[j].string);
916                         return;
917                 }
918         }
919 }
920
921 static int
922 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
923                               const struct rte_eth_tunnel_filter_conf *conf,
924                               uint32_t type)
925 {
926         /* Init common ucast params first */
927         qede_set_ucast_cmn_params(ucast);
928
929         /* Copy out the required fields based on classification type */
930         ucast->type = type;
931
932         switch (type) {
933         case ECORE_FILTER_VNI:
934                 ucast->vni = conf->tenant_id;
935         break;
936         case ECORE_FILTER_INNER_VLAN:
937                 ucast->vlan = conf->inner_vlan;
938         break;
939         case ECORE_FILTER_MAC:
940                 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
941                        RTE_ETHER_ADDR_LEN);
942         break;
943         case ECORE_FILTER_INNER_MAC:
944                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
945                        RTE_ETHER_ADDR_LEN);
946         break;
947         case ECORE_FILTER_MAC_VNI_PAIR:
948                 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
949                         RTE_ETHER_ADDR_LEN);
950                 ucast->vni = conf->tenant_id;
951         break;
952         case ECORE_FILTER_INNER_MAC_VNI_PAIR:
953                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
954                         RTE_ETHER_ADDR_LEN);
955                 ucast->vni = conf->tenant_id;
956         break;
957         case ECORE_FILTER_INNER_PAIR:
958                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
959                         RTE_ETHER_ADDR_LEN);
960                 ucast->vlan = conf->inner_vlan;
961         break;
962         default:
963                 return -EINVAL;
964         }
965
966         return ECORE_SUCCESS;
967 }
968
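/* Program a single tunnel ucast filter: resolve the ecore classification
 * for the requested filter type, fill the ucast command and add or remove
 * the filter.
 */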
969 static int
970 _qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
971                          const struct rte_eth_tunnel_filter_conf *conf,
972                          __rte_unused enum rte_filter_op filter_op,
973                          enum ecore_tunn_clss *clss,
974                          bool add)
975 {
976         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
977         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
978         struct ecore_filter_ucast ucast = {0};
979         enum ecore_filter_ucast_type type;
980         uint16_t filter_type = 0;
981         char str[80];
982         int rc;
983
984         filter_type = conf->filter_type;
985         /* Determine if the given filter classification is supported */
986         qede_get_ecore_tunn_params(filter_type, &type, clss, str);
987         if (*clss == MAX_ECORE_TUNN_CLSS) {
988                 DP_ERR(edev, "Unsupported filter type\n");
989                 return -EINVAL;
990         }
991         /* Init tunnel ucast params */
992         rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
993         if (rc != ECORE_SUCCESS) {
994                 DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
995                 conf->filter_type);
996                 return rc;
997         }
998         DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
999                 str, filter_op, ucast.type);
1000
1001         ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
1002
1003         /* Skip MAC/VLAN if filter is based on VNI */
1004         if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
1005                 rc = qede_mac_int_ops(eth_dev, &ucast, add);
1006                 if (rc == 0 && add) {
1007                         /* Enable accept anyvlan */
1008                         qede_config_accept_any_vlan(qdev, true);
1009                 }
1010         } else {
1011                 rc = qede_ucast_filter(eth_dev, &ucast, add);
1012                 if (rc == 0)
1013                         rc = ecore_filter_ucast_cmd(edev, &ucast,
1014                                             ECORE_SPQ_MODE_CB, NULL);
1015         }
1016
1017         return rc;
1018 }
1019
1020 static int
1021 qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
1022                  enum rte_eth_tunnel_type tunn_type, bool enable)
1023 {
1024         int rc = -EINVAL;
1025
1026         switch (tunn_type) {
1027         case RTE_TUNNEL_TYPE_VXLAN:
1028                 rc = qede_vxlan_enable(eth_dev, clss, enable);
1029                 break;
1030         case RTE_TUNNEL_TYPE_GENEVE:
1031                 rc = qede_geneve_enable(eth_dev, clss, enable);
1032                 break;
1033         case RTE_TUNNEL_TYPE_IP_IN_GRE:
1034                 rc = qede_ipgre_enable(eth_dev, clss, enable);
1035                 break;
1036         default:
1037                 rc = -EINVAL;
1038                 break;
1039         }
1040
1041         return rc;
1042 }
1043
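/* Handle a legacy tunnel filter ADD/DELETE: VFs only toggle the tunnel
 * itself, while PFs program the filter, update the per-tunnel filter
 * counters and enable or disable the tunnel offload as needed.
 */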
1044 static int
1045 qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
1046                         enum rte_filter_op filter_op,
1047                         const struct rte_eth_tunnel_filter_conf *conf)
1048 {
1049         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1050         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1051         enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
1052         bool add;
1053         int rc;
1054
1055         PMD_INIT_FUNC_TRACE(edev);
1056
1057         switch (filter_op) {
1058         case RTE_ETH_FILTER_ADD:
1059                 add = true;
1060                 break;
1061         case RTE_ETH_FILTER_DELETE:
1062                 add = false;
1063                 break;
1064         default:
1065                 DP_ERR(edev, "Unsupported operation %d\n", filter_op);
1066                 return -EINVAL;
1067         }
1068
1069         if (IS_VF(edev))
1070                 return qede_tunn_enable(eth_dev,
1071                                         ECORE_TUNN_CLSS_MAC_VLAN,
1072                                         conf->tunnel_type, add);
1073
1074         rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
1075         if (rc != ECORE_SUCCESS)
1076                 return rc;
1077
1078         if (add) {
1079                 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
1080                         qdev->vxlan.num_filters++;
1081                         qdev->vxlan.filter_type = conf->filter_type;
1082                 } else { /* GENEVE */
1083                         qdev->geneve.num_filters++;
1084                         qdev->geneve.filter_type = conf->filter_type;
1085                 }
1086
1087                 if (!qdev->vxlan.enable || !qdev->geneve.enable ||
1088                     !qdev->ipgre.enable)
1089                         return qede_tunn_enable(eth_dev, clss,
1090                                                 conf->tunnel_type,
1091                                                 true);
1092         } else {
1093                 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
1094                         qdev->vxlan.num_filters--;
1095                 else /*GENEVE*/
1096                         qdev->geneve.num_filters--;
1097
1098                 /* Disable tunnel offload if no VXLAN or GENEVE filters remain */
1099                 if (qdev->vxlan.num_filters == 0 ||
1100                     qdev->geneve.num_filters == 0)
1101                         return qede_tunn_enable(eth_dev, clss,
1102                                                 conf->tunnel_type,
1103                                                 false);
1104         }
1105
1106         return 0;
1107 }
1108
1109 static int
1110 qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev,
1111                         const struct rte_flow_attr *attr,
1112                         struct rte_flow_error *error)
1113 {
1114         if (attr == NULL) {
1115                 rte_flow_error_set(error, EINVAL,
1116                                    RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1117                                    "NULL attribute");
1118                 return -rte_errno;
1119         }
1120
1121         if (attr->group != 0) {
1122                 rte_flow_error_set(error, ENOTSUP,
1123                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1124                                    "Groups are not supported");
1125                 return -rte_errno;
1126         }
1127
1128         if (attr->priority != 0) {
1129                 rte_flow_error_set(error, ENOTSUP,
1130                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
1131                                    "Priorities are not supported");
1132                 return -rte_errno;
1133         }
1134
1135         if (attr->egress != 0) {
1136                 rte_flow_error_set(error, ENOTSUP,
1137                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1138                                    "Egress is not supported");
1139                 return -rte_errno;
1140         }
1141
1142         if (attr->transfer != 0) {
1143                 rte_flow_error_set(error, ENOTSUP,
1144                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
1145                                    "Transfer is not supported");
1146                 return -rte_errno;
1147         }
1148
1149         if (attr->ingress == 0) {
1150                 rte_flow_error_set(error, ENOTSUP,
1151                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1152                                    "Only ingress is supported");
1153                 return -rte_errno;
1154         }
1155
1156         return 0;
1157 }
1158
1159 static int
1160 qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
1161                         const struct rte_flow_item pattern[],
1162                         struct rte_flow_error *error,
1163                         struct rte_flow *flow)
1164 {
1165         bool l3 = false, l4 = false;
1166
1167         if (pattern == NULL) {
1168                 rte_flow_error_set(error, EINVAL,
1169                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1170                                    "NULL pattern");
1171                 return -rte_errno;
1172         }
1173
1174         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1175                 if (!pattern->spec) {
1176                         rte_flow_error_set(error, EINVAL,
1177                                            RTE_FLOW_ERROR_TYPE_ITEM,
1178                                            pattern,
1179                                            "Item spec not defined");
1180                         return -rte_errno;
1181                 }
1182
1183                 if (pattern->last) {
1184                         rte_flow_error_set(error, EINVAL,
1185                                            RTE_FLOW_ERROR_TYPE_ITEM,
1186                                            pattern,
1187                                            "Item last not supported");
1188                         return -rte_errno;
1189                 }
1190
1191                 if (pattern->mask) {
1192                         rte_flow_error_set(error, EINVAL,
1193                                            RTE_FLOW_ERROR_TYPE_ITEM,
1194                                            pattern,
1195                                            "Item mask not supported");
1196                         return -rte_errno;
1197                 }
1198
1199                 /* Below validation is only for 4 tuple flow
1200                  * (GFT_PROFILE_TYPE_4_TUPLE)
1201                  * - src and dst L3 address (IPv4 or IPv6)
1202                  * - src and dst L4 port (TCP or UDP)
1203                  */
1204
1205                 switch (pattern->type) {
1206                 case RTE_FLOW_ITEM_TYPE_IPV4:
1207                         l3 = true;
1208
1209                         if (flow) {
1210                                 const struct rte_flow_item_ipv4 *spec;
1211
1212                                 spec = pattern->spec;
1213                                 flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
1214                                 flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
1215                                 flow->entry.tuple.eth_proto =
1216                                         RTE_ETHER_TYPE_IPV4;
1217                         }
1218                         break;
1219
1220                 case RTE_FLOW_ITEM_TYPE_IPV6:
1221                         l3 = true;
1222
1223                         if (flow) {
1224                                 const struct rte_flow_item_ipv6 *spec;
1225
1226                                 spec = pattern->spec;
1227                                 rte_memcpy(flow->entry.tuple.src_ipv6,
1228                                            spec->hdr.src_addr,
1229                                            IPV6_ADDR_LEN);
1230                                 rte_memcpy(flow->entry.tuple.dst_ipv6,
1231                                            spec->hdr.dst_addr,
1232                                            IPV6_ADDR_LEN);
1233                                 flow->entry.tuple.eth_proto =
1234                                         RTE_ETHER_TYPE_IPV6;
1235                         }
1236                         break;
1237
1238                 case RTE_FLOW_ITEM_TYPE_UDP:
1239                         l4 = true;
1240
1241                         if (flow) {
1242                                 const struct rte_flow_item_udp *spec;
1243
1244                                 spec = pattern->spec;
1245                                 flow->entry.tuple.src_port =
1246                                                 spec->hdr.src_port;
1247                                 flow->entry.tuple.dst_port =
1248                                                 spec->hdr.dst_port;
1249                                 flow->entry.tuple.ip_proto = IPPROTO_UDP;
1250                         }
1251                         break;
1252
1253                 case RTE_FLOW_ITEM_TYPE_TCP:
1254                         l4 = true;
1255
1256                         if (flow) {
1257                                 const struct rte_flow_item_tcp *spec;
1258
1259                                 spec = pattern->spec;
1260                                 flow->entry.tuple.src_port =
1261                                                 spec->hdr.src_port;
1262                                 flow->entry.tuple.dst_port =
1263                                                 spec->hdr.dst_port;
1264                                 flow->entry.tuple.ip_proto = IPPROTO_TCP;
1265                         }
1266
1267                         break;
1268                 default:
1269                         rte_flow_error_set(error, EINVAL,
1270                                            RTE_FLOW_ERROR_TYPE_ITEM,
1271                                            pattern,
1272                                            "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
1273                         return -rte_errno;
1274                 }
1275         }
1276
1277         if (!(l3 && l4)) {
1278                 rte_flow_error_set(error, EINVAL,
1279                                    RTE_FLOW_ERROR_TYPE_ITEM,
1280                                    pattern,
1281                                    "Item types need to have both L3 and L4 protocols");
1282                 return -rte_errno;
1283         }
1284
1285         return 0;
1286 }
1287
1288 static int
1289 qede_flow_parse_actions(struct rte_eth_dev *dev,
1290                         const struct rte_flow_action actions[],
1291                         struct rte_flow_error *error,
1292                         struct rte_flow *flow)
1293 {
1294         const struct rte_flow_action_queue *queue;
1295
1296         if (actions == NULL) {
1297                 rte_flow_error_set(error, EINVAL,
1298                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1299                                    "NULL actions");
1300                 return -rte_errno;
1301         }
1302
1303         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1304                 switch (actions->type) {
1305                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1306                         queue = actions->conf;
1307
1308                         if (queue->index >= QEDE_RSS_COUNT(dev)) {
1309                                 rte_flow_error_set(error, EINVAL,
1310                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1311                                                    actions,
1312                                                    "Bad QUEUE action");
1313                                 return -rte_errno;
1314                         }
1315
1316                         if (flow)
1317                                 flow->entry.rx_queue = queue->index;
1318
1319                         break;
1320                 case RTE_FLOW_ACTION_TYPE_DROP:
1321                         if (flow)
1322                                 flow->entry.is_drop = true;
1323                         break;
1324                 default:
1325                         rte_flow_error_set(error, ENOTSUP,
1326                                            RTE_FLOW_ERROR_TYPE_ACTION,
1327                                            actions,
1328                                            "Action is not supported - only ACTION_TYPE_QUEUE and ACTION_TYPE_DROP supported");
1329                         return -rte_errno;
1330                 }
1331         }
1332
1333         return 0;
1334 }
1335
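/* Validate the rte_flow attributes, pattern and actions; when a flow
 * object is passed in, its aRFS entry (4-tuple, Rx queue or drop action)
 * is filled along the way.
 */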
1336 static int
1337 qede_flow_parse(struct rte_eth_dev *dev,
1338                 const struct rte_flow_attr *attr,
1339                 const struct rte_flow_item patterns[],
1340                 const struct rte_flow_action actions[],
1341                 struct rte_flow_error *error,
1342                 struct rte_flow *flow)
1343
1344 {
1345         int rc = 0;
1346
1347         rc = qede_flow_validate_attr(dev, attr, error);
1348         if (rc)
1349                 return rc;
1350
1351         /* Parse and validate the item pattern and actions.
1352          * The given item list and actions are translated into the qede
1353          * PMD-specific arfs structure.
1354          */
1355         rc = qede_flow_parse_pattern(dev, patterns, error, flow);
1356         if (rc)
1357                 return rc;
1358
1359         rc = qede_flow_parse_actions(dev, actions, error, flow);
1360
1361         return rc;
1362 }
1363
1364 static int
1365 qede_flow_validate(struct rte_eth_dev *dev,
1366                    const struct rte_flow_attr *attr,
1367                    const struct rte_flow_item patterns[],
1368                    const struct rte_flow_action actions[],
1369                    struct rte_flow_error *error)
1370 {
1371         return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
1372 }
1373
1374 static struct rte_flow *
1375 qede_flow_create(struct rte_eth_dev *dev,
1376                  const struct rte_flow_attr *attr,
1377                  const struct rte_flow_item pattern[],
1378                  const struct rte_flow_action actions[],
1379                  struct rte_flow_error *error)
1380 {
1381         struct rte_flow *flow = NULL;
1382         int rc;
1383
1384         flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
1385         if (flow == NULL) {
1386                 rte_flow_error_set(error, ENOMEM,
1387                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1388                                    "Failed to allocate memory");
1389                 return NULL;
1390         }
1391
1392         rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
1393         if (rc < 0) {
1394                 rte_free(flow);
1395                 return NULL;
1396         }
1397
1398         rc = qede_config_arfs_filter(dev, &flow->entry, true);
1399         if (rc < 0) {
1400                 rte_flow_error_set(error, rc,
1401                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1402                                    "Failed to configure flow filter");
1403                 rte_free(flow);
1404                 return NULL;
1405         }
1406
1407         return flow;
1408 }
1409
1410 static int
1411 qede_flow_destroy(struct rte_eth_dev *eth_dev,
1412                   struct rte_flow *flow,
1413                   struct rte_flow_error *error)
1414 {
1415         int rc = 0;
1416
1417         rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
1418         if (rc < 0) {
1419                 rte_flow_error_set(error, rc,
1420                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1421                                    "Failed to delete flow filter");
1422                 rte_free(flow);
1423         }
1424
1425         return rc;
1426 }
1427
1428 static int
1429 qede_flow_flush(struct rte_eth_dev *eth_dev,
1430                 struct rte_flow_error *error)
1431 {
1432         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1433         struct qede_arfs_entry *tmp = NULL;
1434         int rc = 0;
1435
1436         while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
1437                 tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
1438
1439                 rc = qede_config_arfs_filter(eth_dev, tmp, false);
1440                 if (rc < 0)
1441                         rte_flow_error_set(error, rc,
1442                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1443                                            "Failed to flush flow filter");
1444         }
1445
1446         return rc;
1447 }
1448
1449 const struct rte_flow_ops qede_flow_ops = {
1450         .validate = qede_flow_validate,
1451         .create = qede_flow_create,
1452         .destroy = qede_flow_destroy,
1453         .flush = qede_flow_flush,
1454 };
1455
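/* Legacy filter_ctrl entry point: dispatches tunnel and flow director
 * requests and exposes qede_flow_ops for generic (rte_flow) filters.
 */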
1456 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
1457                          enum rte_filter_type filter_type,
1458                          enum rte_filter_op filter_op,
1459                          void *arg)
1460 {
1461         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1462         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1463         struct rte_eth_tunnel_filter_conf *filter_conf =
1464                         (struct rte_eth_tunnel_filter_conf *)arg;
1465
1466         switch (filter_type) {
1467         case RTE_ETH_FILTER_TUNNEL:
1468                 switch (filter_conf->tunnel_type) {
1469                 case RTE_TUNNEL_TYPE_VXLAN:
1470                 case RTE_TUNNEL_TYPE_GENEVE:
1471                 case RTE_TUNNEL_TYPE_IP_IN_GRE:
1472                         DP_INFO(edev,
1473                                 "Packet steering to the specified Rx queue"
1474                                 " is not supported with UDP tunneling");
1475                         return(qede_tunn_filter_config(eth_dev, filter_op,
1476                                                       filter_conf));
1477                 case RTE_TUNNEL_TYPE_TEREDO:
1478                 case RTE_TUNNEL_TYPE_NVGRE:
1479                 case RTE_L2_TUNNEL_TYPE_E_TAG:
1480                         DP_ERR(edev, "Unsupported tunnel type %d\n",
1481                                 filter_conf->tunnel_type);
1482                         return -EINVAL;
1483                 case RTE_TUNNEL_TYPE_NONE:
1484                 default:
1485                         return 0;
1486                 }
1487                 break;
1488         case RTE_ETH_FILTER_FDIR:
1489                 return qede_fdir_filter_conf(eth_dev, filter_op, arg);
1490         case RTE_ETH_FILTER_GENERIC:
1491                 if (ECORE_IS_CMT(edev)) {
1492                         DP_ERR(edev, "flowdir is not supported in 100G mode\n");
1493                         return -ENOTSUP;
1494                 }
1495
1496                 if (filter_op != RTE_ETH_FILTER_GET)
1497                         return -EINVAL;
1498
1499                 *(const void **)arg = &qede_flow_ops;
1500                 return 0;
1501         case RTE_ETH_FILTER_HASH:
1502         case RTE_ETH_FILTER_L2_TUNNEL:
1503         case RTE_ETH_FILTER_MAX:
1504         default:
1505                 DP_ERR(edev, "Unsupported filter type %d\n",
1506                         filter_type);
1507                 return -EINVAL;
1508         }
1509
1510         return 0;
1511 }