/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_errno.h>
#include <rte_flow_driver.h>

#include "qede_ethdev.h"

/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{
		ETH_TUNNEL_FILTER_OMAC,
		ECORE_FILTER_MAC,
		ECORE_TUNN_CLSS_MAC_VLAN,
		"outer-mac"
	},
	{
		ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_VNI,
		ECORE_TUNN_CLSS_MAC_VNI,
		"vni"
	},
	{
		ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_VLAN,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_MAC_VNI,
		"outer-mac and vni"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VNI,
		"vni and inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"vni and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_OIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-IP"
	},
	{
		ETH_TUNNEL_FILTER_IIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"inner-IP"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN_TENID"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_TENID"
	},
	{
		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"OMAC_TENID_IMAC"
	},
};
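
/* Illustrative sketch, not part of the driver: the table above maps a
 * combination of ETH_TUNNEL_FILTER_* bits to the ecore ucast filter type
 * and tunnel classification. A consumer (hypothetical helper shown here)
 * would scan it linearly:
 *
 *	static const struct _qede_udp_tunn_types *
 *	qede_tunn_type_lookup(uint16_t filter_type)
 *	{
 *		uint32_t i;
 *
 *		for (i = 0; i < RTE_DIM(qede_tunn_types); i++)
 *			if (qede_tunn_types[i].rte_filter_type == filter_type)
 *				return &qede_tunn_types[i];
 *		return NULL;
 *	}
 *
 * Entries carrying ECORE_FILTER_UNUSED/MAX_ECORE_TUNN_CLSS denote bit
 * combinations the device cannot classify on.
 */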

#define IP_VERSION				(0x40)
#define IP_HDRLEN				(0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL	(IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF		(0x50)
#define QEDE_FDIR_IPV4_DEF_TTL			(64)
#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW		(0x60000000)
/* Sum of the header lengths across L2, L3 and L4:
 * L2 : ether_hdr (14) + vlan_hdr (4) + vxlan_hdr (8)
 * L3 : ipv6_hdr (40)
 * L4 : tcp_hdr (20)
 * i.e. 14 + 4 + 8 + 40 + 20 = 86 bytes in the worst case.
 */
#define QEDE_MAX_FDIR_PKT_LEN			(86)

static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			void *buff,
			struct ecore_arfs_config_params *params);

/* Note: Flowdir support is only partial; for example, drop_queue,
 * FDIR masks and flex_conf are not supported. Parameters such as
 * pballoc/status fields are irrelevant here.
 */
int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;

	/* check FDIR modes */
	switch (fdir->mode) {
	case RTE_FDIR_MODE_NONE:
		qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
		DP_INFO(edev, "flowdir is disabled\n");
		break;
	case RTE_FDIR_MODE_PERFECT:
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			qdev->arfs_info.arfs.mode =
				ECORE_FILTER_CONFIG_MODE_DISABLE;
			return -ENOTSUP;
		}
		qdev->arfs_info.arfs.mode =
				ECORE_FILTER_CONFIG_MODE_5_TUPLE;
		DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
		break;
	case RTE_FDIR_MODE_PERFECT_TUNNEL:
	case RTE_FDIR_MODE_SIGNATURE:
	case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
		DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
		return -ENOTSUP;
	}

	return 0;
}
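
/* Illustrative sketch, not part of the driver: an application opts into
 * the 5-tuple mode accepted above by selecting the perfect filter mode in
 * the (legacy) fdir_conf before configuring the port, e.g.:
 *
 *	struct rte_eth_conf conf = { .fdir_conf.mode = RTE_FDIR_MODE_PERFECT };
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * port_id/nb_rxq/nb_txq are assumed to be set up by the caller; any other
 * fdir mode is rejected with -ENOTSUP.
 */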

void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct qede_arfs_entry *tmp;

	/* Pop entries from the head rather than using SLIST_FOREACH, which
	 * would advance through a node that has just been freed.
	 */
	while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
		tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
		if (tmp->mz)
			rte_memzone_free(tmp->mz);
		SLIST_REMOVE_HEAD(&qdev->arfs_info.arfs_list_head, list);
		rte_free(tmp);
	}
}

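/* Add or remove a single aRFS (flowdir) n-tuple filter.
 * A pseudo packet describing the tuple is built in a freshly reserved
 * memzone and handed to the ecore n-tuple filter primitive. Active
 * entries are tracked in arfs_list_head so that duplicates can be
 * rejected on add, missing entries detected on delete, and the aRFS
 * searcher disabled again once the last filter is removed.
 */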
static int
qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_ntuple_filter_params params;
	char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
	struct qede_arfs_entry *tmp = NULL;
	const struct rte_memzone *mz;
	struct ecore_hwfn *p_hwfn;
	enum _ecore_status_t rc;
	uint16_t pkt_len;
	void *pkt;

	if (add) {
		if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
			DP_ERR(edev, "Reached max flowdir filter limit\n");
			return -EINVAL;
		}
	}

	/* soft_id could have been used as the memzone name, but soft_id is
	 * not currently used, so a timestamp-based name is used instead.
	 */
	snprintf(mz_name, sizeof(mz_name), "%lx",
		 (unsigned long)rte_get_timer_cycles());
	mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
					 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
	if (!mz) {
		DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
		       rte_strerror(rte_errno));
		return -rte_errno;
	}

	pkt = mz->addr;
	memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
	pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
					  &qdev->arfs_info.arfs);
	if (pkt_len == 0) {
		rc = -EINVAL;
		goto err1;
	}

	DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
	if (add) {
		SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
			if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
				DP_INFO(edev, "flowdir filter already exists\n");
				rc = -EEXIST;
				goto err1;
			}
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
			if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
				break;
		}
		if (!tmp) {
			DP_ERR(edev, "flowdir filter does not exist\n");
			rc = -ENOENT;
			goto err1;
		}
	}
	p_hwfn = ECORE_LEADING_HWFN(edev);
	if (add) {
		if (qdev->arfs_info.arfs.mode ==
		    ECORE_FILTER_CONFIG_MODE_DISABLE) {
			/* Force update */
			eth_dev->data->dev_conf.fdir_conf.mode =
						RTE_FDIR_MODE_PERFECT;
			qdev->arfs_info.arfs.mode =
					ECORE_FILTER_CONFIG_MODE_5_TUPLE;
			DP_INFO(edev, "Force enable flowdir in perfect mode\n");
		}
		/* Enable ARFS searcher with updated flow_types */
		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
					  &qdev->arfs_info.arfs);
	}

	memset(&params, 0, sizeof(params));
	params.addr = (dma_addr_t)mz->iova;
	params.length = pkt_len;
	params.qid = arfs->rx_queue;
	params.vport_id = 0;
	params.b_is_add = add;
	params.b_is_drop = arfs->is_drop;

	/* configure filter with ECORE_SPQ_MODE_EBLOCK */
	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
					       &params);
	if (rc == ECORE_SUCCESS) {
		if (add) {
			arfs->pkt_len = pkt_len;
			arfs->mz = mz;
			SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
					  arfs, list);
			qdev->arfs_info.filter_count++;
			DP_INFO(edev, "flowdir filter added, count = %d\n",
				qdev->arfs_info.filter_count);
		} else {
			rte_memzone_free(tmp->mz);
			SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
				     qede_arfs_entry, list);
			rte_free(tmp); /* free the deleted list node */
			rte_memzone_free(mz); /* free the temporary pseudo packet */
			qdev->arfs_info.filter_count--;
			DP_INFO(edev, "Fdir filter deleted, count = %d\n",
				qdev->arfs_info.filter_count);
		}
	} else {
		DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
		       rc, qdev->arfs_info.filter_count);
	}

	/* Disable ARFS searcher if there are no more filters */
	if (qdev->arfs_info.filter_count == 0) {
		memset(&qdev->arfs_info.arfs, 0,
		       sizeof(struct ecore_arfs_config_params));
		DP_INFO(edev, "Disabling flowdir\n");
		qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
					  &qdev->arfs_info.arfs);
	}
	/* Propagate the ecore status to the caller (ECORE_SUCCESS == 0) so
	 * a hardware failure is not silently reported as success.
	 */
	return rc;

err1:
	rte_memzone_free(mz);
	return rc;
}

/* Fills the L2 ethertype and the L3/L4 headers, and returns the actual
 * length of the flowdir pseudo packet.
 */
static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			void *buff,
			struct ecore_arfs_config_params *params)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint16_t *ether_type;
	uint8_t *raw_pkt;
	struct rte_ipv4_hdr *ip;
	struct rte_ipv6_hdr *ip6;
	struct rte_udp_hdr *udp;
	struct rte_tcp_hdr *tcp;
	uint16_t len;

	raw_pkt = (uint8_t *)buff;

	len = 2 * sizeof(struct rte_ether_addr);
	raw_pkt += 2 * sizeof(struct rte_ether_addr);
	ether_type = (uint16_t *)raw_pkt;
	raw_pkt += sizeof(uint16_t);
	len += sizeof(uint16_t);

	*ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
	switch (arfs->tuple.eth_proto) {
	case RTE_ETHER_TYPE_IPV4:
		ip = (struct rte_ipv4_hdr *)raw_pkt;
		ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
		ip->total_length = sizeof(struct rte_ipv4_hdr);
		ip->next_proto_id = arfs->tuple.ip_proto;
		ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
		ip->dst_addr = arfs->tuple.dst_ipv4;
		ip->src_addr = arfs->tuple.src_ipv4;
		len += sizeof(struct rte_ipv4_hdr);
		params->ipv4 = true;

		raw_pkt = (uint8_t *)buff;
		/* UDP */
		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
			udp = (struct rte_udp_hdr *)(raw_pkt + len);
			udp->dst_port = arfs->tuple.dst_port;
			udp->src_port = arfs->tuple.src_port;
			udp->dgram_len = sizeof(struct rte_udp_hdr);
			len += sizeof(struct rte_udp_hdr);
			/* adjust ip total_length */
			ip->total_length += sizeof(struct rte_udp_hdr);
			params->udp = true;
		} else { /* TCP */
			tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
			tcp->src_port = arfs->tuple.src_port;
			tcp->dst_port = arfs->tuple.dst_port;
			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
			len += sizeof(struct rte_tcp_hdr);
			/* adjust ip total_length */
			ip->total_length += sizeof(struct rte_tcp_hdr);
			params->tcp = true;
		}
		break;
	case RTE_ETHER_TYPE_IPV6:
		ip6 = (struct rte_ipv6_hdr *)raw_pkt;
		ip6->proto = arfs->tuple.ip_proto;
		ip6->vtc_flow =
			rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);

		rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
			   IPV6_ADDR_LEN);
		rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
			   IPV6_ADDR_LEN);
		len += sizeof(struct rte_ipv6_hdr);
		params->ipv6 = true;

		raw_pkt = (uint8_t *)buff;
		/* UDP */
		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
			udp = (struct rte_udp_hdr *)(raw_pkt + len);
			udp->src_port = arfs->tuple.src_port;
			udp->dst_port = arfs->tuple.dst_port;
			len += sizeof(struct rte_udp_hdr);
			params->udp = true;
		} else { /* TCP */
			tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
			tcp->src_port = arfs->tuple.src_port;
			tcp->dst_port = arfs->tuple.dst_port;
			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
			len += sizeof(struct rte_tcp_hdr);
			params->tcp = true;
		}
		break;
	default:
		DP_ERR(edev, "Unsupported eth_proto %u\n",
		       arfs->tuple.eth_proto);
		return 0;
	}

	return len;
}
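
/* Resulting pseudo-packet layout for the IPv4/TCP case (byte offsets):
 *
 *	 0: dst MAC (6) + src MAC (6)	- left zeroed, not matched on
 *	12: ethertype (2)
 *	14: IPv4 header (20)		- src/dst address from the tuple
 *	34: TCP header (20)		- src/dst port from the tuple
 *
 * i.e. len = 12 + 2 + 20 + 20 = 54, well under QEDE_MAX_FDIR_PKT_LEN (86),
 * which is sized for the larger VLAN + VXLAN + IPv6 + TCP case.
 */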

static int
qede_tunnel_update(struct qede_dev *qdev,
		   struct ecore_tunnel_info *tunn_info)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		if (IS_PF(edev)) {
			p_ptt = ecore_ptt_acquire(p_hwfn);
			if (!p_ptt) {
				DP_ERR(p_hwfn, "Can't acquire PTT\n");
				return -EAGAIN;
			}
		} else {
			p_ptt = NULL;
		}

		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
				tunn_info, ECORE_SPQ_MODE_CB, NULL);
		if (IS_PF(edev))
			ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

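/* qede_vxlan_enable()/qede_geneve_enable() toggle a tunnel offload for the
 * whole port: tunnel mode, Rx/Tx classification and the default UDP
 * destination port are updated in one PF-update ramrod per hwfn via
 * qede_tunnel_update().
 */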
static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	if (qdev->vxlan.enable == enable)
		return ECORE_SUCCESS;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.vxlan.b_update_mode = true;
	tunn.vxlan.b_mode_enabled = enable;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;
	tunn.vxlan.tun_cls = clss;

	tunn.vxlan_port.b_update_port = true;
	tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->vxlan.enable = enable;
		qdev->vxlan.udp_port = enable ? QEDE_VXLAN_DEF_PORT : 0;
		DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.vxlan.tun_cls);
	}

	return rc;
}

static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		   bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.l2_geneve.b_update_mode = true;
	tunn.l2_geneve.b_mode_enabled = enable;
	tunn.ip_geneve.b_update_mode = true;
	tunn.ip_geneve.b_mode_enabled = enable;
	tunn.l2_geneve.tun_cls = clss;
	tunn.ip_geneve.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	tunn.geneve_port.b_update_port = true;
	tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->geneve.enable = enable;
		qdev->geneve.udp_port = enable ? QEDE_GENEVE_DEF_PORT : 0;
		DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->geneve.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       clss);
	}

	return rc;
}

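/* UDP tunnel port hooks backing the rte_eth_dev_udp_tunnel_port_add/delete
 * API. Deleting a port also turns the tunnel offload off once no tunnel
 * filters remain; adding a port implicitly enables the offload first when
 * needed.
 */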
int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}
		udp_port = 0;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.vxlan_port.port);
			return rc;
		}

		qdev->vxlan.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * VXLAN filters has reached 0, then VXLAN offload can be
		 * disabled.
		 */
		if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
			return qede_vxlan_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}

		udp_port = 0;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.geneve_port.port);
			return rc;
		}

		qdev->geneve.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * GENEVE filters has reached 0, then GENEVE offload can be
		 * disabled.
		 */
		if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
			return qede_geneve_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;

	default:
		return ECORE_INVAL;
	}

	return 0;
}

int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for VXLAN was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable VXLAN tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding VXLAN filter before UDP port
		 * update.
		 */
		if (!qdev->vxlan.enable) {
			rc = qede_vxlan_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable VXLAN "
					"prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);

		qdev->vxlan.udp_port = udp_port;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for GENEVE was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable GENEVE tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding GENEVE filter before UDP port
		 * update.
		 */
		if (!qdev->geneve.enable) {
			rc = qede_geneve_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable GENEVE "
					"prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);

		qdev->geneve.udp_port = udp_port;
		break;
	default:
		return ECORE_INVAL;
	}

	return 0;
}
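
/* Illustrative sketch, not part of the driver: the two handlers above are
 * reached through the generic UDP tunnel port API, e.g.:
 *
 *	struct rte_eth_udp_tunnel tunnel_udp = {
 *		.udp_port = 4790,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
 *	...
 *	rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel_udp);
 *
 * port_id is assumed to refer to a configured qede port.
 */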

static int
qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}

	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}

	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}

	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}

	if (attr->transfer != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
				   "Transfer is not supported");
		return -rte_errno;
	}

	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	return 0;
}

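/* Translate an rte_flow item list into the qede 4-tuple arfs entry.
 * Only IPV4/IPV6 plus UDP/TCP items are accepted, every item must carry
 * a spec, and neither masks nor ranges ("last") are supported. When flow
 * is NULL the pattern is validated only.
 */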
static int
qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
			const struct rte_flow_item pattern[],
			struct rte_flow_error *error,
			struct rte_flow *flow)
{
	bool l3 = false, l4 = false;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		if (!pattern->spec) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern,
					   "Item spec not defined");
			return -rte_errno;
		}

		if (pattern->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern,
					   "Item last not supported");
			return -rte_errno;
		}

		if (pattern->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern,
					   "Item mask not supported");
			return -rte_errno;
		}

		/* Below validation is only for 4 tuple flow
		 * (GFT_PROFILE_TYPE_4_TUPLE)
		 * - src and dst L3 address (IPv4 or IPv6)
		 * - src and dst L4 port (TCP or UDP)
		 */

		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = true;

			if (flow) {
				const struct rte_flow_item_ipv4 *spec;

				spec = pattern->spec;
				flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
				flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
				flow->entry.tuple.eth_proto =
					RTE_ETHER_TYPE_IPV4;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = true;

			if (flow) {
				const struct rte_flow_item_ipv6 *spec;

				spec = pattern->spec;
				rte_memcpy(flow->entry.tuple.src_ipv6,
					   spec->hdr.src_addr,
					   IPV6_ADDR_LEN);
				rte_memcpy(flow->entry.tuple.dst_ipv6,
					   spec->hdr.dst_addr,
					   IPV6_ADDR_LEN);
				flow->entry.tuple.eth_proto =
					RTE_ETHER_TYPE_IPV6;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			l4 = true;

			if (flow) {
				const struct rte_flow_item_udp *spec;

				spec = pattern->spec;
				flow->entry.tuple.src_port =
						spec->hdr.src_port;
				flow->entry.tuple.dst_port =
						spec->hdr.dst_port;
				flow->entry.tuple.ip_proto = IPPROTO_UDP;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			l4 = true;

			if (flow) {
				const struct rte_flow_item_tcp *spec;

				spec = pattern->spec;
				flow->entry.tuple.src_port =
						spec->hdr.src_port;
				flow->entry.tuple.dst_port =
						spec->hdr.dst_port;
				flow->entry.tuple.ip_proto = IPPROTO_TCP;
			}

			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern,
					   "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
			return -rte_errno;
		}
	}

	if (!(l3 && l4)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pattern,
				   "Item types need to have both L3 and L4 protocols");
		return -rte_errno;
	}

	return 0;
}

static int
qede_flow_parse_actions(struct rte_eth_dev *dev,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct rte_flow *flow)
{
	const struct rte_flow_action_queue *queue;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			queue = actions->conf;

			if (queue->index >= QEDE_RSS_COUNT(dev)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Bad QUEUE action");
				return -rte_errno;
			}

			if (flow)
				flow->entry.rx_queue = queue->index;

			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			if (flow)
				flow->entry.is_drop = true;
			break;
		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   actions,
					   "Action is not supported - only ACTION_TYPE_QUEUE and ACTION_TYPE_DROP supported");
			return -rte_errno;
		}
	}

	return 0;
}

static int
qede_flow_parse(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item patterns[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct rte_flow *flow)
{
	int rc = 0;

	rc = qede_flow_validate_attr(dev, attr, error);
	if (rc)
		return rc;

	/* Parse and validate the item pattern and the actions. The given
	 * item list and actions are translated into the qede PMD-specific
	 * arfs structure when a flow is supplied.
	 */
	rc = qede_flow_parse_pattern(dev, patterns, error, flow);
	if (rc)
		return rc;

	rc = qede_flow_parse_actions(dev, actions, error, flow);

	return rc;
}

static int
qede_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item patterns[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
}

static struct rte_flow *
qede_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		return NULL;
	}

	rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
	if (rc < 0) {
		rte_free(flow);
		return NULL;
	}

	rc = qede_config_arfs_filter(dev, &flow->entry, true);
	if (rc < 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to configure flow filter");
		rte_free(flow);
		return NULL;
	}

	return flow;
}

static int
qede_flow_destroy(struct rte_eth_dev *eth_dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	int rc = 0;

	/* On success the matching list node (the arfs entry embedded in
	 * this handle) is freed inside qede_config_arfs_filter(), so the
	 * handle must only be freed here on failure.
	 */
	rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
	if (rc < 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to delete flow filter");
		rte_free(flow);
	}

	return rc;
}

static int
qede_flow_flush(struct rte_eth_dev *eth_dev,
		struct rte_flow_error *error)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct qede_arfs_entry *tmp = NULL;
	int rc = 0;

	while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
		tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);

		rc = qede_config_arfs_filter(eth_dev, tmp, false);
		if (rc < 0) {
			rte_flow_error_set(error, rc,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to flush flow filter");
			/* A failed delete leaves the entry on the list;
			 * bail out rather than retrying forever.
			 */
			break;
		}
	}

	return rc;
}

const struct rte_flow_ops qede_flow_ops = {
	.validate = qede_flow_validate,
	.create = qede_flow_create,
	.destroy = qede_flow_destroy,
	.flush = qede_flow_flush,
};
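
/* Illustrative sketch, not part of the driver: a rule this PMD accepts,
 * matching an IPv4/UDP 4-tuple and steering it to Rx queue 1. Every item
 * carries a spec and no mask/last, per qede_flow_parse_pattern(); port_id
 * and the addresses/ports are assumptions of the example.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = RTE_BE16(4000),
 *		.hdr.dst_port = RTE_BE16(5000),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */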

int
qede_dev_flow_ops_get(struct rte_eth_dev *eth_dev,
		      const struct rte_flow_ops **ops)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (ECORE_IS_CMT(edev)) {
		DP_ERR(edev, "flowdir is not supported in 100G mode\n");
		return -ENOTSUP;
	}

	*ops = &qede_flow_ops;
	return 0;
}