/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_errno.h>

#include "qede_ethdev.h"
/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{
		ETH_TUNNEL_FILTER_OMAC,
		ECORE_FILTER_MAC,
		ECORE_TUNN_CLSS_MAC_VLAN,
		"outer-mac"
	},
	{
		ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_VNI,
		ECORE_TUNN_CLSS_MAC_VNI,
		"vni"
	},
	{
		ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_VLAN,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_MAC_VNI,
		"outer-mac and vni"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VNI,
		"vni and inner-mac",
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"vni and inner-vlan",
	},
	{
		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac and inner-vlan",
	},
	{
		ETH_TUNNEL_FILTER_OIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-IP"
	},
	{
		ETH_TUNNEL_FILTER_IIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"inner-IP"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN_TENID"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_TENID"
	},
	{
		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"OMAC_TENID_IMAC"
	},
};
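/* Note (added): entries carrying ECORE_FILTER_UNUSED/MAX_ECORE_TUNN_CLSS mark
 * filter combinations the device cannot classify on; qede_get_ecore_tunn_params()
 * returns MAX_ECORE_TUNN_CLSS for them and the caller rejects the request.
 */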
#define IP_VERSION				(0x40)
#define IP_HDRLEN				(0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL	(IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF		(0x50)
#define QEDE_FDIR_IPV4_DEF_TTL			(64)
#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW		(0x60000000)

/* Sum of length of header types of L2, L3, L4.
 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
 * L3 : ipv6_hdr
 * L4 : tcp_hdr
 */
#define QEDE_MAX_FDIR_PKT_LEN			(86)
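/* Worked out (illustrative): 14 (ether_hdr) + 4 (vlan_hdr) + 8 (vxlan_hdr)
 * + 40 (ipv6_hdr) + 20 (tcp_hdr) = 86 bytes, i.e. the largest template
 * packet qede_arfs_construct_pkt() can emit.
 */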
static inline bool qede_valid_flow(uint16_t flow_type)
{
	return  ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
		 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
		 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
		 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
}
static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			void *buff,
			struct ecore_arfs_config_params *params);
/* Note: Flowdir support is only partial.
 * For example: drop_queue, FDIR masks, flex_conf are not supported.
 * Parameters like pballoc/status fields are irrelevant here.
 */
int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
	/* check FDIR modes */
	switch (fdir->mode) {
	case RTE_FDIR_MODE_NONE:
		qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
		DP_INFO(edev, "flowdir is disabled\n");
	break;
	case RTE_FDIR_MODE_PERFECT:
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			qdev->arfs_info.arfs.mode =
				ECORE_FILTER_CONFIG_MODE_DISABLE;
			return -ENOTSUP;
		}
		qdev->arfs_info.arfs.mode =
			ECORE_FILTER_CONFIG_MODE_5_TUPLE;
		DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
	break;
	case RTE_FDIR_MODE_PERFECT_TUNNEL:
	case RTE_FDIR_MODE_SIGNATURE:
	case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
		DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
		return -ENOTSUP;
	}

	return 0;
}
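/* Example (illustrative): an application opts into perfect-match flowdir
 * through the port configuration it passes to rte_eth_dev_configure():
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */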
void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct qede_arfs_entry *tmp = NULL;

	SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
		if (tmp) {
			if (tmp->mz)
				rte_memzone_free(tmp->mz);
			SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
				     qede_arfs_entry, list);
			rte_free(tmp);
		}
	}
}
static int
qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
			 struct rte_eth_fdir_filter *fdir,
			 struct qede_arfs_entry *arfs)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_fdir_input *input;

	static const uint8_t next_proto[] = {
		[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
	};

	input = &fdir->input;

	DP_INFO(edev, "flow_type %d\n", input->flow_type);

	switch (input->flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		/* fill the common ip header */
		arfs->tuple.eth_proto = ETHER_TYPE_IPv4;
		arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
		arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
		arfs->tuple.ip_proto = next_proto[input->flow_type];

		/* UDP */
		if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
			arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
			arfs->tuple.src_port = input->flow.udp4_flow.src_port;
		} else { /* TCP */
			arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
			arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
		}
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		arfs->tuple.eth_proto = ETHER_TYPE_IPv6;
		arfs->tuple.ip_proto = next_proto[input->flow_type];
		rte_memcpy(arfs->tuple.dst_ipv6,
			   &input->flow.ipv6_flow.dst_ip,
			   IPV6_ADDR_LEN);
		rte_memcpy(arfs->tuple.src_ipv6,
			   &input->flow.ipv6_flow.src_ip,
			   IPV6_ADDR_LEN);

		/* UDP */
		if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
			arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
			arfs->tuple.src_port = input->flow.udp6_flow.src_port;
		} else { /* TCP */
			arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
			arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
		}
		break;
	default:
		DP_ERR(edev, "Unsupported flow_type %u\n",
		       input->flow_type);
		return -ENOTSUP;
	}

	arfs->rx_queue = fdir->action.rx_queue;
	return 0;
}
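/* Note (added for clarity): the function below programs the filter by
 * building a template packet that encodes the 5-tuple in a DMA-able
 * memzone; the raw packet bytes also double as the key used to detect
 * duplicate filters.
 */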
static int
qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
	struct qede_arfs_entry *tmp = NULL;
	const struct rte_memzone *mz;
	struct ecore_hwfn *p_hwfn;
	enum _ecore_status_t rc;
	uint16_t pkt_len;
	void *pkt;

	if (add) {
		if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
			DP_ERR(edev, "Reached max flowdir filter limit\n");
			return -EINVAL;
		}
	}

	/* soft_id could have been used as memzone string, but soft_id is
	 * not currently used so it has no significance.
	 */
	snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
		 (unsigned long)rte_get_timer_cycles());
	mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
					 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
	if (!mz) {
		DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
		       rte_strerror(rte_errno));
		return -rte_errno;
	}

	pkt = mz->addr;
	memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
	pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
					  &qdev->arfs_info.arfs);
	if (pkt_len == 0) {
		rc = -EINVAL;
		goto err1;
	}

	DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
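	/* Duplicate detection (added note): two filters are considered the
	 * same entry iff their constructed template packets match
	 * byte-for-byte.
	 */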
	if (add) {
		SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
			if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
				DP_INFO(edev, "flowdir filter exist\n");
				rc = -EEXIST;
				goto err1;
			}
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
			if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
				break;
		}
		if (!tmp) {
			DP_ERR(edev, "flowdir filter does not exist\n");
			rc = -EEXIST;
			goto err1;
		}
	}

	p_hwfn = ECORE_LEADING_HWFN(edev);
	if (add) {
		if (qdev->arfs_info.arfs.mode ==
		    ECORE_FILTER_CONFIG_MODE_DISABLE) {
			/* Force update */
			eth_dev->data->dev_conf.fdir_conf.mode =
						RTE_FDIR_MODE_PERFECT;
			qdev->arfs_info.arfs.mode =
					ECORE_FILTER_CONFIG_MODE_5_TUPLE;
			DP_INFO(edev, "Force enable flowdir in perfect mode\n");
		}
		/* Enable ARFS searcher with updated flow_types */
		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
					  &qdev->arfs_info.arfs);
	}

	/* configure filter with ECORE_SPQ_MODE_EBLOCK */
	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
					       (dma_addr_t)mz->iova,
					       pkt_len,
					       arfs->rx_queue,
					       0, add);
	if (rc == ECORE_SUCCESS) {
		if (add) {
			arfs->pkt_len = pkt_len;
			arfs->mz = mz;
			SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
					  arfs, list);
			qdev->arfs_info.filter_count++;
			DP_INFO(edev, "flowdir filter added, count = %d\n",
				qdev->arfs_info.filter_count);
		} else {
			rte_memzone_free(tmp->mz);
			SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
				     qede_arfs_entry, list);
			rte_free(tmp); /* the node deleted */
			rte_memzone_free(mz); /* temp node allocated */
			qdev->arfs_info.filter_count--;
			DP_INFO(edev, "Fdir filter deleted, count = %d\n",
				qdev->arfs_info.filter_count);
		}
	} else {
		DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
		       rc, qdev->arfs_info.filter_count);
	}

	/* Disable ARFS searcher if there are no more filters */
	if (qdev->arfs_info.filter_count == 0) {
		memset(&qdev->arfs_info.arfs, 0,
		       sizeof(struct ecore_arfs_config_params));
		DP_INFO(edev, "Disabling flowdir\n");
		qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
					  &qdev->arfs_info.arfs);
	}
	return 0;

err1:
	rte_memzone_free(mz);
	return rc;
}
static int
qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
			    struct rte_eth_fdir_filter *fdir_filter,
			    bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_arfs_entry *arfs = NULL;
	int rc = 0;

	arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
			  RTE_CACHE_LINE_SIZE);
	if (!arfs) {
		DP_ERR(edev, "Did not allocate memory for arfs\n");
		return -ENOMEM;
	}

	rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
	if (rc < 0) {
		rte_free(arfs);
		return rc;
	}

	rc = qede_config_arfs_filter(eth_dev, arfs, add);
	if (rc < 0)
		rte_free(arfs);

	return rc;
}
static int
qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
		     struct rte_eth_fdir_filter *fdir,
		     bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (!qede_valid_flow(fdir->input.flow_type)) {
		DP_ERR(edev, "invalid flow_type input\n");
		return -EINVAL;
	}

	if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
		DP_ERR(edev, "invalid queue number %u\n",
		       fdir->action.rx_queue);
		return -EINVAL;
	}

	if (fdir->input.flow_ext.is_vf) {
		DP_ERR(edev, "flowdir is not supported over VF\n");
		return -EINVAL;
	}

	return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
}
/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			void *buff,
			struct ecore_arfs_config_params *params)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint16_t *ether_type;
	uint8_t *raw_pkt;
	struct ipv4_hdr *ip;
	struct ipv6_hdr *ip6;
	struct udp_hdr *udp;
	struct tcp_hdr *tcp;
	uint16_t len;

	raw_pkt = (uint8_t *)buff;

	len = 2 * sizeof(struct ether_addr);
	raw_pkt += 2 * sizeof(struct ether_addr);
	ether_type = (uint16_t *)raw_pkt;
	raw_pkt += sizeof(uint16_t);
	len += sizeof(uint16_t);
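	/* Note (added): the leading 12 MAC-address bytes are left zeroed;
	 * the ARFS searcher keys only on the L3/L4 fields filled in below.
	 */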

	*ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
	switch (arfs->tuple.eth_proto) {
	case ETHER_TYPE_IPv4:
		ip = (struct ipv4_hdr *)raw_pkt;
		ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
		ip->total_length = sizeof(struct ipv4_hdr);
		ip->next_proto_id = arfs->tuple.ip_proto;
		ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
		ip->dst_addr = arfs->tuple.dst_ipv4;
		ip->src_addr = arfs->tuple.src_ipv4;
		len += sizeof(struct ipv4_hdr);
		params->ipv4 = true;

		raw_pkt = (uint8_t *)buff;
		/* UDP */
		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
			udp = (struct udp_hdr *)(raw_pkt + len);
			udp->dst_port = arfs->tuple.dst_port;
			udp->src_port = arfs->tuple.src_port;
			udp->dgram_len = sizeof(struct udp_hdr);
			len += sizeof(struct udp_hdr);
			/* adjust ip total_length */
			ip->total_length += sizeof(struct udp_hdr);
			params->udp = true;
		} else { /* TCP */
			tcp = (struct tcp_hdr *)(raw_pkt + len);
			tcp->src_port = arfs->tuple.src_port;
			tcp->dst_port = arfs->tuple.dst_port;
			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
			len += sizeof(struct tcp_hdr);
			/* adjust ip total_length */
			ip->total_length += sizeof(struct tcp_hdr);
			params->tcp = true;
		}
		break;
	case ETHER_TYPE_IPv6:
		ip6 = (struct ipv6_hdr *)raw_pkt;
		ip6->proto = arfs->tuple.ip_proto;
		ip6->vtc_flow =
			rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);

		rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
			   IPV6_ADDR_LEN);
		rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
			   IPV6_ADDR_LEN);
		len += sizeof(struct ipv6_hdr);
		params->ipv6 = true;

		raw_pkt = (uint8_t *)buff;
		/* UDP */
		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
			udp = (struct udp_hdr *)(raw_pkt + len);
			udp->src_port = arfs->tuple.src_port;
			udp->dst_port = arfs->tuple.dst_port;
			len += sizeof(struct udp_hdr);
			params->udp = true;
		} else { /* TCP */
			tcp = (struct tcp_hdr *)(raw_pkt + len);
			tcp->src_port = arfs->tuple.src_port;
			tcp->dst_port = arfs->tuple.dst_port;
			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
			len += sizeof(struct tcp_hdr);
			params->tcp = true;
		}
		break;
	default:
		DP_ERR(edev, "Unsupported eth_proto %u\n",
		       arfs->tuple.eth_proto);
		return 0;
	}

	return len;
}
int
qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_fdir_filter *fdir;
	int ret;

	fdir = (struct rte_eth_fdir_filter *)arg;
	switch (filter_op) {
	case RTE_ETH_FILTER_NOP:
		/* Typically used to query flowdir support */
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			return -ENOTSUP;
		}
		return 0; /* means supported */
	case RTE_ETH_FILTER_ADD:
		ret = qede_fdir_filter_add(eth_dev, fdir, 1);
	break;
	case RTE_ETH_FILTER_DELETE:
		ret = qede_fdir_filter_add(eth_dev, fdir, 0);
	break;
	case RTE_ETH_FILTER_FLUSH:
	case RTE_ETH_FILTER_UPDATE:
	case RTE_ETH_FILTER_INFO:
		return -ENOTSUP;
	default:
		DP_ERR(edev, "unknown operation %u\n", filter_op);
		ret = -EINVAL;
	}

	return ret;
}
int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
			    enum rte_filter_op filter_op,
			    void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_ntuple_filter *ntuple;
	struct rte_eth_fdir_filter fdir_entry;
	struct rte_eth_tcpv4_flow *tcpv4_flow;
	struct rte_eth_udpv4_flow *udpv4_flow;
	bool add = false;

	switch (filter_op) {
	case RTE_ETH_FILTER_NOP:
		/* Typically used to query fdir support */
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			return -ENOTSUP;
		}
		return 0; /* means supported */
	case RTE_ETH_FILTER_ADD:
		add = true;
	break;
	case RTE_ETH_FILTER_DELETE:
	break;
	case RTE_ETH_FILTER_INFO:
	case RTE_ETH_FILTER_GET:
	case RTE_ETH_FILTER_UPDATE:
	case RTE_ETH_FILTER_FLUSH:
	case RTE_ETH_FILTER_SET:
	case RTE_ETH_FILTER_STATS:
	case RTE_ETH_FILTER_OP_MAX:
		DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
		return -ENOTSUP;
	}

	ntuple = (struct rte_eth_ntuple_filter *)arg;
	/* Internally convert ntuple to fdir entry */
	memset(&fdir_entry, 0, sizeof(fdir_entry));
	if (ntuple->proto == IPPROTO_TCP) {
		fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
		tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
		tcpv4_flow->ip.src_ip = ntuple->src_ip;
		tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
		tcpv4_flow->ip.proto = IPPROTO_TCP;
		tcpv4_flow->src_port = ntuple->src_port;
		tcpv4_flow->dst_port = ntuple->dst_port;
	} else { /* UDP */
		fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
		udpv4_flow = &fdir_entry.input.flow.udp4_flow;
		udpv4_flow->ip.src_ip = ntuple->src_ip;
		udpv4_flow->ip.dst_ip = ntuple->dst_ip;
		udpv4_flow->ip.proto = IPPROTO_UDP;
		udpv4_flow->src_port = ntuple->src_port;
		udpv4_flow->dst_port = ntuple->dst_port;
	}

	fdir_entry.action.rx_queue = ntuple->queue;

	return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
}
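/* Example (illustrative): a 5-tuple filter submitted via the legacy filter
 * API lands here and is converted to a flowdir entry above:
 *
 *	struct rte_eth_ntuple_filter nt = { 0 };
 *
 *	nt.flags = RTE_5TUPLE_FLAGS;
 *	nt.proto = IPPROTO_UDP;
 *	nt.dst_port = rte_cpu_to_be_16(4789);
 *	nt.queue = 1;
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *				RTE_ETH_FILTER_ADD, &nt);
 */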
static int
qede_tunnel_update(struct qede_dev *qdev,
		   struct ecore_tunnel_info *tunn_info)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		if (IS_PF(edev)) {
			p_ptt = ecore_ptt_acquire(p_hwfn);
			if (!p_ptt) {
				DP_ERR(p_hwfn, "Can't acquire PTT\n");
				return -EAGAIN;
			}
		} else {
			p_ptt = NULL;
		}

		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
				tunn_info, ECORE_SPQ_MODE_CB, NULL);
		if (IS_PF(edev))
			ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}
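/* Note (added): the tunnel configuration ramrod in qede_tunnel_update() is
 * issued per hw-function, so on CMT (two-engine) devices both engines get
 * the same tunnel settings.
 */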
static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	if (qdev->vxlan.enable == enable)
		return ECORE_SUCCESS;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.vxlan.b_update_mode = true;
	tunn.vxlan.b_mode_enabled = enable;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;
	tunn.vxlan.tun_cls = clss;

	tunn.vxlan_port.b_update_port = true;
	tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->vxlan.enable = enable;
		qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
		DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.vxlan.tun_cls);
	}

	return rc;
}
static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		   bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.l2_geneve.b_update_mode = true;
	tunn.l2_geneve.b_mode_enabled = enable;
	tunn.ip_geneve.b_update_mode = true;
	tunn.ip_geneve.b_mode_enabled = enable;
	tunn.l2_geneve.tun_cls = clss;
	tunn.ip_geneve.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	tunn.geneve_port.b_update_port = true;
	tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->geneve.enable = enable;
		qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
		DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->geneve.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.l2_geneve.tun_cls);
	}

	return rc;
}
static int
qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.ip_gre.b_update_mode = true;
	tunn.ip_gre.b_mode_enabled = enable;
	tunn.ip_gre.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->ipgre.enable = enable;
		DP_INFO(edev, "IPGRE is %s\n",
			enable ? "enabled" : "disabled");
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.ip_gre.tun_cls);
	}

	return rc;
}
int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}

		udp_port = 0;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.vxlan_port.port);
			return rc;
		}

		qdev->vxlan.udp_port = udp_port;
		/* If the request is to delete UDP port and if the number of
		 * VXLAN filters have reached 0 then VXLAN offload can be
		 * disabled.
		 */
		if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
			return qede_vxlan_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}

		udp_port = 0;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.geneve_port.port);
			return rc;
		}

		qdev->geneve.udp_port = udp_port;
		/* If the request is to delete UDP port and if the number of
		 * GENEVE filters have reached 0 then GENEVE offload can be
		 * disabled.
		 */
		if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
			return qede_geneve_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;
	default:
		return ECORE_INVAL;
	}

	return 0;
}
int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for VXLAN was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable VxLAN tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding VXLAN filter before UDP port
		 * update.
		 */
		if (!qdev->vxlan.enable) {
			rc = qede_vxlan_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable VXLAN "
					"prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);

		qdev->vxlan.udp_port = udp_port;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for GENEVE was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable GENEVE tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding GENEVE filter before UDP port
		 * update.
		 */
		if (!qdev->geneve.enable) {
			rc = qede_geneve_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable GENEVE "
					"prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);

		qdev->geneve.udp_port = udp_port;
		break;
	default:
		return ECORE_INVAL;
	}

	return 0;
}
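/* Example (illustrative): these handlers back the ethdev UDP tunnel API:
 *
 *	struct rte_eth_udp_tunnel tunnel_udp = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
 *	rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel_udp);
 */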
static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
				       uint32_t *clss, char *str)
{
	uint16_t j;

	*clss = MAX_ECORE_TUNN_CLSS;

	for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
		if (filter == qede_tunn_types[j].rte_filter_type) {
			*type = qede_tunn_types[j].qede_type;
			*clss = qede_tunn_types[j].qede_tunn_clss;
			strcpy(str, qede_tunn_types[j].string);
			break;
		}
	}
}
static int
qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
			      const struct rte_eth_tunnel_filter_conf *conf,
			      enum ecore_filter_ucast_type type)
{
	/* Init common ucast params first */
	qede_set_ucast_cmn_params(ucast);

	/* Copy out the required fields based on classification type */
	ucast->type = type;

	switch (type) {
	case ECORE_FILTER_VNI:
		ucast->vni = conf->tenant_id;
	break;
	case ECORE_FILTER_INNER_VLAN:
		ucast->vlan = conf->inner_vlan;
	break;
	case ECORE_FILTER_MAC:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
	break;
	case ECORE_FILTER_INNER_MAC:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
	break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
	break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
	break;
	case ECORE_FILTER_INNER_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vlan = conf->inner_vlan;
	break;
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
static int
_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
			 const struct rte_eth_tunnel_filter_conf *conf,
			 __attribute__((unused)) enum rte_filter_op filter_op,
			 enum ecore_tunn_clss *clss,
			 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_ucast ucast = {0};
	enum ecore_filter_ucast_type type;
	uint16_t filter_type = 0;
	char str[80];
	int rc;

	filter_type = conf->filter_type;
	/* Determine if the given filter classification is supported */
	qede_get_ecore_tunn_params(filter_type, &type, clss, str);
	if (*clss == MAX_ECORE_TUNN_CLSS) {
		DP_ERR(edev, "Unsupported filter type\n");
		return -EINVAL;
	}
	/* Init tunnel ucast params */
	rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
		       conf->filter_type);
		return rc;
	}
	DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
		str, filter_op, ucast.type);

	ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;

	/* Skip MAC/VLAN if filter is based on VNI */
	if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
		rc = qede_mac_int_ops(eth_dev, &ucast, add);
		if (rc == 0 && add) {
			/* Enable accept anyvlan */
			qede_config_accept_any_vlan(qdev, true);
		}
	} else {
		rc = qede_ucast_filter(eth_dev, &ucast, add);
		if (rc == 0)
			rc = ecore_filter_ucast_cmd(edev, &ucast,
						    ECORE_SPQ_MODE_CB, NULL);
	}

	return rc;
}
static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		 enum rte_eth_tunnel_type tunn_type, bool enable)
{
	int rc = -EINVAL;

	switch (tunn_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		rc = qede_vxlan_enable(eth_dev, clss, enable);
	break;
	case RTE_TUNNEL_TYPE_GENEVE:
		rc = qede_geneve_enable(eth_dev, clss, enable);
	break;
	case RTE_TUNNEL_TYPE_IP_IN_GRE:
		rc = qede_ipgre_enable(eth_dev, clss, enable);
	break;
	default:
		rc = -EINVAL;
	break;
	}

	return rc;
}
static int
qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op,
			const struct rte_eth_tunnel_filter_conf *conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
	bool add;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		add = true;
	break;
	case RTE_ETH_FILTER_DELETE:
		add = false;
	break;
	default:
		DP_ERR(edev, "Unsupported operation %d\n", filter_op);
		return -EINVAL;
	}

	if (IS_VF(edev))
		return qede_tunn_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN,
					conf->tunnel_type, add);

	rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (add) {
		if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
			qdev->vxlan.num_filters++;
			qdev->vxlan.filter_type = conf->filter_type;
		} else { /* GENEVE */
			qdev->geneve.num_filters++;
			qdev->geneve.filter_type = conf->filter_type;
		}

		if (!qdev->vxlan.enable || !qdev->geneve.enable ||
		    !qdev->ipgre.enable)
			return qede_tunn_enable(eth_dev, clss,
						conf->tunnel_type, true);
	} else {
		if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
			qdev->vxlan.num_filters--;
		else /* GENEVE */
			qdev->geneve.num_filters--;

		/* Disable tunnel offload if the VXLAN or GENEVE filter
		 * count drops to 0.
		 */
		if (qdev->vxlan.num_filters == 0 ||
		    qdev->geneve.num_filters == 0)
			return qede_tunn_enable(eth_dev, clss,
						conf->tunnel_type, false);
	}

	return 0;
}
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op,
			 void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_tunnel_filter_conf *filter_conf =
			(struct rte_eth_tunnel_filter_conf *)arg;

	switch (filter_type) {
	case RTE_ETH_FILTER_TUNNEL:
		switch (filter_conf->tunnel_type) {
		case RTE_TUNNEL_TYPE_VXLAN:
		case RTE_TUNNEL_TYPE_GENEVE:
		case RTE_TUNNEL_TYPE_IP_IN_GRE:
			DP_INFO(edev,
				"Packet steering to the specified Rx queue"
				" is not supported with UDP tunneling\n");
			return qede_tunn_filter_config(eth_dev, filter_op,
						       filter_conf);
		case RTE_TUNNEL_TYPE_TEREDO:
		case RTE_TUNNEL_TYPE_NVGRE:
		case RTE_L2_TUNNEL_TYPE_E_TAG:
			DP_ERR(edev, "Unsupported tunnel type %d\n",
			       filter_conf->tunnel_type);
			return -EINVAL;
		case RTE_TUNNEL_TYPE_NONE:
		default:
			return 0;
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		return qede_fdir_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_NTUPLE:
		return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_MACVLAN:
	case RTE_ETH_FILTER_ETHERTYPE:
	case RTE_ETH_FILTER_FLEXIBLE:
	case RTE_ETH_FILTER_SYN:
	case RTE_ETH_FILTER_HASH:
	case RTE_ETH_FILTER_L2_TUNNEL:
	case RTE_ETH_FILTER_MAX:
	default:
		DP_ERR(edev, "Unsupported filter type %d\n",