1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2017 Cavium Inc.
10 #include <rte_errno.h>
12 #include "qede_ethdev.h"
/* Lookup table mapping an rte_eth tunnel-filter type bitmask
 * (ETH_TUNNEL_FILTER_* / RTE_TUNNEL_FILTER_*) to the ecore unicast filter
 * type and ecore tunnel classification used to program the adapter, plus a
 * human-readable description string for logging.
 * NOTE(review): several table rows are only partially visible in this
 * listing (lines dropped during extraction); the missing fields exist in
 * the full source -- do not treat absent initializers here as a bug.
 * NOTE(review): the file-scope struct tag "_qede_udp_tunn_types" begins
 * with an underscore, which is a reserved identifier at file scope in C;
 * consider renaming (confirm against the complete source tree).
 */
14 /* VXLAN tunnel classification mapping */
15 const struct _qede_udp_tunn_types {
16 uint16_t rte_filter_type;
17 enum ecore_filter_ucast_type qede_type;
18 enum ecore_tunn_clss qede_tunn_clss;
20 } qede_tunn_types[] = {
22 ETH_TUNNEL_FILTER_OMAC,
24 ECORE_TUNN_CLSS_MAC_VLAN,
28 ETH_TUNNEL_FILTER_TENID,
30 ECORE_TUNN_CLSS_MAC_VNI,
34 ETH_TUNNEL_FILTER_IMAC,
35 ECORE_FILTER_INNER_MAC,
36 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
40 ETH_TUNNEL_FILTER_IVLAN,
41 ECORE_FILTER_INNER_VLAN,
42 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
46 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
47 ECORE_FILTER_MAC_VNI_PAIR,
48 ECORE_TUNN_CLSS_MAC_VNI,
52 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
55 "outer-mac and inner-mac"
58 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
61 "outer-mac and inner-vlan"
64 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
65 ECORE_FILTER_INNER_MAC_VNI_PAIR,
66 ECORE_TUNN_CLSS_INNER_MAC_VNI,
70 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
76 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
77 ECORE_FILTER_INNER_PAIR,
78 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
79 "inner-mac and inner-vlan",
82 ETH_TUNNEL_FILTER_OIP,
88 ETH_TUNNEL_FILTER_IIP,
94 RTE_TUNNEL_FILTER_IMAC_IVLAN,
100 RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
106 RTE_TUNNEL_FILTER_IMAC_TENID,
112 RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
/* Default L3/L4 header field values used when building the flow-director
 * probe packet in qede_fdir_construct_pkt(). */
119 #define IP_VERSION (0x40) /* IPv4 version nibble in the high 4 bits */
120 #define IP_HDRLEN (0x5) /* IHL = 5 words (20-byte header, no options) */
121 #define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
122 #define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50) /* TCP data offset = 5 words */
123 #define QEDE_FDIR_IPV4_DEF_TTL (64)
125 /* Sum of length of header types of L2, L3, L4.
126 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
130 #define QEDE_MAX_FDIR_PKT_LEN (86)
/* NOTE(review): the matching #endif for this #ifndef is outside the
 * visible portion of this listing -- confirm it exists in full source. */
132 #ifndef IPV6_ADDR_LEN
133 #define IPV6_ADDR_LEN (16)
136 static inline bool qede_valid_flow(uint16_t flow_type)
138 return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
139 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
140 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
141 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
144 /* Note: Flowdir support is only partial.
145 * For ex: drop_queue, FDIR masks, flex_conf are not supported.
146 * Parameters like pballoc/status fields are irrelevant here.
148 int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
150 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
151 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
152 struct rte_fdir_conf *fdir = ð_dev->data->dev_conf.fdir_conf;
154 /* check FDIR modes */
155 switch (fdir->mode) {
156 case RTE_FDIR_MODE_NONE:
157 qdev->fdir_info.arfs.arfs_enable = false;
158 DP_INFO(edev, "flowdir is disabled\n");
160 case RTE_FDIR_MODE_PERFECT:
161 if (ECORE_IS_CMT(edev)) {
162 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
163 qdev->fdir_info.arfs.arfs_enable = false;
166 qdev->fdir_info.arfs.arfs_enable = true;
167 DP_INFO(edev, "flowdir is enabled\n");
169 case RTE_FDIR_MODE_PERFECT_TUNNEL:
170 case RTE_FDIR_MODE_SIGNATURE:
171 case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
172 DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
/* Free every flow-director entry on teardown: release each entry's memzone
 * and unlink it from the list.
 * NOTE(review): SLIST_REMOVE is invoked on the current node inside
 * SLIST_FOREACH; if the node itself is freed (the rte_free is not visible
 * in this listing) the iterator then reads freed memory.  Prefer
 * SLIST_FOREACH_SAFE (or repeated SLIST_FIRST removal) -- confirm against
 * the full source. */
179 void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
181 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
182 struct qede_fdir_entry *tmp = NULL;
184 SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
187 rte_memzone_free(tmp->mz);
188 SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
189 qede_fdir_entry, list);
/* Common add/delete path for a flow-director filter.
 * Builds a probe packet describing the flow into a freshly reserved
 * memzone, checks the duplicate/existence invariant against the list of
 * programmed filters, then programs (or removes) the RFS n-tuple filter
 * through ecore and updates the software list and ARFS searcher state.
 * NOTE(review): lines are missing from this listing (add/delete branch
 * structure, error-path gotos, pkt/pkt_len declarations); comments below
 * describe only what is visible. */
196 qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
197 struct rte_eth_fdir_filter *fdir_filter,
200 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
201 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
202 char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
203 struct qede_fdir_entry *tmp = NULL;
204 struct qede_fdir_entry *fdir = NULL;
205 const struct rte_memzone *mz;
206 struct ecore_hwfn *p_hwfn;
207 enum _ecore_status_t rc;
/* NOTE(review): rejecting at count == QEDE_RFS_MAX_FLTR - 1 caps usable
 * filters at MAX-2 on the add path -- looks off-by-one; confirm intent. */
212 if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
213 DP_ERR(edev, "Reached max flowdir filter limit\n");
216 fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
217 RTE_CACHE_LINE_SIZE);
219 DP_ERR(edev, "Did not allocate memory for fdir\n");
223 /* soft_id could have been used as memzone string, but soft_id is
224 * not currently used so it has no significance.
/* NOTE(review): timer-cycle-derived memzone names can collide if two
 * filters are added within the same tick -- TODO confirm acceptable. */
226 snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
227 (unsigned long)rte_get_timer_cycles());
228 mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
229 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
231 DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
232 rte_strerror(rte_errno));
/* Build the probe packet describing this flow into the memzone buffer. */
238 memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
239 pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
240 &qdev->fdir_info.arfs);
245 DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
/* Add path: refuse duplicates (byte-compare against stored packets). */
247 SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
248 if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
249 DP_INFO(edev, "flowdir filter exist\n");
/* Delete path: the filter must already be present in the list. */
255 SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
256 if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
260 DP_ERR(edev, "flowdir filter does not exist\n");
265 p_hwfn = ECORE_LEADING_HWFN(edev);
/* Lazily force-enable flowdir (perfect mode) on first filter add. */
267 if (!qdev->fdir_info.arfs.arfs_enable) {
269 eth_dev->data->dev_conf.fdir_conf.mode =
270 RTE_FDIR_MODE_PERFECT;
271 qdev->fdir_info.arfs.arfs_enable = true;
272 DP_INFO(edev, "Force enable flowdir in perfect mode\n");
274 /* Enable ARFS searcher with updated flow_types */
275 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
276 &qdev->fdir_info.arfs);
278 /* configure filter with ECORE_SPQ_MODE_EBLOCK */
279 rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
280 (dma_addr_t)mz->iova,
282 fdir_filter->action.rx_queue,
/* On success: add path records the new entry, delete path drops the
 * matched one and its temporary memzone. */
284 if (rc == ECORE_SUCCESS) {
286 fdir->rx_queue = fdir_filter->action.rx_queue;
287 fdir->pkt_len = pkt_len;
289 SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
291 qdev->fdir_info.filter_count++;
292 DP_INFO(edev, "flowdir filter added, count = %d\n",
293 qdev->fdir_info.filter_count);
295 rte_memzone_free(tmp->mz);
296 SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
297 qede_fdir_entry, list);
298 rte_free(tmp); /* the node deleted */
299 rte_memzone_free(mz); /* temp node allocated */
300 qdev->fdir_info.filter_count--;
301 DP_INFO(edev, "Fdir filter deleted, count = %d\n",
302 qdev->fdir_info.filter_count);
305 DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
306 rc, qdev->fdir_info.filter_count);
309 /* Disable ARFS searcher if there are no more filters */
310 if (qdev->fdir_info.filter_count == 0) {
311 memset(&qdev->fdir_info.arfs, 0,
312 sizeof(struct ecore_arfs_config_params));
313 DP_INFO(edev, "Disabling flowdir\n")
314 qdev->fdir_info.arfs.arfs_enable = false;
315 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
316 &qdev->fdir_info.arfs);
/* Error path: release the memzone reserved above (fdir entry presumably
 * freed on a path not visible in this listing -- verify no leak). */
321 rte_memzone_free(mz);
/* Validate a flow-director filter request (flow type, Rx queue bound,
 * no-VF restriction) and hand it to the common add/delete path.
 * "add" is nonzero to add the filter, zero to delete it. */
329 qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
330 struct rte_eth_fdir_filter *fdir,
333 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
334 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
/* Only non-frag IPv4/IPv6 TCP/UDP flows are programmable. */
336 if (!qede_valid_flow(fdir->input.flow_type)) {
337 DP_ERR(edev, "invalid flow_type input\n");
/* Target queue must be one of this port's RSS queues. */
341 if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
342 DP_ERR(edev, "invalid queue number %u\n",
343 fdir->action.rx_queue);
/* Flowdir is a PF-only feature in this PMD. */
347 if (fdir->input.flow_ext.is_vf) {
348 DP_ERR(edev, "flowdir is not supported over VF\n");
352 return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
355 /* Fills the L3/L4 headers and returns the actual length of flowdir packet */
/* Builds the probe packet used to program an RFS filter: a zeroed L2
 * header (optionally with a VLAN tag), then the IPv4/IPv6 header and the
 * TCP/UDP header populated from the fdir input fields.  Several lines
 * (variable declarations, some braces/breaks) are missing from this
 * listing. */
357 qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
358 struct rte_eth_fdir_filter *fdir,
360 struct ecore_arfs_config_params *params)
363 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
364 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
365 uint16_t *ether_type;
367 struct rte_eth_fdir_input *input;
368 static uint8_t vlan_frame[] = {0x81, 0, 0, 0}; /* TPID 0x8100, TCI 0 */
370 struct ipv6_hdr *ip6;
/* Default L4 protocol per flow type, used when proto is not given. */
374 static const uint8_t next_proto[] = {
375 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
376 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
377 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
378 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
380 raw_pkt = (uint8_t *)buff;
381 input = &fdir->input;
382 DP_INFO(edev, "flow_type %d\n", input->flow_type);
/* Skip over dst+src MAC (left zeroed); optionally insert a VLAN tag. */
384 len = 2 * sizeof(struct ether_addr);
385 raw_pkt += 2 * sizeof(struct ether_addr);
386 if (input->flow_ext.vlan_tci) {
387 DP_INFO(edev, "adding VLAN header\n");
388 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
389 rte_memcpy(raw_pkt + sizeof(uint16_t),
390 &input->flow_ext.vlan_tci,
392 raw_pkt += sizeof(vlan_frame);
393 len += sizeof(vlan_frame);
/* Remember where the EtherType goes; filled per flow type below. */
395 ether_type = (uint16_t *)raw_pkt;
396 raw_pkt += sizeof(uint16_t);
397 len += sizeof(uint16_t);
399 switch (input->flow_type) {
400 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
401 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
402 /* fill the common ip header */
403 ip = (struct ipv4_hdr *)raw_pkt;
404 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
405 ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
/* NOTE(review): total_length/dgram_len below are written in host byte
 * order; acceptable only if the device/firmware expects that -- confirm. */
406 ip->total_length = sizeof(struct ipv4_hdr);
407 ip->next_proto_id = input->flow.ip4_flow.proto ?
408 input->flow.ip4_flow.proto :
409 next_proto[input->flow_type];
410 ip->time_to_live = input->flow.ip4_flow.ttl ?
411 input->flow.ip4_flow.ttl :
412 QEDE_FDIR_IPV4_DEF_TTL;
413 ip->type_of_service = input->flow.ip4_flow.tos;
414 ip->dst_addr = input->flow.ip4_flow.dst_ip;
415 ip->src_addr = input->flow.ip4_flow.src_ip;
416 len += sizeof(struct ipv4_hdr);
419 raw_pkt = (uint8_t *)buff;
421 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
422 udp = (struct udp_hdr *)(raw_pkt + len);
423 udp->dst_port = input->flow.udp4_flow.dst_port;
424 udp->src_port = input->flow.udp4_flow.src_port;
425 udp->dgram_len = sizeof(struct udp_hdr);
426 len += sizeof(struct udp_hdr);
427 /* adjust ip total_length */
428 ip->total_length += sizeof(struct udp_hdr);
431 tcp = (struct tcp_hdr *)(raw_pkt + len);
432 tcp->src_port = input->flow.tcp4_flow.src_port;
433 tcp->dst_port = input->flow.tcp4_flow.dst_port;
434 tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
435 len += sizeof(struct tcp_hdr);
436 /* adjust ip total_length */
437 ip->total_length += sizeof(struct tcp_hdr);
441 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
442 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
443 ip6 = (struct ipv6_hdr *)raw_pkt;
444 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
445 ip6->proto = input->flow.ipv6_flow.proto ?
446 input->flow.ipv6_flow.proto :
447 next_proto[input->flow_type];
/* BUG(review): src_addr is filled from dst_ip and dst_addr from src_ip --
 * the IPv6 addresses are swapped relative to the IPv4 branch.  Fix by
 * copying src_ip into src_addr and dst_ip into dst_addr. */
448 rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
450 rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
452 len += sizeof(struct ipv6_hdr);
454 raw_pkt = (uint8_t *)buff;
456 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
/* BUG(review): src_port/dst_port are cross-assigned here (src <- dst_port,
 * dst <- src_port), unlike the IPv4 UDP branch above -- looks swapped. */
457 udp = (struct udp_hdr *)(raw_pkt + len);
458 udp->src_port = input->flow.udp6_flow.dst_port;
459 udp->dst_port = input->flow.udp6_flow.src_port;
460 len += sizeof(struct udp_hdr);
/* BUG(review): IPv6 TCP branch reads flow.tcp4_flow -- tcp4_flow and
 * tcp6_flow place the ports at different union offsets, so this reads the
 * wrong bytes.  Should use flow.tcp6_flow. */
463 tcp = (struct tcp_hdr *)(raw_pkt + len);
464 tcp->src_port = input->flow.tcp4_flow.src_port;
465 tcp->dst_port = input->flow.tcp4_flow.dst_port;
466 tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
467 len += sizeof(struct tcp_hdr);
472 DP_ERR(edev, "Unsupported flow_type %u\n",
/* RTE_ETH_FILTER_FDIR dispatch: NOP queries support (fails on 100G/CMT),
 * ADD/DELETE forward to qede_fdir_filter_add(); other ops unsupported. */
481 qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
482 enum rte_filter_op filter_op,
485 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
486 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
487 struct rte_eth_fdir_filter *fdir;
/* NOTE(review): arg is cast unconditionally; it is only dereferenced by
 * the ADD/DELETE paths, so a NULL arg for NOP is harmless. */
490 fdir = (struct rte_eth_fdir_filter *)arg;
492 case RTE_ETH_FILTER_NOP:
493 /* Typically used to query flowdir support */
494 if (ECORE_IS_CMT(edev)) {
495 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
498 return 0; /* means supported */
499 case RTE_ETH_FILTER_ADD:
500 ret = qede_fdir_filter_add(eth_dev, fdir, 1);
502 case RTE_ETH_FILTER_DELETE:
503 ret = qede_fdir_filter_add(eth_dev, fdir, 0);
505 case RTE_ETH_FILTER_FLUSH:
506 case RTE_ETH_FILTER_UPDATE:
507 case RTE_ETH_FILTER_INFO:
511 DP_ERR(edev, "unknown operation %u", filter_op);
/* RTE_ETH_FILTER_NTUPLE dispatch: converts an ntuple filter into an
 * equivalent fdir entry (IPv4 TCP/UDP only) and reuses the common fdir
 * add/delete path.  NOP queries support; other ops are rejected. */
518 int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
519 enum rte_filter_op filter_op,
522 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
523 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
524 struct rte_eth_ntuple_filter *ntuple;
525 struct rte_eth_fdir_filter fdir_entry;
526 struct rte_eth_tcpv4_flow *tcpv4_flow;
527 struct rte_eth_udpv4_flow *udpv4_flow;
531 case RTE_ETH_FILTER_NOP:
532 /* Typically used to query fdir support */
533 if (ECORE_IS_CMT(edev)) {
534 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
537 return 0; /* means supported */
538 case RTE_ETH_FILTER_ADD:
541 case RTE_ETH_FILTER_DELETE:
543 case RTE_ETH_FILTER_INFO:
544 case RTE_ETH_FILTER_GET:
545 case RTE_ETH_FILTER_UPDATE:
546 case RTE_ETH_FILTER_FLUSH:
547 case RTE_ETH_FILTER_SET:
548 case RTE_ETH_FILTER_STATS:
549 case RTE_ETH_FILTER_OP_MAX:
550 DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
553 ntuple = (struct rte_eth_ntuple_filter *)arg;
554 /* Internally convert ntuple to fdir entry */
555 memset(&fdir_entry, 0, sizeof(fdir_entry));
556 if (ntuple->proto == IPPROTO_TCP) {
557 fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
558 tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
559 tcpv4_flow->ip.src_ip = ntuple->src_ip;
560 tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
561 tcpv4_flow->ip.proto = IPPROTO_TCP;
562 tcpv4_flow->src_port = ntuple->src_port;
563 tcpv4_flow->dst_port = ntuple->dst_port;
/* UDP (non-TCP) branch mirrors the TCP conversion above. */
565 fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
566 udpv4_flow = &fdir_entry.input.flow.udp4_flow;
567 udpv4_flow->ip.src_ip = ntuple->src_ip;
568 udpv4_flow->ip.dst_ip = ntuple->dst_ip;
/* BUG(review): UDP flow is tagged with IPPROTO_TCP -- copy-paste from the
 * TCP branch; should be IPPROTO_UDP so the constructed packet's L3 proto
 * matches the UDP flow type. */
569 udpv4_flow->ip.proto = IPPROTO_TCP;
570 udpv4_flow->src_port = ntuple->src_port;
571 udpv4_flow->dst_port = ntuple->dst_port;
574 fdir_entry.action.rx_queue = ntuple->queue;
576 return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
/* Push a tunnel configuration update to every hwfn of the device via a
 * PF-update SPQ entry; returns the first failing status (or the status of
 * the last hwfn on success).  PTT acquire failure aborts the loop. */
580 qede_tunnel_update(struct qede_dev *qdev,
581 struct ecore_tunnel_info *tunn_info)
583 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
584 enum _ecore_status_t rc = ECORE_INVAL;
585 struct ecore_hwfn *p_hwfn;
586 struct ecore_ptt *p_ptt;
589 for_each_hwfn(edev, i) {
590 p_hwfn = &edev->hwfns[i];
592 p_ptt = ecore_ptt_acquire(p_hwfn);
594 DP_ERR(p_hwfn, "Can't acquire PTT\n");
601 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
602 tunn_info, ECORE_SPQ_MODE_CB, NULL);
604 ecore_ptt_release(p_hwfn, p_ptt);
/* Stop at the first hwfn that fails to take the update. */
606 if (rc != ECORE_SUCCESS)
/* Enable/disable VXLAN tunnel offload with the given ecore classification.
 * No-op (success) when the requested state already matches; on success the
 * cached enable flag and UDP port (default port when enabling, 0 when
 * disabling) are updated. */
614 qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
617 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
618 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
619 enum _ecore_status_t rc = ECORE_INVAL;
620 struct ecore_tunnel_info tunn;
/* Idempotence guard: nothing to do if already in the requested state. */
622 if (qdev->vxlan.enable == enable)
623 return ECORE_SUCCESS;
625 memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
626 tunn.vxlan.b_update_mode = true;
627 tunn.vxlan.b_mode_enabled = enable;
628 tunn.b_update_rx_cls = true;
629 tunn.b_update_tx_cls = true;
630 tunn.vxlan.tun_cls = clss;
632 tunn.vxlan_port.b_update_port = true;
633 tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
635 rc = qede_tunnel_update(qdev, &tunn);
636 if (rc == ECORE_SUCCESS) {
637 qdev->vxlan.enable = enable;
638 qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
639 DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
640 enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
642 DP_ERR(edev, "Failed to update tunn_clss %u\n",
/* Enable/disable GENEVE tunnel offload (both L2 and IP GENEVE modes) with
 * the given ecore classification; on success caches the enable flag and
 * UDP port (default when enabling, 0 when disabling).
 * NOTE(review): unlike qede_vxlan_enable() there is no early-exit when the
 * requested state already matches qdev->geneve.enable -- inconsistent;
 * confirm whether the guard was dropped intentionally. */
650 qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
653 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
654 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
655 enum _ecore_status_t rc = ECORE_INVAL;
656 struct ecore_tunnel_info tunn;
658 memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
659 tunn.l2_geneve.b_update_mode = true;
660 tunn.l2_geneve.b_mode_enabled = enable;
661 tunn.ip_geneve.b_update_mode = true;
662 tunn.ip_geneve.b_mode_enabled = enable;
663 tunn.l2_geneve.tun_cls = clss;
664 tunn.ip_geneve.tun_cls = clss;
665 tunn.b_update_rx_cls = true;
666 tunn.b_update_tx_cls = true;
668 tunn.geneve_port.b_update_port = true;
669 tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
671 rc = qede_tunnel_update(qdev, &tunn);
672 if (rc == ECORE_SUCCESS) {
673 qdev->geneve.enable = enable;
674 qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
675 DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
676 enable ? "enabled" : "disabled", qdev->geneve.udp_port);
678 DP_ERR(edev, "Failed to update tunn_clss %u\n",
686 qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
689 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
690 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
691 enum _ecore_status_t rc = ECORE_INVAL;
692 struct ecore_tunnel_info tunn;
694 memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
695 tunn.ip_gre.b_update_mode = true;
696 tunn.ip_gre.b_mode_enabled = enable;
697 tunn.ip_gre.tun_cls = clss;
698 tunn.ip_gre.tun_cls = clss;
699 tunn.b_update_rx_cls = true;
700 tunn.b_update_tx_cls = true;
702 rc = qede_tunnel_update(qdev, &tunn);
703 if (rc == ECORE_SUCCESS) {
704 qdev->ipgre.enable = enable;
705 DP_INFO(edev, "IPGRE is %s\n",
706 enable ? "enabled" : "disabled");
708 DP_ERR(edev, "Failed to update tunn_clss %u\n",
/* .udp_tunnel_port_del handler: clears the VXLAN/GENEVE UDP destination
 * port in hardware (programs port 0) and, when the tunnel has no remaining
 * filters, disables the tunnel offload entirely. */
716 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
717 struct rte_eth_udp_tunnel *tunnel_udp)
719 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
720 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
721 struct ecore_tunnel_info tunn; /* @DPDK */
725 PMD_INIT_FUNC_TRACE(edev);
727 memset(&tunn, 0, sizeof(tunn));
729 switch (tunnel_udp->prot_type) {
730 case RTE_TUNNEL_TYPE_VXLAN:
/* Only the currently-programmed port can be deleted. */
731 if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
732 DP_ERR(edev, "UDP port %u doesn't exist\n",
733 tunnel_udp->udp_port);
/* udp_port is set to 0 on a line not visible in this listing. */
738 tunn.vxlan_port.b_update_port = true;
739 tunn.vxlan_port.port = udp_port;
741 rc = qede_tunnel_update(qdev, &tunn);
742 if (rc != ECORE_SUCCESS) {
743 DP_ERR(edev, "Unable to config UDP port %u\n",
744 tunn.vxlan_port.port);
748 qdev->vxlan.udp_port = udp_port;
749 /* If the request is to delete UDP port and if the number of
750 * VXLAN filters have reached 0 then VxLAN offload can be be
753 if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
754 return qede_vxlan_enable(eth_dev,
755 ECORE_TUNN_CLSS_MAC_VLAN, false);
758 case RTE_TUNNEL_TYPE_GENEVE:
759 if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
760 DP_ERR(edev, "UDP port %u doesn't exist\n",
761 tunnel_udp->udp_port);
767 tunn.geneve_port.b_update_port = true;
768 tunn.geneve_port.port = udp_port;
770 rc = qede_tunnel_update(qdev, &tunn);
771 if (rc != ECORE_SUCCESS) {
/* BUG(review): GENEVE branch logs tunn.vxlan_port.port -- copy-paste;
 * should log tunn.geneve_port.port. */
772 DP_ERR(edev, "Unable to config UDP port %u\n",
773 tunn.vxlan_port.port);
/* BUG(review): GENEVE branch updates qdev->vxlan.udp_port -- copy-paste;
 * should update qdev->geneve.udp_port. */
777 qdev->vxlan.udp_port = udp_port;
778 /* If the request is to delete UDP port and if the number of
779 * GENEVE filters have reached 0 then GENEVE offload can be be
782 if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
783 return qede_geneve_enable(eth_dev,
784 ECORE_TUNN_CLSS_MAC_VLAN, false);
/* .udp_tunnel_port_add handler: programs the given UDP destination port
 * for VXLAN/GENEVE classification, first force-enabling the tunnel offload
 * with the default MAC/VLAN classification if it was still off.
 * Re-adding the already-programmed port is a success no-op. */
796 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
797 struct rte_eth_udp_tunnel *tunnel_udp)
799 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
800 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
801 struct ecore_tunnel_info tunn; /* @DPDK */
805 PMD_INIT_FUNC_TRACE(edev);
807 memset(&tunn, 0, sizeof(tunn));
809 switch (tunnel_udp->prot_type) {
810 case RTE_TUNNEL_TYPE_VXLAN:
811 if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
813 "UDP port %u for VXLAN was already configured\n",
814 tunnel_udp->udp_port);
815 return ECORE_SUCCESS;
818 /* Enable VxLAN tunnel with default MAC/VLAN classification if
819 * it was not enabled while adding VXLAN filter before UDP port
822 if (!qdev->vxlan.enable) {
823 rc = qede_vxlan_enable(eth_dev,
824 ECORE_TUNN_CLSS_MAC_VLAN, true);
825 if (rc != ECORE_SUCCESS) {
826 DP_ERR(edev, "Failed to enable VXLAN "
827 "prior to updating UDP port\n");
831 udp_port = tunnel_udp->udp_port;
833 tunn.vxlan_port.b_update_port = true;
834 tunn.vxlan_port.port = udp_port;
836 rc = qede_tunnel_update(qdev, &tunn);
837 if (rc != ECORE_SUCCESS) {
838 DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
/* Cache the newly programmed port on success. */
843 DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
845 qdev->vxlan.udp_port = udp_port;
847 case RTE_TUNNEL_TYPE_GENEVE:
848 if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
850 "UDP port %u for GENEVE was already configured\n",
851 tunnel_udp->udp_port);
852 return ECORE_SUCCESS;
855 /* Enable GENEVE tunnel with default MAC/VLAN classification if
856 * it was not enabled while adding GENEVE filter before UDP port
859 if (!qdev->geneve.enable) {
860 rc = qede_geneve_enable(eth_dev,
861 ECORE_TUNN_CLSS_MAC_VLAN, true);
862 if (rc != ECORE_SUCCESS) {
863 DP_ERR(edev, "Failed to enable GENEVE "
864 "prior to updating UDP port\n");
868 udp_port = tunnel_udp->udp_port;
870 tunn.geneve_port.b_update_port = true;
871 tunn.geneve_port.port = udp_port;
873 rc = qede_tunnel_update(qdev, &tunn);
874 if (rc != ECORE_SUCCESS) {
875 DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
880 DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
882 qdev->geneve.udp_port = udp_port;
/* Translate an rte tunnel-filter type bitmask into the ecore ucast filter
 * type, tunnel classification and description string via the
 * qede_tunn_types table.  *clss stays MAX_ECORE_TUNN_CLSS (sentinel for
 * "unsupported") when no table row matches.
 * NOTE(review): strcpy() is unbounded; str must be at least as large as
 * the longest table string -- callers pass a fixed buffer, confirm size. */
891 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
892 uint32_t *clss, char *str)
895 *clss = MAX_ECORE_TUNN_CLSS;
897 for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
898 if (filter == qede_tunn_types[j].rte_filter_type) {
899 *type = qede_tunn_types[j].qede_type;
900 *clss = qede_tunn_types[j].qede_tunn_clss;
901 strcpy(str, qede_tunn_types[j].string);
/* Populate a ucast filter command for a tunnel filter: initialize the
 * common ucast fields, then copy MAC/VLAN/VNI out of the rte tunnel filter
 * config according to the ecore filter classification type.
 * Returns ECORE_SUCCESS; unsupported types are handled on lines not
 * visible in this listing. */
908 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
909 const struct rte_eth_tunnel_filter_conf *conf,
912 /* Init commmon ucast params first */
913 qede_set_ucast_cmn_params(ucast);
915 /* Copy out the required fields based on classification type */
919 case ECORE_FILTER_VNI:
920 ucast->vni = conf->tenant_id;
922 case ECORE_FILTER_INNER_VLAN:
923 ucast->vlan = conf->inner_vlan;
925 case ECORE_FILTER_MAC:
926 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
929 case ECORE_FILTER_INNER_MAC:
930 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
933 case ECORE_FILTER_MAC_VNI_PAIR:
934 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
936 ucast->vni = conf->tenant_id;
938 case ECORE_FILTER_INNER_MAC_VNI_PAIR:
939 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
941 ucast->vni = conf->tenant_id;
943 case ECORE_FILTER_INNER_PAIR:
944 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
946 ucast->vlan = conf->inner_vlan;
952 return ECORE_SUCCESS;
/* Resolve the requested tunnel filter into an ecore classification and a
 * ucast filter command, then program it: MAC-based filters go through the
 * MAC ops / ucast filter path, VNI-based filters are issued directly as a
 * ucast command.  *clss is returned to the caller for the subsequent
 * tunnel-enable step.
 * NOTE(review): the function name starts with an underscore at file
 * scope, which is a reserved identifier in C -- consider renaming. */
956 _qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
957 const struct rte_eth_tunnel_filter_conf *conf,
958 __attribute__((unused)) enum rte_filter_op filter_op,
959 enum ecore_tunn_clss *clss,
962 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
963 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
964 struct ecore_filter_ucast ucast = {0};
965 enum ecore_filter_ucast_type type;
966 uint16_t filter_type = 0;
970 filter_type = conf->filter_type;
971 /* Determine if the given filter classification is supported */
972 qede_get_ecore_tunn_params(filter_type, &type, clss, str);
973 if (*clss == MAX_ECORE_TUNN_CLSS) {
974 DP_ERR(edev, "Unsupported filter type\n");
977 /* Init tunnel ucast params */
978 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
979 if (rc != ECORE_SUCCESS) {
980 DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
984 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
985 str, filter_op, ucast.type);
987 ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
989 /* Skip MAC/VLAN if filter is based on VNI */
990 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
991 rc = qede_mac_int_ops(eth_dev, &ucast, add);
992 if (rc == 0 && add) {
993 /* Enable accept anyvlan */
994 qede_config_accept_any_vlan(qdev, true);
997 rc = qede_ucast_filter(eth_dev, &ucast, add);
999 rc = ecore_filter_ucast_cmd(edev, &ucast,
1000 ECORE_SPQ_MODE_CB, NULL);
/* Dispatch a tunnel enable/disable to the per-type helper based on the
 * rte tunnel type (VXLAN, GENEVE or IP-in-GRE). */
1007 qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
1008 enum rte_eth_tunnel_type tunn_type, bool enable)
1012 switch (tunn_type) {
1013 case RTE_TUNNEL_TYPE_VXLAN:
1014 rc = qede_vxlan_enable(eth_dev, clss, enable);
1016 case RTE_TUNNEL_TYPE_GENEVE:
1017 rc = qede_geneve_enable(eth_dev, clss, enable);
1019 case RTE_TUNNEL_TYPE_IP_IN_GRE:
1020 rc = qede_ipgre_enable(eth_dev, clss, enable);
/* Add/delete a tunnel filter: program the classification rule via
 * _qede_tunn_filter_config(), track per-tunnel filter counts, and
 * enable/disable the tunnel offload as the counts transition. */
1031 qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
1032 enum rte_filter_op filter_op,
1033 const struct rte_eth_tunnel_filter_conf *conf)
1035 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1036 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1037 enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
1041 PMD_INIT_FUNC_TRACE(edev);
1043 switch (filter_op) {
1044 case RTE_ETH_FILTER_ADD:
1047 case RTE_ETH_FILTER_DELETE:
1051 DP_ERR(edev, "Unsupported operation %d\n", filter_op);
/* Filter-less path (visible context incomplete): just toggle the tunnel
 * with the default MAC/VLAN classification. */
1056 return qede_tunn_enable(eth_dev,
1057 ECORE_TUNN_CLSS_MAC_VLAN,
1058 conf->tunnel_type, add);
1060 rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
1061 if (rc != ECORE_SUCCESS)
/* Add path: bump the per-tunnel filter count and remember the type. */
1065 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
1066 qdev->vxlan.num_filters++;
1067 qdev->vxlan.filter_type = conf->filter_type;
1068 } else { /* GENEVE */
1069 qdev->geneve.num_filters++;
1070 qdev->geneve.filter_type = conf->filter_type;
/* NOTE(review): this ORs the negated enable flags of ALL tunnel types, so
 * the (re-)enable fires whenever any type is disabled rather than checking
 * conf->tunnel_type specifically -- looks like a copy-paste/logic slip;
 * confirm intended behavior. */
1073 if (!qdev->vxlan.enable || !qdev->geneve.enable ||
1074 !qdev->ipgre.enable)
1075 return qede_tunn_enable(eth_dev, clss,
/* Delete path: decrement the count for the affected tunnel type. */
1079 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
1080 qdev->vxlan.num_filters--;
1082 qdev->geneve.num_filters--;
1084 /* Disable VXLAN if VXLAN filters become 0 */
/* NOTE(review): the OR disables as soon as EITHER counter reaches zero,
 * regardless of which tunnel type this request was for -- should be
 * conditioned on conf->tunnel_type; confirm against upstream fix. */
1085 if (qdev->vxlan.num_filters == 0 ||
1086 qdev->geneve.num_filters == 0)
1087 return qede_tunn_enable(eth_dev, clss,
/* Top-level eth_dev filter_ctrl entry point: routes TUNNEL requests (by
 * tunnel type) to qede_tunn_filter_config(), FDIR to
 * qede_fdir_filter_conf() and NTUPLE to qede_ntuple_filter_conf();
 * everything else is rejected as unsupported. */
1095 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
1096 enum rte_filter_type filter_type,
1097 enum rte_filter_op filter_op,
1100 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1101 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
/* NOTE(review): arg is cast unconditionally but only dereferenced in the
 * TUNNEL case; FDIR/NTUPLE pass the raw arg through unchanged. */
1102 struct rte_eth_tunnel_filter_conf *filter_conf =
1103 (struct rte_eth_tunnel_filter_conf *)arg;
1105 switch (filter_type) {
1106 case RTE_ETH_FILTER_TUNNEL:
1107 switch (filter_conf->tunnel_type) {
1108 case RTE_TUNNEL_TYPE_VXLAN:
1109 case RTE_TUNNEL_TYPE_GENEVE:
1110 case RTE_TUNNEL_TYPE_IP_IN_GRE:
1112 "Packet steering to the specified Rx queue"
1113 " is not supported with UDP tunneling");
1114 return(qede_tunn_filter_config(eth_dev, filter_op,
1116 case RTE_TUNNEL_TYPE_TEREDO:
1117 case RTE_TUNNEL_TYPE_NVGRE:
1118 case RTE_L2_TUNNEL_TYPE_E_TAG:
1119 DP_ERR(edev, "Unsupported tunnel type %d\n",
1120 filter_conf->tunnel_type);
1122 case RTE_TUNNEL_TYPE_NONE:
1127 case RTE_ETH_FILTER_FDIR:
1128 return qede_fdir_filter_conf(eth_dev, filter_op, arg);
1129 case RTE_ETH_FILTER_NTUPLE:
1130 return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
1131 case RTE_ETH_FILTER_MACVLAN:
1132 case RTE_ETH_FILTER_ETHERTYPE:
1133 case RTE_ETH_FILTER_FLEXIBLE:
1134 case RTE_ETH_FILTER_SYN:
1135 case RTE_ETH_FILTER_HASH:
1136 case RTE_ETH_FILTER_L2_TUNNEL:
1137 case RTE_ETH_FILTER_MAX:
1139 DP_ERR(edev, "Unsupported filter type %d\n",