/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_errno.h>

#include "qede_ethdev.h"
/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{ETH_TUNNEL_FILTER_OMAC,
	 ECORE_FILTER_MAC,
	 ECORE_TUNN_CLSS_MAC_VLAN,
	 "outer-mac"},
	{ETH_TUNNEL_FILTER_TENID,
	 ECORE_FILTER_VNI,
	 ECORE_TUNN_CLSS_MAC_VNI,
	 "vni"},
	{ETH_TUNNEL_FILTER_IMAC,
	 ECORE_FILTER_INNER_MAC,
	 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
	 "inner-mac"},
	{ETH_TUNNEL_FILTER_IVLAN,
	 ECORE_FILTER_INNER_VLAN,
	 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
	 "inner-vlan"},
	{ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
	 ECORE_FILTER_MAC_VNI_PAIR,
	 ECORE_TUNN_CLSS_MAC_VNI,
	 "outer-mac and vni"},
	{ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
	 ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS,
	 "outer-mac and inner-mac"},
	{ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
	 ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS,
	 "outer-mac and inner-vlan"},
	{ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
	 ECORE_FILTER_INNER_MAC_VNI_PAIR,
	 ECORE_TUNN_CLSS_INNER_MAC_VNI,
	 "vni and inner-mac"},
	{ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
	 ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS,
	 "vni and inner-vlan"},
	{ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	 ECORE_FILTER_INNER_PAIR,
	 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
	 "inner-mac and inner-vlan"},
	{ETH_TUNNEL_FILTER_OIP,
	 ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS,
	 "outer-IP"},
	{ETH_TUNNEL_FILTER_IIP,
	 ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS,
	 "inner-IP"},
	{RTE_TUNNEL_FILTER_IMAC_IVLAN,
	 ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS,
	 "IMAC_IVLAN"},
	{RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
	 ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS,
	 "IMAC_IVLAN_TENID"},
	{RTE_TUNNEL_FILTER_IMAC_TENID,
	 ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS,
	 "IMAC_TENID"},
	{RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
	 ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS,
	 "OMAC_TENID_IMAC"},
};
#define IP_VERSION				(0x40)
#define IP_HDRLEN				(0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL	(IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF		(0x50)
#define QEDE_FDIR_IPV4_DEF_TTL			(64)
#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW		(0x60000000)
/* Sum of length of header types of L2, L3, L4.
 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
 * L3 : ipv6_hdr
 * L4 : tcp_hdr
 */
#define QEDE_MAX_FDIR_PKT_LEN			(86)
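/* Worked arithmetic for the 86-byte bound above (standard header sizes,
 * stated here as an assumption): ether_hdr(14) + vlan_hdr(4) +
 * vxlan_hdr(8) + ipv6_hdr(40) + tcp_hdr(20) = 86.
 */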

#ifndef IPV6_ADDR_LEN
#define IPV6_ADDR_LEN				(16)
#endif
static inline bool qede_valid_flow(uint16_t flow_type)
{
	return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
		(flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
		(flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
		(flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
}
/* Note: Flowdir support is only partial.
 * For example: drop_queue, FDIR masks, flex_conf are not supported.
 * Parameters like pballoc/status fields are irrelevant here.
 */
int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;

	/* check FDIR modes */
	switch (fdir->mode) {
	case RTE_FDIR_MODE_NONE:
		qdev->fdir_info.arfs.arfs_enable = false;
		DP_INFO(edev, "flowdir is disabled\n");
		break;
	case RTE_FDIR_MODE_PERFECT:
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			qdev->fdir_info.arfs.arfs_enable = false;
			return -ENOTSUP;
		}
		qdev->fdir_info.arfs.arfs_enable = true;
		DP_INFO(edev, "flowdir is enabled\n");
		break;
	case RTE_FDIR_MODE_PERFECT_TUNNEL:
	case RTE_FDIR_MODE_SIGNATURE:
	case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
		DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
		return -ENOTSUP;
	}

	return 0;
}
void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct qede_fdir_entry *tmp = NULL;

	/* Free every registered filter entry and its template memzone */
	SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
		if (tmp->mz)
			rte_memzone_free(tmp->mz);
		SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
			     qede_fdir_entry, list);
		rte_free(tmp);
	}
}
static int
qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
			    struct rte_eth_fdir_filter *fdir_filter,
			    bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
	struct qede_fdir_entry *tmp = NULL;
	struct qede_fdir_entry *fdir = NULL;
	const struct rte_memzone *mz;
	struct ecore_hwfn *p_hwfn;
	enum _ecore_status_t rc;
	uint16_t pkt_len;
	void *pkt;

	if (add) {
		if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
			DP_ERR(edev, "Reached max flowdir filter limit\n");
			return -EINVAL;
		}
		fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
				  RTE_CACHE_LINE_SIZE);
		if (!fdir) {
			DP_ERR(edev, "Did not allocate memory for fdir\n");
			return -ENOMEM;
		}
	}
	/* soft_id could have been used as memzone string, but soft_id is
	 * not currently used so it has no significance.
	 */
	snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
		 (unsigned long)rte_get_timer_cycles());
	mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
					 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
	if (!mz) {
		DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
		       rte_strerror(rte_errno));
		rc = -rte_errno;
		goto err1;
	}

	pkt = mz->addr;
	memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
	pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
					  &qdev->fdir_info.arfs);
	if (pkt_len == 0) {
		rc = -EINVAL;
		goto err2;
	}
	DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
	if (add) {
		SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
			if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
				DP_INFO(edev, "flowdir filter exists\n");
				rc = 0;
				goto err2;
			}
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
			if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
				break;
		}
		if (!tmp) {
			DP_ERR(edev, "flowdir filter does not exist\n");
			rc = -EEXIST;
			goto err2;
		}
	}
	p_hwfn = ECORE_LEADING_HWFN(edev);
	if (add) {
		if (!qdev->fdir_info.arfs.arfs_enable) {
			eth_dev->data->dev_conf.fdir_conf.mode =
							RTE_FDIR_MODE_PERFECT;
			qdev->fdir_info.arfs.arfs_enable = true;
			DP_INFO(edev, "Force enable flowdir in perfect mode\n");
		}
		/* Enable ARFS searcher with updated flow_types */
		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
					  &qdev->fdir_info.arfs);
	}
	/* configure filter with ECORE_SPQ_MODE_EBLOCK */
	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
					       (dma_addr_t)mz->iova,
					       pkt_len,
					       fdir_filter->action.rx_queue,
					       0, add);
	if (rc == ECORE_SUCCESS) {
		if (add) {
			fdir->rx_queue = fdir_filter->action.rx_queue;
			fdir->pkt_len = pkt_len;
			fdir->mz = mz;
			SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
					  fdir, list);
			qdev->fdir_info.filter_count++;
			DP_INFO(edev, "flowdir filter added, count = %d\n",
				qdev->fdir_info.filter_count);
		} else {
			rte_memzone_free(tmp->mz);
			SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
				     qede_fdir_entry, list);
			rte_free(tmp); /* the node deleted */
			rte_memzone_free(mz); /* temp node allocated */
			qdev->fdir_info.filter_count--;
			DP_INFO(edev, "Fdir filter deleted, count = %d\n",
				qdev->fdir_info.filter_count);
		}
	} else {
		DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
		       rc, qdev->fdir_info.filter_count);
	}

	/* Disable ARFS searcher if there are no more filters */
	if (qdev->fdir_info.filter_count == 0) {
		memset(&qdev->fdir_info.arfs, 0,
		       sizeof(struct ecore_arfs_config_params));
		DP_INFO(edev, "Disabling flowdir\n");
		qdev->fdir_info.arfs.arfs_enable = false;
		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
					  &qdev->fdir_info.arfs);
	}
	return 0;

err2:
	rte_memzone_free(mz);
err1:
	if (add)
		rte_free(fdir);
	return rc;
}
static int
qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
		     struct rte_eth_fdir_filter *fdir,
		     bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (!qede_valid_flow(fdir->input.flow_type)) {
		DP_ERR(edev, "invalid flow_type input\n");
		return -EINVAL;
	}

	if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
		DP_ERR(edev, "invalid queue number %u\n",
		       fdir->action.rx_queue);
		return -EINVAL;
	}

	if (fdir->input.flow_ext.is_vf) {
		DP_ERR(edev, "flowdir is not supported over VF\n");
		return -EINVAL;
	}

	return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
}
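
/* Illustrative sketch (not part of the driver): an application typically
 * reaches qede_fdir_filter_add() through the generic
 * rte_eth_dev_filter_ctrl() API with RTE_ETH_FILTER_FDIR. The port id,
 * addresses, ports and queue below are made-up example values.
 */
static __rte_unused int
qede_example_fdir_add(uint16_t port_id)
{
	struct rte_eth_fdir_filter f;

	memset(&f, 0, sizeof(f));
	/* must be one of the flow types accepted by qede_valid_flow() */
	f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	f.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(IPv4(192, 168, 1, 1));
	f.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(192, 168, 1, 2));
	f.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(5000);
	f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(5001);
	f.action.rx_queue = 1; /* must be < QEDE_RSS_COUNT(qdev) */

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, &f);
}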
/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
uint16_t
qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
			struct rte_eth_fdir_filter *fdir,
			void *buff,
			struct ecore_arfs_config_params *params)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint16_t *ether_type;
	uint8_t *raw_pkt;
	struct rte_eth_fdir_input *input;
	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
	struct ipv4_hdr *ip;
	struct ipv6_hdr *ip6;
	struct udp_hdr *udp;
	struct tcp_hdr *tcp;
	uint16_t len;
	static const uint8_t next_proto[] = {
		[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
	};

	raw_pkt = (uint8_t *)buff;
	input = &fdir->input;
	DP_INFO(edev, "flow_type %d\n", input->flow_type);

	len = 2 * sizeof(struct ether_addr);
	raw_pkt += 2 * sizeof(struct ether_addr);
	if (input->flow_ext.vlan_tci) {
		DP_INFO(edev, "adding VLAN header\n");
		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
		rte_memcpy(raw_pkt + sizeof(uint16_t),
			   &input->flow_ext.vlan_tci,
			   sizeof(uint16_t));
		raw_pkt += sizeof(vlan_frame);
		len += sizeof(vlan_frame);
	}
	ether_type = (uint16_t *)raw_pkt;
	raw_pkt += sizeof(uint16_t);
	len += sizeof(uint16_t);

	switch (input->flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		/* fill the common ip header */
		ip = (struct ipv4_hdr *)raw_pkt;
		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
		ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
		ip->total_length = sizeof(struct ipv4_hdr);
		ip->next_proto_id = input->flow.ip4_flow.proto ?
				    input->flow.ip4_flow.proto :
				    next_proto[input->flow_type];
		ip->time_to_live = input->flow.ip4_flow.ttl ?
				   input->flow.ip4_flow.ttl :
				   QEDE_FDIR_IPV4_DEF_TTL;
		ip->type_of_service = input->flow.ip4_flow.tos;
		ip->dst_addr = input->flow.ip4_flow.dst_ip;
		ip->src_addr = input->flow.ip4_flow.src_ip;
		len += sizeof(struct ipv4_hdr);
		params->ipv4 = true;

		raw_pkt = (uint8_t *)buff;
		/* UDP */
		if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
			udp = (struct udp_hdr *)(raw_pkt + len);
			udp->dst_port = input->flow.udp4_flow.dst_port;
			udp->src_port = input->flow.udp4_flow.src_port;
			udp->dgram_len = sizeof(struct udp_hdr);
			len += sizeof(struct udp_hdr);
			/* adjust ip total_length */
			ip->total_length += sizeof(struct udp_hdr);
			params->udp = true;
		} else { /* TCP */
			tcp = (struct tcp_hdr *)(raw_pkt + len);
			tcp->src_port = input->flow.tcp4_flow.src_port;
			tcp->dst_port = input->flow.tcp4_flow.dst_port;
			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
			len += sizeof(struct tcp_hdr);
			/* adjust ip total_length */
			ip->total_length += sizeof(struct tcp_hdr);
			params->tcp = true;
		}
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		ip6 = (struct ipv6_hdr *)raw_pkt;
		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
		ip6->proto = input->flow.ipv6_flow.proto ?
			     input->flow.ipv6_flow.proto :
			     next_proto[input->flow_type];
		ip6->vtc_flow =
			rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
		rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.src_ip,
			   IPV6_ADDR_LEN);
		rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.dst_ip,
			   IPV6_ADDR_LEN);
		len += sizeof(struct ipv6_hdr);
		params->ipv6 = true;

		raw_pkt = (uint8_t *)buff;
		/* UDP */
		if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
			udp = (struct udp_hdr *)(raw_pkt + len);
			udp->src_port = input->flow.udp6_flow.src_port;
			udp->dst_port = input->flow.udp6_flow.dst_port;
			len += sizeof(struct udp_hdr);
			params->udp = true;
		} else { /* TCP */
			tcp = (struct tcp_hdr *)(raw_pkt + len);
			tcp->src_port = input->flow.tcp6_flow.src_port;
			tcp->dst_port = input->flow.tcp6_flow.dst_port;
			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
			len += sizeof(struct tcp_hdr);
			params->tcp = true;
		}
		break;
	default:
		DP_ERR(edev, "Unsupported flow_type %u\n",
		       input->flow_type);
		len = 0;
	}

	return len;
}
int
qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_fdir_filter *fdir;
	int ret;

	fdir = (struct rte_eth_fdir_filter *)arg;
	switch (filter_op) {
	case RTE_ETH_FILTER_NOP:
		/* Typically used to query flowdir support */
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			return -ENOTSUP;
		}
		return 0; /* means supported */
	case RTE_ETH_FILTER_ADD:
		ret = qede_fdir_filter_add(eth_dev, fdir, 1);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = qede_fdir_filter_add(eth_dev, fdir, 0);
		break;
	case RTE_ETH_FILTER_FLUSH:
	case RTE_ETH_FILTER_UPDATE:
	case RTE_ETH_FILTER_INFO:
		return -ENOTSUP;
	default:
		DP_ERR(edev, "unknown operation %u", filter_op);
		ret = -EINVAL;
	}

	return ret;
}
int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
			    enum rte_filter_op filter_op,
			    void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_ntuple_filter *ntuple;
	struct rte_eth_fdir_filter fdir_entry;
	struct rte_eth_tcpv4_flow *tcpv4_flow;
	struct rte_eth_udpv4_flow *udpv4_flow;
	bool add = false;

	switch (filter_op) {
	case RTE_ETH_FILTER_NOP:
		/* Typically used to query fdir support */
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			return -ENOTSUP;
		}
		return 0; /* means supported */
	case RTE_ETH_FILTER_ADD:
		add = true;
		break;
	case RTE_ETH_FILTER_DELETE:
		break;
	case RTE_ETH_FILTER_INFO:
	case RTE_ETH_FILTER_GET:
	case RTE_ETH_FILTER_UPDATE:
	case RTE_ETH_FILTER_FLUSH:
	case RTE_ETH_FILTER_SET:
	case RTE_ETH_FILTER_STATS:
	case RTE_ETH_FILTER_OP_MAX:
		DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
		return -ENOTSUP;
	}

	ntuple = (struct rte_eth_ntuple_filter *)arg;
	/* Internally convert ntuple to fdir entry */
	memset(&fdir_entry, 0, sizeof(fdir_entry));
	if (ntuple->proto == IPPROTO_TCP) {
		fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
		tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
		tcpv4_flow->ip.src_ip = ntuple->src_ip;
		tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
		tcpv4_flow->ip.proto = IPPROTO_TCP;
		tcpv4_flow->src_port = ntuple->src_port;
		tcpv4_flow->dst_port = ntuple->dst_port;
	} else {
		fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
		udpv4_flow = &fdir_entry.input.flow.udp4_flow;
		udpv4_flow->ip.src_ip = ntuple->src_ip;
		udpv4_flow->ip.dst_ip = ntuple->dst_ip;
		udpv4_flow->ip.proto = IPPROTO_UDP;
		udpv4_flow->src_port = ntuple->src_port;
		udpv4_flow->dst_port = ntuple->dst_port;
	}

	fdir_entry.action.rx_queue = ntuple->queue;

	return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
}
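
/* Illustrative sketch (not part of the driver): the ntuple path above is
 * reached via rte_eth_dev_filter_ctrl() with RTE_ETH_FILTER_NTUPLE. Only
 * the proto/address/port/queue fields are consumed by the conversion
 * above; the values here are made-up examples.
 */
static __rte_unused int
qede_example_ntuple_add(uint16_t port_id)
{
	struct rte_eth_ntuple_filter nt;

	memset(&nt, 0, sizeof(nt));
	nt.flags = RTE_5TUPLE_FLAGS;
	nt.proto = IPPROTO_TCP; /* TCP maps to NONFRAG_IPV4_TCP above */
	nt.src_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
	nt.dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 2));
	nt.src_port = rte_cpu_to_be_16(1024);
	nt.dst_port = rte_cpu_to_be_16(80);
	nt.queue = 1;

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
				       RTE_ETH_FILTER_ADD, &nt);
}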
static int
qede_tunnel_update(struct qede_dev *qdev,
		   struct ecore_tunnel_info *tunn_info)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		if (IS_PF(edev)) {
			p_ptt = ecore_ptt_acquire(p_hwfn);
			if (!p_ptt) {
				DP_ERR(p_hwfn, "Can't acquire PTT\n");
				return -EAGAIN;
			}
		} else {
			p_ptt = NULL;
		}

		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
				tunn_info, ECORE_SPQ_MODE_CB, NULL);
		if (IS_PF(edev))
			ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}
static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	if (qdev->vxlan.enable == enable)
		return ECORE_SUCCESS;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.vxlan.b_update_mode = true;
	tunn.vxlan.b_mode_enabled = enable;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;
	tunn.vxlan.tun_cls = clss;

	tunn.vxlan_port.b_update_port = true;
	tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->vxlan.enable = enable;
		qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
		DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.vxlan.tun_cls);
	}

	return rc;
}
static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		   bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.l2_geneve.b_update_mode = true;
	tunn.l2_geneve.b_mode_enabled = enable;
	tunn.ip_geneve.b_update_mode = true;
	tunn.ip_geneve.b_mode_enabled = enable;
	tunn.l2_geneve.tun_cls = clss;
	tunn.ip_geneve.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	tunn.geneve_port.b_update_port = true;
	tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->geneve.enable = enable;
		qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
		DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->geneve.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.l2_geneve.tun_cls);
	}

	return rc;
}
static int
qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.ip_gre.b_update_mode = true;
	tunn.ip_gre.b_mode_enabled = enable;
	tunn.ip_gre.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->ipgre.enable = enable;
		DP_INFO(edev, "IPGRE is %s\n",
			enable ? "enabled" : "disabled");
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.ip_gre.tun_cls);
	}

	return rc;
}
int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port = 0;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.vxlan_port.port);
			return rc;
		}

		qdev->vxlan.udp_port = udp_port;
		/* If the request is to delete UDP port and if the number of
		 * VXLAN filters has reached 0 then VxLAN offload can be
		 * disabled.
		 */
		if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
			return qede_vxlan_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.geneve_port.port);
			return rc;
		}

		qdev->geneve.udp_port = udp_port;
		/* If the request is to delete UDP port and if the number of
		 * GENEVE filters has reached 0 then GENEVE offload can be
		 * disabled.
		 */
		if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
			return qede_geneve_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;
	default:
		return ECORE_INVAL;
	}

	return 0;
}
int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for VXLAN was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable VxLAN tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding VXLAN filter before UDP port
		 * update.
		 */
		if (!qdev->vxlan.enable) {
			rc = qede_vxlan_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable VXLAN "
					"prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);

		qdev->vxlan.udp_port = udp_port;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for GENEVE was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable GENEVE tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding GENEVE filter before UDP port
		 * update.
		 */
		if (!qdev->geneve.enable) {
			rc = qede_geneve_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable GENEVE "
					"prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);

		qdev->geneve.udp_port = udp_port;
		break;
	default:
		return ECORE_INVAL;
	}

	return 0;
}
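
/* Illustrative sketch (not part of the driver): UDP tunnel ports are
 * normally added through the generic ethdev helper below. 4789 is the
 * IANA-assigned VXLAN port and serves only as an example value here.
 */
static __rte_unused int
qede_example_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel_udp = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
}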
static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
				       uint32_t *clss, char *str)
{
	uint16_t j;

	*clss = MAX_ECORE_TUNN_CLSS;

	for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
		if (filter == qede_tunn_types[j].rte_filter_type) {
			*type = qede_tunn_types[j].qede_type;
			*clss = qede_tunn_types[j].qede_tunn_clss;
			strcpy(str, qede_tunn_types[j].string);
			break;
		}
	}
}
static int
qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
			      const struct rte_eth_tunnel_filter_conf *conf,
			      enum ecore_filter_ucast_type type)
{
	/* Init common ucast params first */
	qede_set_ucast_cmn_params(ucast);

	/* Copy out the required fields based on classification type */
	ucast->type = type;

	switch (type) {
	case ECORE_FILTER_VNI:
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_VLAN:
		ucast->vlan = conf->inner_vlan;
		break;
	case ECORE_FILTER_MAC:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes, ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_INNER_MAC:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes, ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes, ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes, ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes, ETHER_ADDR_LEN);
		ucast->vlan = conf->inner_vlan;
		break;
	default:
		return -ENOTSUP;
	}

	return ECORE_SUCCESS;
}
static int
_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
			 const struct rte_eth_tunnel_filter_conf *conf,
			 __attribute__((unused)) enum rte_filter_op filter_op,
			 enum ecore_tunn_clss *clss,
			 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_ucast ucast = {0};
	enum ecore_filter_ucast_type type;
	uint16_t filter_type = 0;
	char str[80];
	int rc;

	filter_type = conf->filter_type;
	/* Determine if the given filter classification is supported */
	qede_get_ecore_tunn_params(filter_type, &type, clss, str);
	if (*clss == MAX_ECORE_TUNN_CLSS) {
		DP_ERR(edev, "Unsupported filter type\n");
		return -EINVAL;
	}
	/* Init tunnel ucast params */
	rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
		       conf->filter_type);
		return rc;
	}
	DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
		str, filter_op, ucast.type);

	ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;

	/* Skip MAC/VLAN if filter is based on VNI */
	if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
		rc = qede_mac_int_ops(eth_dev, &ucast, add);
		if (rc == 0 && add) {
			/* Enable accept anyvlan */
			qede_config_accept_any_vlan(qdev, true);
		}
	} else {
		rc = qede_ucast_filter(eth_dev, &ucast, add);
		if (rc == 0)
			rc = ecore_filter_ucast_cmd(edev, &ucast,
					    ECORE_SPQ_MODE_CB, NULL);
	}

	return rc;
}
static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		 enum rte_eth_tunnel_type tunn_type, bool enable)
{
	int rc = -EINVAL;

	switch (tunn_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		rc = qede_vxlan_enable(eth_dev, clss, enable);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		rc = qede_geneve_enable(eth_dev, clss, enable);
		break;
	case RTE_TUNNEL_TYPE_IP_IN_GRE:
		rc = qede_ipgre_enable(eth_dev, clss, enable);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
static int
qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op,
			const struct rte_eth_tunnel_filter_conf *conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
	bool add;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		add = true;
		break;
	case RTE_ETH_FILTER_DELETE:
		add = false;
		break;
	default:
		DP_ERR(edev, "Unsupported operation %d\n", filter_op);
		return -EINVAL;
	}

	if (IS_VF(edev))
		return qede_tunn_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN,
					conf->tunnel_type, add);

	rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (add) {
		if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
			qdev->vxlan.num_filters++;
			qdev->vxlan.filter_type = conf->filter_type;
		} else { /* GENEVE */
			qdev->geneve.num_filters++;
			qdev->geneve.filter_type = conf->filter_type;
		}

		if (!qdev->vxlan.enable || !qdev->geneve.enable ||
		    !qdev->ipgre.enable)
			return qede_tunn_enable(eth_dev, clss,
						conf->tunnel_type, true);
	} else {
		if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
			qdev->vxlan.num_filters--;
		else /* GENEVE */
			qdev->geneve.num_filters--;

		/* Disable tunnel offload if the filter count drops to 0 */
		if (qdev->vxlan.num_filters == 0 ||
		    qdev->geneve.num_filters == 0)
			return qede_tunn_enable(eth_dev, clss,
						conf->tunnel_type, false);
	}

	return 0;
}
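
/* Illustrative sketch (not part of the driver): a VXLAN classification
 * rule as an application might submit it through the generic filter API.
 * The MAC address and VNI below are made-up example values.
 */
static __rte_unused int
qede_example_tunn_filter_add(uint16_t port_id)
{
	struct rte_eth_tunnel_filter_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
	/* vni + inner-mac maps to ECORE_FILTER_INNER_MAC_VNI_PAIR above */
	conf.filter_type = ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC;
	conf.tenant_id = 100;
	memset(conf.inner_mac.addr_bytes, 0xaa, ETHER_ADDR_LEN);

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
				       RTE_ETH_FILTER_ADD, &conf);
}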
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op,
			 void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_tunnel_filter_conf *filter_conf =
			(struct rte_eth_tunnel_filter_conf *)arg;

	switch (filter_type) {
	case RTE_ETH_FILTER_TUNNEL:
		switch (filter_conf->tunnel_type) {
		case RTE_TUNNEL_TYPE_VXLAN:
		case RTE_TUNNEL_TYPE_GENEVE:
		case RTE_TUNNEL_TYPE_IP_IN_GRE:
			DP_INFO(edev,
				"Packet steering to the specified Rx queue"
				" is not supported with UDP tunneling");
			return qede_tunn_filter_config(eth_dev, filter_op,
						       filter_conf);
		case RTE_TUNNEL_TYPE_TEREDO:
		case RTE_TUNNEL_TYPE_NVGRE:
		case RTE_L2_TUNNEL_TYPE_E_TAG:
			DP_ERR(edev, "Unsupported tunnel type %d\n",
			       filter_conf->tunnel_type);
			return -EINVAL;
		case RTE_TUNNEL_TYPE_NONE:
		default:
			return 0;
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		return qede_fdir_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_NTUPLE:
		return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_MACVLAN:
	case RTE_ETH_FILTER_ETHERTYPE:
	case RTE_ETH_FILTER_FLEXIBLE:
	case RTE_ETH_FILTER_SYN:
	case RTE_ETH_FILTER_HASH:
	case RTE_ETH_FILTER_L2_TUNNEL:
	case RTE_ETH_FILTER_MAX:
	default:
		DP_ERR(edev, "Unsupported filter type %d\n",
		       filter_type);
		return -EINVAL;
	}

	return 0;
}
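
/* Illustrative sketch (an assumption, not verbatim from qede_ethdev.c):
 * the entry points above are exposed to applications by wiring them into
 * the PMD's eth_dev_ops table, roughly as follows.
 */
static const struct eth_dev_ops qede_eth_dev_ops_example __rte_unused = {
	/* ... other ops ... */
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};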