1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2017 Cavium Inc.
10 #include <rte_errno.h>
11 #include <rte_flow_driver.h>
13 #include "qede_ethdev.h"
15 /* UDP tunnel (VXLAN/GENEVE) classification mapping */
16 const struct _qede_udp_tunn_types {
17 uint16_t rte_filter_type;
18 enum ecore_filter_ucast_type qede_type;
19 enum ecore_tunn_clss qede_tunn_clss;
21 } qede_tunn_types[] = {
23 ETH_TUNNEL_FILTER_OMAC,
25 ECORE_TUNN_CLSS_MAC_VLAN,
29 ETH_TUNNEL_FILTER_TENID,
31 ECORE_TUNN_CLSS_MAC_VNI,
35 ETH_TUNNEL_FILTER_IMAC,
36 ECORE_FILTER_INNER_MAC,
37 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
41 ETH_TUNNEL_FILTER_IVLAN,
42 ECORE_FILTER_INNER_VLAN,
43 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
47 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
48 ECORE_FILTER_MAC_VNI_PAIR,
49 ECORE_TUNN_CLSS_MAC_VNI,
53 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
56 "outer-mac and inner-mac"
59 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
62 "outer-mac and inner-vlan"
65 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
66 ECORE_FILTER_INNER_MAC_VNI_PAIR,
67 ECORE_TUNN_CLSS_INNER_MAC_VNI,
71 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
77 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
78 ECORE_FILTER_INNER_PAIR,
79 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
80 "inner-mac and inner-vlan",
83 ETH_TUNNEL_FILTER_OIP,
89 ETH_TUNNEL_FILTER_IIP,
95 RTE_TUNNEL_FILTER_IMAC_IVLAN,
101 RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
107 RTE_TUNNEL_FILTER_IMAC_TENID,
113 RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
120 #define IP_VERSION (0x40)
121 #define IP_HDRLEN (0x5)
122 #define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
123 #define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
124 #define QEDE_FDIR_IPV4_DEF_TTL (64)
125 #define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW (0x60000000)
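/* For reference, these defaults are standard header encodings, not
 * qede-specific values:
 * - 0x45 = IPv4 version (4 << 4) | IHL of 5 32-bit words (20-byte header)
 * - 0x50 = TCP data offset of 5 32-bit words (20-byte header, no options),
 *   carried in the upper nibble of data_off
 * - 0x60000000 = IPv6 version 6 in the top nibble of vtc_flow
 */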
126 /* Sum of the lengths of the L2, L3 and L4 header types.
127 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
131 #define QEDE_MAX_FDIR_PKT_LEN (86)
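/* A worked breakdown of the 86-byte maximum, assuming the header sizes
 * named in the comment above: ether_hdr (14) + vlan_hdr (4) +
 * vxlan_hdr (8) + ipv6_hdr (40) + tcp_hdr (20) = 86 bytes.
 */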
133 static inline bool qede_valid_flow(uint16_t flow_type)
135 return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
136 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
137 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
138 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
142 qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
143 struct qede_arfs_entry *arfs,
145 struct ecore_arfs_config_params *params);
147 /* Note: Flowdir support is only partial.
148 * For example: drop_queue, FDIR masks and flex_conf are not supported.
149 * Parameters like pballoc/status fields are irrelevant here.
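/* Illustrative usage sketch only (field names from rte_ethdev.h of this
 * era; qede enables its 5-tuple arfs mode only for RTE_FDIR_MODE_PERFECT):
 *
 *   struct rte_eth_conf conf = { 0 };
 *   conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */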
151 int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
153 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
154 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
155 struct rte_fdir_conf *fdir = ð_dev->data->dev_conf.fdir_conf;
157 /* check FDIR modes */
158 switch (fdir->mode) {
159 case RTE_FDIR_MODE_NONE:
160 qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
161 DP_INFO(edev, "flowdir is disabled\n");
163 case RTE_FDIR_MODE_PERFECT:
164 if (ECORE_IS_CMT(edev)) {
165 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
166 qdev->arfs_info.arfs.mode =
167 ECORE_FILTER_CONFIG_MODE_DISABLE;
170 qdev->arfs_info.arfs.mode =
171 ECORE_FILTER_CONFIG_MODE_5_TUPLE;
172 DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
174 case RTE_FDIR_MODE_PERFECT_TUNNEL:
175 case RTE_FDIR_MODE_SIGNATURE:
176 case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
177 DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
184 void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
186 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
187 struct qede_arfs_entry *tmp = NULL;
189 SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
192 rte_memzone_free(tmp->mz);
193 SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
194 qede_arfs_entry, list);
201 qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
202 struct rte_eth_fdir_filter *fdir,
203 struct qede_arfs_entry *arfs)
205 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
206 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
207 struct rte_eth_fdir_input *input;
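/* The table below maps an RTE flow type to the IP protocol number used in
 * the constructed header. The designated initializers leave every other
 * flow type as 0, which is harmless here because the flow type has already
 * been screened by qede_valid_flow() and the switch below rejects anything
 * else.
 */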
209 static const uint8_t next_proto[] = {
210 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
211 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
212 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
213 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
216 input = &fdir->input;
218 DP_INFO(edev, "flow_type %d\n", input->flow_type);
220 switch (input->flow_type) {
221 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
222 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
223 /* fill the common ip header */
224 arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV4;
225 arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
226 arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
227 arfs->tuple.ip_proto = next_proto[input->flow_type];
230 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
231 arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
232 arfs->tuple.src_port = input->flow.udp4_flow.src_port;
234 arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
235 arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
238 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
239 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
240 arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV6;
241 arfs->tuple.ip_proto = next_proto[input->flow_type];
242 rte_memcpy(arfs->tuple.dst_ipv6,
243 &input->flow.ipv6_flow.dst_ip,
245 rte_memcpy(arfs->tuple.src_ipv6,
246 &input->flow.ipv6_flow.src_ip,
250 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
251 arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
252 arfs->tuple.src_port = input->flow.udp6_flow.src_port;
254 arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
255 arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
259 DP_ERR(edev, "Unsupported flow_type %u\n",
264 arfs->rx_queue = fdir->action.rx_queue;
269 qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
270 struct qede_arfs_entry *arfs,
273 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
274 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
275 struct ecore_ntuple_filter_params params;
276 char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
277 struct qede_arfs_entry *tmp = NULL;
278 const struct rte_memzone *mz;
279 struct ecore_hwfn *p_hwfn;
280 enum _ecore_status_t rc;
285 if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
286 DP_ERR(edev, "Reached max flowdir filter limit\n");
291 /* soft_id could have been used as the memzone name, but soft_id is
292 * not currently used, so it has no significance.
294 snprintf(mz_name, sizeof(mz_name), "%lx",
295 (unsigned long)rte_get_timer_cycles());
296 mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
297 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
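/* Note: a memzone (rather than plain rte_malloc) is used so the template
 * packet sits in DMA-able memory with a known IOVA; that IOVA is later
 * passed to ecore_configure_rfs_ntuple_filter() via params.addr below.
 */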
299 DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
300 rte_strerror(rte_errno));
305 memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
306 pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
307 &qdev->arfs_info.arfs);
313 DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
315 SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
316 if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
317 DP_INFO(edev, "flowdir filter already exists\n");
323 SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
324 if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
328 DP_ERR(edev, "flowdir filter does not exist\n");
333 p_hwfn = ECORE_LEADING_HWFN(edev);
335 if (qdev->arfs_info.arfs.mode ==
336 ECORE_FILTER_CONFIG_MODE_DISABLE) {
338 eth_dev->data->dev_conf.fdir_conf.mode =
339 RTE_FDIR_MODE_PERFECT;
340 qdev->arfs_info.arfs.mode =
341 ECORE_FILTER_CONFIG_MODE_5_TUPLE;
342 DP_INFO(edev, "Force enable flowdir in perfect mode\n");
344 /* Enable ARFS searcher with updated flow_types */
345 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
346 &qdev->arfs_info.arfs);
349 memset(¶ms, 0, sizeof(params));
350 params.addr = (dma_addr_t)mz->iova;
351 params.length = pkt_len;
352 params.qid = arfs->rx_queue;
354 params.b_is_add = add;
355 params.b_is_drop = arfs->is_drop;
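/* At this point params describes the filter completely: the IOVA and
 * length of the template packet built above, the destination Rx queue
 * (qid), and whether the request adds/removes the filter or drops
 * matching traffic.
 */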
357 /* configure filter with ECORE_SPQ_MODE_EBLOCK */
358 rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
360 if (rc == ECORE_SUCCESS) {
362 arfs->pkt_len = pkt_len;
364 SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
366 qdev->arfs_info.filter_count++;
367 DP_INFO(edev, "flowdir filter added, count = %d\n",
368 qdev->arfs_info.filter_count);
370 rte_memzone_free(tmp->mz);
371 SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
372 qede_arfs_entry, list);
373 rte_free(tmp); /* free the deleted node */
374 rte_memzone_free(mz); /* free the temporary lookup memzone */
375 qdev->arfs_info.filter_count--;
376 DP_INFO(edev, "flowdir filter deleted, count = %d\n",
377 qdev->arfs_info.filter_count);
380 DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
381 rc, qdev->arfs_info.filter_count);
384 /* Disable ARFS searcher if there are no more filters */
385 if (qdev->arfs_info.filter_count == 0) {
386 memset(&qdev->arfs_info.arfs, 0,
387 sizeof(struct ecore_arfs_config_params));
388 DP_INFO(edev, "Disabling flowdir\n");
389 qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
390 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
391 &qdev->arfs_info.arfs);
396 rte_memzone_free(mz);
401 qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
402 struct rte_eth_fdir_filter *fdir_filter,
405 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
406 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
407 struct qede_arfs_entry *arfs = NULL;
410 arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
411 RTE_CACHE_LINE_SIZE);
413 DP_ERR(edev, "Failed to allocate memory for arfs\n");
417 rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
421 rc = qede_config_arfs_filter(eth_dev, arfs, add);
429 qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
430 struct rte_eth_fdir_filter *fdir,
433 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
434 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
436 if (!qede_valid_flow(fdir->input.flow_type)) {
437 DP_ERR(edev, "invalid flow_type input\n");
441 if (fdir->action.rx_queue >= QEDE_RSS_COUNT(eth_dev)) {
442 DP_ERR(edev, "invalid queue number %u\n",
443 fdir->action.rx_queue);
447 if (fdir->input.flow_ext.is_vf) {
448 DP_ERR(edev, "flowdir is not supported on a VF\n");
452 return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
455 /* Fills the L3/L4 headers and returns the actual length of flowdir packet */
457 qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
458 struct qede_arfs_entry *arfs,
460 struct ecore_arfs_config_params *params)
463 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
464 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
465 uint16_t *ether_type;
467 struct rte_ipv4_hdr *ip;
468 struct rte_ipv6_hdr *ip6;
469 struct rte_udp_hdr *udp;
470 struct rte_tcp_hdr *tcp;
473 raw_pkt = (uint8_t *)buff;
475 len = 2 * sizeof(struct rte_ether_addr);
476 raw_pkt += 2 * sizeof(struct rte_ether_addr);
477 ether_type = (uint16_t *)raw_pkt;
478 raw_pkt += sizeof(uint16_t);
479 len += sizeof(uint16_t);
481 *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
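/* Only the fields that participate in arfs matching are filled in below;
 * the MAC addresses (and everything else in the template) stay zero from
 * the memset done by the caller.
 */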
482 switch (arfs->tuple.eth_proto) {
483 case RTE_ETHER_TYPE_IPV4:
484 ip = (struct rte_ipv4_hdr *)raw_pkt;
485 ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
486 ip->total_length = sizeof(struct rte_ipv4_hdr);
487 ip->next_proto_id = arfs->tuple.ip_proto;
488 ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
489 ip->dst_addr = arfs->tuple.dst_ipv4;
490 ip->src_addr = arfs->tuple.src_ipv4;
491 len += sizeof(struct rte_ipv4_hdr);
494 raw_pkt = (uint8_t *)buff;
496 if (arfs->tuple.ip_proto == IPPROTO_UDP) {
497 udp = (struct rte_udp_hdr *)(raw_pkt + len);
498 udp->dst_port = arfs->tuple.dst_port;
499 udp->src_port = arfs->tuple.src_port;
500 udp->dgram_len = sizeof(struct rte_udp_hdr);
501 len += sizeof(struct rte_udp_hdr);
502 /* adjust ip total_length */
503 ip->total_length += sizeof(struct rte_udp_hdr);
506 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
507 tcp->src_port = arfs->tuple.src_port;
508 tcp->dst_port = arfs->tuple.dst_port;
509 tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
510 len += sizeof(struct rte_tcp_hdr);
511 /* adjust ip total_length */
512 ip->total_length += sizeof(struct rte_tcp_hdr);
516 case RTE_ETHER_TYPE_IPV6:
517 ip6 = (struct rte_ipv6_hdr *)raw_pkt;
518 ip6->proto = arfs->tuple.ip_proto;
520 rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
522 rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
524 rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
526 len += sizeof(struct rte_ipv6_hdr);
529 raw_pkt = (uint8_t *)buff;
531 if (arfs->tuple.ip_proto == IPPROTO_UDP) {
532 udp = (struct rte_udp_hdr *)(raw_pkt + len);
533 udp->src_port = arfs->tuple.src_port;
534 udp->dst_port = arfs->tuple.dst_port;
535 len += sizeof(struct rte_udp_hdr);
538 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
539 tcp->src_port = arfs->tuple.src_port;
540 tcp->dst_port = arfs->tuple.dst_port;
541 tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
542 len += sizeof(struct rte_tcp_hdr);
547 DP_ERR(edev, "Unsupported eth_proto %u\n",
548 arfs->tuple.eth_proto);
556 qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
557 enum rte_filter_op filter_op,
560 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
561 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
562 struct rte_eth_fdir_filter *fdir;
565 fdir = (struct rte_eth_fdir_filter *)arg;
567 case RTE_ETH_FILTER_NOP:
568 /* Typically used to query flowdir support */
569 if (ECORE_IS_CMT(edev)) {
570 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
573 return 0; /* means supported */
574 case RTE_ETH_FILTER_ADD:
575 ret = qede_fdir_filter_add(eth_dev, fdir, 1);
577 case RTE_ETH_FILTER_DELETE:
578 ret = qede_fdir_filter_add(eth_dev, fdir, 0);
580 case RTE_ETH_FILTER_FLUSH:
581 case RTE_ETH_FILTER_UPDATE:
582 case RTE_ETH_FILTER_INFO:
586 DP_ERR(edev, "unknown operation %u\n", filter_op);
594 qede_tunnel_update(struct qede_dev *qdev,
595 struct ecore_tunnel_info *tunn_info)
597 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
598 enum _ecore_status_t rc = ECORE_INVAL;
599 struct ecore_hwfn *p_hwfn;
600 struct ecore_ptt *p_ptt;
603 for_each_hwfn(edev, i) {
604 p_hwfn = &edev->hwfns[i];
606 p_ptt = ecore_ptt_acquire(p_hwfn);
608 DP_ERR(p_hwfn, "Can't acquire PTT\n");
615 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
616 tunn_info, ECORE_SPQ_MODE_CB, NULL);
618 ecore_ptt_release(p_hwfn, p_ptt);
620 if (rc != ECORE_SUCCESS)
628 qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
631 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
632 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
633 enum _ecore_status_t rc = ECORE_INVAL;
634 struct ecore_tunnel_info tunn;
636 if (qdev->vxlan.enable == enable)
637 return ECORE_SUCCESS;
639 memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
640 tunn.vxlan.b_update_mode = true;
641 tunn.vxlan.b_mode_enabled = enable;
642 tunn.b_update_rx_cls = true;
643 tunn.b_update_tx_cls = true;
644 tunn.vxlan.tun_cls = clss;
646 tunn.vxlan_port.b_update_port = true;
647 tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
649 rc = qede_tunnel_update(qdev, &tunn);
650 if (rc == ECORE_SUCCESS) {
651 qdev->vxlan.enable = enable;
652 qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
653 DP_INFO(edev, "VXLAN is %s, UDP port = %d\n",
654 enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
656 DP_ERR(edev, "Failed to update tunn_clss %u\n",
664 qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
667 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
668 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
669 enum _ecore_status_t rc = ECORE_INVAL;
670 struct ecore_tunnel_info tunn;
672 memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
673 tunn.l2_geneve.b_update_mode = true;
674 tunn.l2_geneve.b_mode_enabled = enable;
675 tunn.ip_geneve.b_update_mode = true;
676 tunn.ip_geneve.b_mode_enabled = enable;
677 tunn.l2_geneve.tun_cls = clss;
678 tunn.ip_geneve.tun_cls = clss;
679 tunn.b_update_rx_cls = true;
680 tunn.b_update_tx_cls = true;
682 tunn.geneve_port.b_update_port = true;
683 tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
685 rc = qede_tunnel_update(qdev, &tunn);
686 if (rc == ECORE_SUCCESS) {
687 qdev->geneve.enable = enable;
688 qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
689 DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
690 enable ? "enabled" : "disabled", qdev->geneve.udp_port);
692 DP_ERR(edev, "Failed to update tunn_clss %u\n",
700 qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
703 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
704 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
705 enum _ecore_status_t rc = ECORE_INVAL;
706 struct ecore_tunnel_info tunn;
708 memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
709 tunn.ip_gre.b_update_mode = true;
710 tunn.ip_gre.b_mode_enabled = enable;
711 tunn.ip_gre.tun_cls = clss;
713 tunn.b_update_rx_cls = true;
714 tunn.b_update_tx_cls = true;
716 rc = qede_tunnel_update(qdev, &tunn);
717 if (rc == ECORE_SUCCESS) {
718 qdev->ipgre.enable = enable;
719 DP_INFO(edev, "IPGRE is %s\n",
720 enable ? "enabled" : "disabled");
722 DP_ERR(edev, "Failed to update tunn_clss %u\n",
730 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
731 struct rte_eth_udp_tunnel *tunnel_udp)
733 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
734 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
735 struct ecore_tunnel_info tunn; /* @DPDK */
739 PMD_INIT_FUNC_TRACE(edev);
741 memset(&tunn, 0, sizeof(tunn));
743 switch (tunnel_udp->prot_type) {
744 case RTE_TUNNEL_TYPE_VXLAN:
745 if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
746 DP_ERR(edev, "UDP port %u doesn't exist\n",
747 tunnel_udp->udp_port);
752 tunn.vxlan_port.b_update_port = true;
753 tunn.vxlan_port.port = udp_port;
755 rc = qede_tunnel_update(qdev, &tunn);
756 if (rc != ECORE_SUCCESS) {
757 DP_ERR(edev, "Unable to config UDP port %u\n",
758 tunn.vxlan_port.port);
762 qdev->vxlan.udp_port = udp_port;
763 /* If the request is to delete a UDP port, and the number of
764 * VXLAN filters has reached 0, then VXLAN offload can be
767 if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
768 return qede_vxlan_enable(eth_dev,
769 ECORE_TUNN_CLSS_MAC_VLAN, false);
772 case RTE_TUNNEL_TYPE_GENEVE:
773 if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
774 DP_ERR(edev, "UDP port %u doesn't exist\n",
775 tunnel_udp->udp_port);
781 tunn.geneve_port.b_update_port = true;
782 tunn.geneve_port.port = udp_port;
784 rc = qede_tunnel_update(qdev, &tunn);
785 if (rc != ECORE_SUCCESS) {
786 DP_ERR(edev, "Unable to config UDP port %u\n",
787 tunn.geneve_port.port);
791 qdev->geneve.udp_port = udp_port;
792 /* If the request is to delete a UDP port, and the number of
793 * GENEVE filters has reached 0, then GENEVE offload can be
796 if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
797 return qede_geneve_enable(eth_dev,
798 ECORE_TUNN_CLSS_MAC_VLAN, false);
810 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
811 struct rte_eth_udp_tunnel *tunnel_udp)
813 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
814 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
815 struct ecore_tunnel_info tunn; /* @DPDK */
819 PMD_INIT_FUNC_TRACE(edev);
821 memset(&tunn, 0, sizeof(tunn));
823 switch (tunnel_udp->prot_type) {
824 case RTE_TUNNEL_TYPE_VXLAN:
825 if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
827 "UDP port %u for VXLAN was already configured\n",
828 tunnel_udp->udp_port);
829 return ECORE_SUCCESS;
832 /* Enable VxLAN tunnel with default MAC/VLAN classification if
833 * it was not enabled while adding VXLAN filter before UDP port
836 if (!qdev->vxlan.enable) {
837 rc = qede_vxlan_enable(eth_dev,
838 ECORE_TUNN_CLSS_MAC_VLAN, true);
839 if (rc != ECORE_SUCCESS) {
840 DP_ERR(edev, "Failed to enable VXLAN "
841 "prior to updating UDP port\n");
845 udp_port = tunnel_udp->udp_port;
847 tunn.vxlan_port.b_update_port = true;
848 tunn.vxlan_port.port = udp_port;
850 rc = qede_tunnel_update(qdev, &tunn);
851 if (rc != ECORE_SUCCESS) {
852 DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
857 DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
859 qdev->vxlan.udp_port = udp_port;
861 case RTE_TUNNEL_TYPE_GENEVE:
862 if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
864 "UDP port %u for GENEVE was already configured\n",
865 tunnel_udp->udp_port);
866 return ECORE_SUCCESS;
869 /* Enable GENEVE tunnel with default MAC/VLAN classification if
870 * it was not enabled while adding GENEVE filter before UDP port
873 if (!qdev->geneve.enable) {
874 rc = qede_geneve_enable(eth_dev,
875 ECORE_TUNN_CLSS_MAC_VLAN, true);
876 if (rc != ECORE_SUCCESS) {
877 DP_ERR(edev, "Failed to enable GENEVE "
878 "prior to updating UDP port\n");
882 udp_port = tunnel_udp->udp_port;
884 tunn.geneve_port.b_update_port = true;
885 tunn.geneve_port.port = udp_port;
887 rc = qede_tunnel_update(qdev, &tunn);
888 if (rc != ECORE_SUCCESS) {
889 DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
894 DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
896 qdev->geneve.udp_port = udp_port;
905 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
906 uint32_t *clss, char *str)
909 *clss = MAX_ECORE_TUNN_CLSS;
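/* MAX_ECORE_TUNN_CLSS doubles as a "not found/unsupported" sentinel;
 * callers check *clss against it after this lookup.
 */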
911 for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
912 if (filter == qede_tunn_types[j].rte_filter_type) {
913 *type = qede_tunn_types[j].qede_type;
914 *clss = qede_tunn_types[j].qede_tunn_clss;
915 strcpy(str, qede_tunn_types[j].string);
922 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
923 const struct rte_eth_tunnel_filter_conf *conf,
926 /* Init common ucast params first */
927 qede_set_ucast_cmn_params(ucast);
929 /* Copy out the required fields based on classification type */
933 case ECORE_FILTER_VNI:
934 ucast->vni = conf->tenant_id;
936 case ECORE_FILTER_INNER_VLAN:
937 ucast->vlan = conf->inner_vlan;
939 case ECORE_FILTER_MAC:
940 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
943 case ECORE_FILTER_INNER_MAC:
944 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
947 case ECORE_FILTER_MAC_VNI_PAIR:
948 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
950 ucast->vni = conf->tenant_id;
952 case ECORE_FILTER_INNER_MAC_VNI_PAIR:
953 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
955 ucast->vni = conf->tenant_id;
957 case ECORE_FILTER_INNER_PAIR:
958 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
960 ucast->vlan = conf->inner_vlan;
966 return ECORE_SUCCESS;
970 _qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
971 const struct rte_eth_tunnel_filter_conf *conf,
972 __rte_unused enum rte_filter_op filter_op,
973 enum ecore_tunn_clss *clss,
976 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
977 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
978 struct ecore_filter_ucast ucast = {0};
979 enum ecore_filter_ucast_type type;
980 uint16_t filter_type = 0;
984 filter_type = conf->filter_type;
985 /* Determine if the given filter classification is supported */
986 qede_get_ecore_tunn_params(filter_type, &type, clss, str);
987 if (*clss == MAX_ECORE_TUNN_CLSS) {
988 DP_ERR(edev, "Unsupported filter type\n");
991 /* Init tunnel ucast params */
992 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
993 if (rc != ECORE_SUCCESS) {
994 DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
998 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
999 str, filter_op, ucast.type);
1001 ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
1003 /* Skip MAC/VLAN if filter is based on VNI */
1004 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
1005 rc = qede_mac_int_ops(eth_dev, &ucast, add);
1006 if (rc == 0 && add) {
1007 /* Enable accept anyvlan */
1008 qede_config_accept_any_vlan(qdev, true);
1011 rc = qede_ucast_filter(eth_dev, &ucast, add);
1013 rc = ecore_filter_ucast_cmd(edev, &ucast,
1014 ECORE_SPQ_MODE_CB, NULL);
1021 qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
1022 enum rte_eth_tunnel_type tunn_type, bool enable)
1026 switch (tunn_type) {
1027 case RTE_TUNNEL_TYPE_VXLAN:
1028 rc = qede_vxlan_enable(eth_dev, clss, enable);
1030 case RTE_TUNNEL_TYPE_GENEVE:
1031 rc = qede_geneve_enable(eth_dev, clss, enable);
1033 case RTE_TUNNEL_TYPE_IP_IN_GRE:
1034 rc = qede_ipgre_enable(eth_dev, clss, enable);
1045 qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
1046 enum rte_filter_op filter_op,
1047 const struct rte_eth_tunnel_filter_conf *conf)
1049 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1050 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1051 enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
1055 PMD_INIT_FUNC_TRACE(edev);
1057 switch (filter_op) {
1058 case RTE_ETH_FILTER_ADD:
1061 case RTE_ETH_FILTER_DELETE:
1065 DP_ERR(edev, "Unsupported operation %d\n", filter_op);
1070 return qede_tunn_enable(eth_dev,
1071 ECORE_TUNN_CLSS_MAC_VLAN,
1072 conf->tunnel_type, add);
1074 rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
1075 if (rc != ECORE_SUCCESS)
1079 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
1080 qdev->vxlan.num_filters++;
1081 qdev->vxlan.filter_type = conf->filter_type;
1082 } else { /* GENEVE */
1083 qdev->geneve.num_filters++;
1084 qdev->geneve.filter_type = conf->filter_type;
1087 if (!qdev->vxlan.enable || !qdev->geneve.enable ||
1088 !qdev->ipgre.enable)
1089 return qede_tunn_enable(eth_dev, clss,
1093 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
1094 qdev->vxlan.num_filters--;
1096 qdev->geneve.num_filters--;
1098 /* Disable the tunnel offload once the filter count drops to 0 */
1099 if (qdev->vxlan.num_filters == 0 ||
1100 qdev->geneve.num_filters == 0)
1101 return qede_tunn_enable(eth_dev, clss,
1110 qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev,
1111 const struct rte_flow_attr *attr,
1112 struct rte_flow_error *error)
1115 rte_flow_error_set(error, EINVAL,
1116 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1121 if (attr->group != 0) {
1122 rte_flow_error_set(error, ENOTSUP,
1123 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1124 "Groups are not supported");
1128 if (attr->priority != 0) {
1129 rte_flow_error_set(error, ENOTSUP,
1130 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
1131 "Priorities are not supported");
1135 if (attr->egress != 0) {
1136 rte_flow_error_set(error, ENOTSUP,
1137 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1138 "Egress is not supported");
1142 if (attr->transfer != 0) {
1143 rte_flow_error_set(error, ENOTSUP,
1144 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
1145 "Transfer is not supported");
1149 if (attr->ingress == 0) {
1150 rte_flow_error_set(error, ENOTSUP,
1151 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1152 "Only ingress is supported");
1160 qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
1161 const struct rte_flow_item pattern[],
1162 struct rte_flow_error *error,
1163 struct rte_flow *flow)
1165 bool l3 = false, l4 = false;
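/* The l3/l4 flags track that the pattern supplies exactly the pieces a
 * 4-tuple filter needs: one IPv4/IPv6 item and one UDP/TCP item. Both
 * must be set by the time the END item is reached (checked below).
 */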
1167 if (pattern == NULL) {
1168 rte_flow_error_set(error, EINVAL,
1169 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1174 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1175 if (!pattern->spec) {
1176 rte_flow_error_set(error, EINVAL,
1177 RTE_FLOW_ERROR_TYPE_ITEM,
1179 "Item spec not defined");
1183 if (pattern->last) {
1184 rte_flow_error_set(error, EINVAL,
1185 RTE_FLOW_ERROR_TYPE_ITEM,
1187 "Item last not supported");
1191 if (pattern->mask) {
1192 rte_flow_error_set(error, EINVAL,
1193 RTE_FLOW_ERROR_TYPE_ITEM,
1195 "Item mask not supported");
1199 /* Below validation is only for 4 tuple flow
1200 * (GFT_PROFILE_TYPE_4_TUPLE)
1201 * - src and dst L3 address (IPv4 or IPv6)
1202 * - src and dst L4 port (TCP or UDP)
1205 switch (pattern->type) {
1206 case RTE_FLOW_ITEM_TYPE_IPV4:
1210 const struct rte_flow_item_ipv4 *spec;
1212 spec = pattern->spec;
1213 flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
1214 flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
1215 flow->entry.tuple.eth_proto =
1216 RTE_ETHER_TYPE_IPV4;
1220 case RTE_FLOW_ITEM_TYPE_IPV6:
1224 const struct rte_flow_item_ipv6 *spec;
1226 spec = pattern->spec;
1227 rte_memcpy(flow->entry.tuple.src_ipv6,
1230 rte_memcpy(flow->entry.tuple.dst_ipv6,
1233 flow->entry.tuple.eth_proto =
1234 RTE_ETHER_TYPE_IPV6;
1238 case RTE_FLOW_ITEM_TYPE_UDP:
1242 const struct rte_flow_item_udp *spec;
1244 spec = pattern->spec;
1245 flow->entry.tuple.src_port =
1247 flow->entry.tuple.dst_port =
1249 flow->entry.tuple.ip_proto = IPPROTO_UDP;
1253 case RTE_FLOW_ITEM_TYPE_TCP:
1257 const struct rte_flow_item_tcp *spec;
1259 spec = pattern->spec;
1260 flow->entry.tuple.src_port =
1262 flow->entry.tuple.dst_port =
1264 flow->entry.tuple.ip_proto = IPPROTO_TCP;
1269 rte_flow_error_set(error, EINVAL,
1270 RTE_FLOW_ERROR_TYPE_ITEM,
1272 "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
1278 rte_flow_error_set(error, EINVAL,
1279 RTE_FLOW_ERROR_TYPE_ITEM,
1281 "Item types need to have both L3 and L4 protocols");
1289 qede_flow_parse_actions(struct rte_eth_dev *dev,
1290 const struct rte_flow_action actions[],
1291 struct rte_flow_error *error,
1292 struct rte_flow *flow)
1294 const struct rte_flow_action_queue *queue;
1296 if (actions == NULL) {
1297 rte_flow_error_set(error, EINVAL,
1298 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1303 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1304 switch (actions->type) {
1305 case RTE_FLOW_ACTION_TYPE_QUEUE:
1306 queue = actions->conf;
1308 if (queue->index >= QEDE_RSS_COUNT(dev)) {
1309 rte_flow_error_set(error, EINVAL,
1310 RTE_FLOW_ERROR_TYPE_ACTION,
1312 "Bad QUEUE action");
1317 flow->entry.rx_queue = queue->index;
1320 case RTE_FLOW_ACTION_TYPE_DROP:
1322 flow->entry.is_drop = true;
1325 rte_flow_error_set(error, ENOTSUP,
1326 RTE_FLOW_ERROR_TYPE_ACTION,
1328 "Action is not supported - only ACTION_TYPE_QUEUE and ACTION_TYPE_DROP supported");
1337 qede_flow_parse(struct rte_eth_dev *dev,
1338 const struct rte_flow_attr *attr,
1339 const struct rte_flow_item patterns[],
1340 const struct rte_flow_action actions[],
1341 struct rte_flow_error *error,
1342 struct rte_flow *flow)
1347 rc = qede_flow_validate_attr(dev, attr, error);
1351 /* Parse and validate the item pattern and actions.
1352 * The given item list and actions will be translated to a qede PMD
1353 * specific arfs structure.
1355 rc = qede_flow_parse_pattern(dev, patterns, error, flow);
1359 rc = qede_flow_parse_actions(dev, actions, error, flow);
1365 qede_flow_validate(struct rte_eth_dev *dev,
1366 const struct rte_flow_attr *attr,
1367 const struct rte_flow_item patterns[],
1368 const struct rte_flow_action actions[],
1369 struct rte_flow_error *error)
1371 return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
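/* Note: validate-only calls pass flow == NULL above; the pattern/action
 * parsers are written to populate the arfs entry only when a flow object
 * is actually provided.
 */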
1374 static struct rte_flow *
1375 qede_flow_create(struct rte_eth_dev *dev,
1376 const struct rte_flow_attr *attr,
1377 const struct rte_flow_item pattern[],
1378 const struct rte_flow_action actions[],
1379 struct rte_flow_error *error)
1381 struct rte_flow *flow = NULL;
1384 flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
1386 rte_flow_error_set(error, ENOMEM,
1387 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1388 "Failed to allocate memory");
1392 rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
1398 rc = qede_config_arfs_filter(dev, &flow->entry, true);
1400 rte_flow_error_set(error, rc,
1401 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1402 "Failed to configure flow filter");
1411 qede_flow_destroy(struct rte_eth_dev *eth_dev,
1412 struct rte_flow *flow,
1413 struct rte_flow_error *error)
1417 rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
1419 rte_flow_error_set(error, rc,
1420 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1421 "Failed to delete flow filter");
1429 qede_flow_flush(struct rte_eth_dev *eth_dev,
1430 struct rte_flow_error *error)
1432 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1433 struct qede_arfs_entry *tmp = NULL;
1436 while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
1437 tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
1439 rc = qede_config_arfs_filter(eth_dev, tmp, false);
1441 rte_flow_error_set(error, rc,
1442 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1443 "Failed to flush flow filter");
1449 const struct rte_flow_ops qede_flow_ops = {
1450 .validate = qede_flow_validate,
1451 .create = qede_flow_create,
1452 .destroy = qede_flow_destroy,
1453 .flush = qede_flow_flush,
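/* Illustrative only: an application reaches these callbacks through the
 * generic rte_flow API, e.g. (error handling and item specs omitted):
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = { ... IPv4 item, UDP item, END ... };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                        actions, &err);
 */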
1456 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
1457 enum rte_filter_type filter_type,
1458 enum rte_filter_op filter_op,
1461 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1462 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1463 struct rte_eth_tunnel_filter_conf *filter_conf =
1464 (struct rte_eth_tunnel_filter_conf *)arg;
1466 switch (filter_type) {
1467 case RTE_ETH_FILTER_TUNNEL:
1468 switch (filter_conf->tunnel_type) {
1469 case RTE_TUNNEL_TYPE_VXLAN:
1470 case RTE_TUNNEL_TYPE_GENEVE:
1471 case RTE_TUNNEL_TYPE_IP_IN_GRE:
1473 "Packet steering to the specified Rx queue"
1474 " is not supported with UDP tunneling");
1475 return(qede_tunn_filter_config(eth_dev, filter_op,
1477 case RTE_TUNNEL_TYPE_TEREDO:
1478 case RTE_TUNNEL_TYPE_NVGRE:
1479 case RTE_L2_TUNNEL_TYPE_E_TAG:
1480 DP_ERR(edev, "Unsupported tunnel type %d\n",
1481 filter_conf->tunnel_type);
1483 case RTE_TUNNEL_TYPE_NONE:
1488 case RTE_ETH_FILTER_FDIR:
1489 return qede_fdir_filter_conf(eth_dev, filter_op, arg);
1490 case RTE_ETH_FILTER_GENERIC:
1491 if (ECORE_IS_CMT(edev)) {
1492 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
1496 if (filter_op != RTE_ETH_FILTER_GET)
1499 *(const void **)arg = &qede_flow_ops;
1501 case RTE_ETH_FILTER_HASH:
1502 case RTE_ETH_FILTER_L2_TUNNEL:
1503 case RTE_ETH_FILTER_MAX:
1505 DP_ERR(edev, "Unsupported filter type %d\n",