/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_errno.h>
#include <rte_flow_driver.h>

#include "qede_ethdev.h"
/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{ETH_TUNNEL_FILTER_OMAC, ECORE_FILTER_MAC,
	 ECORE_TUNN_CLSS_MAC_VLAN, "outer-mac"},
	{ETH_TUNNEL_FILTER_TENID, ECORE_FILTER_VNI,
	 ECORE_TUNN_CLSS_MAC_VNI, "vni"},
	{ETH_TUNNEL_FILTER_IMAC, ECORE_FILTER_INNER_MAC,
	 ECORE_TUNN_CLSS_INNER_MAC_VLAN, "inner-mac"},
	{ETH_TUNNEL_FILTER_IVLAN, ECORE_FILTER_INNER_VLAN,
	 ECORE_TUNN_CLSS_INNER_MAC_VLAN, "inner-vlan"},
	{ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
	 ECORE_FILTER_MAC_VNI_PAIR, ECORE_TUNN_CLSS_MAC_VNI,
	 "outer-mac and vni"},
	{ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
	 ECORE_FILTER_UNUSED, MAX_ECORE_TUNN_CLSS,
	 "outer-mac and inner-mac"},
	{ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
	 ECORE_FILTER_UNUSED, MAX_ECORE_TUNN_CLSS,
	 "outer-mac and inner-vlan"},
	{ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
	 ECORE_FILTER_INNER_MAC_VNI_PAIR, ECORE_TUNN_CLSS_INNER_MAC_VNI,
	 "vni and inner-mac"},
	{ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
	 ECORE_FILTER_UNUSED, MAX_ECORE_TUNN_CLSS,
	 "vni and inner-vlan"},
	{ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	 ECORE_FILTER_INNER_PAIR, ECORE_TUNN_CLSS_INNER_MAC_VLAN,
	 "inner-mac and inner-vlan"},
	{ETH_TUNNEL_FILTER_OIP, ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS, "outer-IP"},
	{ETH_TUNNEL_FILTER_IIP, ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS, "inner-IP"},
	{RTE_TUNNEL_FILTER_IMAC_IVLAN, ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS, "IMAC_IVLAN"},
	{RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID, ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS, "IMAC_IVLAN_TENID"},
	{RTE_TUNNEL_FILTER_IMAC_TENID, ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS, "IMAC_TENID"},
	{RTE_TUNNEL_FILTER_OMAC_TENID_IMAC, ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS, "OMAC_TENID_IMAC"},
};
#define IP_VERSION				(0x40)
#define IP_HDRLEN				(0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL	(IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF		(0x50)
#define QEDE_FDIR_IPV4_DEF_TTL			(64)
#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW		(0x60000000)

/* Sum of length of header types of L2, L3, L4.
 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
 * L3 : ipv6_hdr
 * L4 : tcp_hdr
 */
#define QEDE_MAX_FDIR_PKT_LEN			(86)
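
/* Sanity check on the budget above, assuming standard DPDK header sizes:
 * 14 (rte_ether_hdr) + 4 (rte_vlan_hdr) + 8 (rte_vxlan_hdr)
 * + 40 (rte_ipv6_hdr) + 20 (rte_tcp_hdr) = 86 bytes.
 */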
static inline bool qede_valid_flow(uint16_t flow_type)
{
	return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
		(flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
		(flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
		(flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
}
static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			void *buff,
			struct ecore_arfs_config_params *params);
/* Note: Flowdir support is only partial.
 * For example: drop_queue, FDIR masks, flex_conf are not supported.
 * Parameters like pballoc/status fields are irrelevant here.
 */
int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;

	/* check FDIR modes */
	switch (fdir->mode) {
	case RTE_FDIR_MODE_NONE:
		qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
		DP_INFO(edev, "flowdir is disabled\n");
		break;
	case RTE_FDIR_MODE_PERFECT:
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			qdev->arfs_info.arfs.mode =
				ECORE_FILTER_CONFIG_MODE_DISABLE;
			return -ENOTSUP;
		}
		qdev->arfs_info.arfs.mode =
			ECORE_FILTER_CONFIG_MODE_5_TUPLE;
		DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
		break;
	case RTE_FDIR_MODE_PERFECT_TUNNEL:
	case RTE_FDIR_MODE_SIGNATURE:
	case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
		DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
		return -ENOTSUP;
	}

	return 0;
}
void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct qede_arfs_entry *tmp;

	/* Pop and free each remaining entry along with its packet memzone */
	while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
		tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
		if (tmp->mz)
			rte_memzone_free(tmp->mz);
		SLIST_REMOVE_HEAD(&qdev->arfs_info.arfs_list_head, list);
		rte_free(tmp);
	}
}
static int
qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
			 struct rte_eth_fdir_filter *fdir,
			 struct qede_arfs_entry *arfs)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_fdir_input *input;

	static const uint8_t next_proto[] = {
		[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
	};

	input = &fdir->input;

	DP_INFO(edev, "flow_type %d\n", input->flow_type);

	switch (input->flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		/* fill the common ip header */
		arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV4;
		arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
		arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
		arfs->tuple.ip_proto = next_proto[input->flow_type];

		/* UDP */
		if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
			arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
			arfs->tuple.src_port = input->flow.udp4_flow.src_port;
		} else { /* TCP */
			arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
			arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
		}
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV6;
		arfs->tuple.ip_proto = next_proto[input->flow_type];
		rte_memcpy(arfs->tuple.dst_ipv6,
			   &input->flow.ipv6_flow.dst_ip,
			   IPV6_ADDR_LEN);
		rte_memcpy(arfs->tuple.src_ipv6,
			   &input->flow.ipv6_flow.src_ip,
			   IPV6_ADDR_LEN);

		/* UDP */
		if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
			arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
			arfs->tuple.src_port = input->flow.udp6_flow.src_port;
		} else { /* TCP */
			arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
			arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
		}
		break;
	default:
		DP_ERR(edev, "Unsupported flow_type %u\n",
		       input->flow_type);
		return -ENOTSUP;
	}

	arfs->rx_queue = fdir->action.rx_queue;
	return 0;
}
static int
qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_ntuple_filter_params params;
	char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
	struct qede_arfs_entry *tmp = NULL;
	const struct rte_memzone *mz;
	struct ecore_hwfn *p_hwfn;
	enum _ecore_status_t rc;
	uint16_t pkt_len;
	void *pkt;

	if (add) {
		if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
			DP_ERR(edev, "Reached max flowdir filter limit\n");
			return -EINVAL;
		}
	}

	/* soft_id could have been used as the memzone string, but soft_id is
	 * not currently used so it has no significance.
	 */
	snprintf(mz_name, sizeof(mz_name), "%lx",
		 (unsigned long)rte_get_timer_cycles());
	mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
					 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
	if (!mz) {
		DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
		       rte_strerror(rte_errno));
		return -rte_errno;
	}

	pkt = mz->addr;
	memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
	pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
					  &qdev->arfs_info.arfs);
	if (pkt_len == 0) {
		rc = -EINVAL;
		goto err1;
	}

	DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
	if (add) {
		/* Reject a duplicate of an already-programmed filter */
		SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
			if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
				DP_INFO(edev, "flowdir filter exists\n");
				rc = -EEXIST;
				goto err1;
			}
		}
	} else {
		/* The filter to be deleted must already be in the list */
		SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
			if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
				break;
		}
		if (!tmp) {
			DP_ERR(edev, "flowdir filter does not exist\n");
			rc = -EEXIST;
			goto err1;
		}
	}

	p_hwfn = ECORE_LEADING_HWFN(edev);
	if (add) {
		if (qdev->arfs_info.arfs.mode ==
		    ECORE_FILTER_CONFIG_MODE_DISABLE) {
			/* Force update */
			eth_dev->data->dev_conf.fdir_conf.mode =
						RTE_FDIR_MODE_PERFECT;
			qdev->arfs_info.arfs.mode =
					ECORE_FILTER_CONFIG_MODE_5_TUPLE;
			DP_INFO(edev, "Force enable flowdir in perfect mode\n");
		}
		/* Enable ARFS searcher with updated flow_types */
		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
					  &qdev->arfs_info.arfs);
	}

	memset(&params, 0, sizeof(params));
	params.addr = (dma_addr_t)mz->iova;
	params.length = pkt_len;
	params.qid = arfs->rx_queue;
	params.vport_id = 0;
	params.b_is_add = add;
	params.b_is_drop = arfs->is_drop;

	/* configure filter with ECORE_SPQ_MODE_EBLOCK */
	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
					       &params);
	if (rc == ECORE_SUCCESS) {
		if (add) {
			arfs->pkt_len = pkt_len;
			arfs->mz = mz;
			SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
					  arfs, list);
			qdev->arfs_info.filter_count++;
			DP_INFO(edev, "flowdir filter added, count = %d\n",
				qdev->arfs_info.filter_count);
		} else {
			rte_memzone_free(tmp->mz);
			SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
				     qede_arfs_entry, list);
			rte_free(tmp); /* the node deleted */
			rte_memzone_free(mz); /* temp node allocated */
			qdev->arfs_info.filter_count--;
			DP_INFO(edev, "Fdir filter deleted, count = %d\n",
				qdev->arfs_info.filter_count);
		}
	} else {
		DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
		       rc, qdev->arfs_info.filter_count);
		goto err1;
	}

	/* Disable ARFS searcher if there are no more filters */
	if (qdev->arfs_info.filter_count == 0) {
		memset(&qdev->arfs_info.arfs, 0,
		       sizeof(struct ecore_arfs_config_params));
		DP_INFO(edev, "Disabling flowdir\n");
		qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
					  &qdev->arfs_info.arfs);
	}
	return 0;

err1:
	rte_memzone_free(mz);
	return rc;
}
static int
qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
			    struct rte_eth_fdir_filter *fdir_filter,
			    bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_arfs_entry *arfs = NULL;
	int rc = 0;

	arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
			  RTE_CACHE_LINE_SIZE);
	if (!arfs) {
		DP_ERR(edev, "Did not allocate memory for arfs\n");
		return -ENOMEM;
	}

	rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
	if (rc < 0) {
		rte_free(arfs);
		return rc;
	}

	rc = qede_config_arfs_filter(eth_dev, arfs, add);
	if (rc < 0 || !add)
		/* On delete (or failure) the temp entry is not linked */
		rte_free(arfs);

	return rc;
}
static int
qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
		     struct rte_eth_fdir_filter *fdir,
		     bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (!qede_valid_flow(fdir->input.flow_type)) {
		DP_ERR(edev, "invalid flow_type input\n");
		return -EINVAL;
	}

	if (fdir->action.rx_queue >= QEDE_RSS_COUNT(eth_dev)) {
		DP_ERR(edev, "invalid queue number %u\n",
		       fdir->action.rx_queue);
		return -EINVAL;
	}

	if (fdir->input.flow_ext.is_vf) {
		DP_ERR(edev, "flowdir is not supported over VF\n");
		return -EINVAL;
	}

	return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
}
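
/* Usage sketch (illustrative only, not part of the driver): an application
 * reaches qede_fdir_filter_add() through the legacy filter API, roughly as
 * below. Field values and port_id are hypothetical.
 *
 *	struct rte_eth_fdir_filter f = {0};
 *
 *	f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *	f.input.flow.udp4_flow.ip.dst_ip =
 *		rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1));
 *	f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4789);
 *	f.action.rx_queue = 1;
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				RTE_ETH_FILTER_ADD, &f);
 */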
/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			void *buff,
			struct ecore_arfs_config_params *params)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint16_t *ether_type;
	uint8_t *raw_pkt;
	struct rte_ipv4_hdr *ip;
	struct rte_ipv6_hdr *ip6;
	struct rte_udp_hdr *udp;
	struct rte_tcp_hdr *tcp;
	uint16_t len;

	raw_pkt = (uint8_t *)buff;

	len = 2 * sizeof(struct rte_ether_addr);
	raw_pkt += 2 * sizeof(struct rte_ether_addr);
	ether_type = (uint16_t *)raw_pkt;
	raw_pkt += sizeof(uint16_t);
	len += sizeof(uint16_t);

	*ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
	switch (arfs->tuple.eth_proto) {
	case RTE_ETHER_TYPE_IPV4:
		ip = (struct rte_ipv4_hdr *)raw_pkt;
		ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
		ip->total_length = sizeof(struct rte_ipv4_hdr);
		ip->next_proto_id = arfs->tuple.ip_proto;
		ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
		ip->dst_addr = arfs->tuple.dst_ipv4;
		ip->src_addr = arfs->tuple.src_ipv4;
		len += sizeof(struct rte_ipv4_hdr);
		params->ipv4 = true;

		raw_pkt = (uint8_t *)buff;
		/* UDP */
		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
			udp = (struct rte_udp_hdr *)(raw_pkt + len);
			udp->dst_port = arfs->tuple.dst_port;
			udp->src_port = arfs->tuple.src_port;
			udp->dgram_len = sizeof(struct rte_udp_hdr);
			len += sizeof(struct rte_udp_hdr);
			/* adjust ip total_length */
			ip->total_length += sizeof(struct rte_udp_hdr);
			params->udp = true;
		} else { /* TCP */
			tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
			tcp->src_port = arfs->tuple.src_port;
			tcp->dst_port = arfs->tuple.dst_port;
			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
			len += sizeof(struct rte_tcp_hdr);
			/* adjust ip total_length */
			ip->total_length += sizeof(struct rte_tcp_hdr);
			params->tcp = true;
		}
		break;
	case RTE_ETHER_TYPE_IPV6:
		ip6 = (struct rte_ipv6_hdr *)raw_pkt;
		ip6->proto = arfs->tuple.ip_proto;
		ip6->vtc_flow =
			rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);

		rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
			   IPV6_ADDR_LEN);
		rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
			   IPV6_ADDR_LEN);
		len += sizeof(struct rte_ipv6_hdr);
		params->ipv6 = true;

		raw_pkt = (uint8_t *)buff;
		/* UDP */
		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
			udp = (struct rte_udp_hdr *)(raw_pkt + len);
			udp->src_port = arfs->tuple.src_port;
			udp->dst_port = arfs->tuple.dst_port;
			len += sizeof(struct rte_udp_hdr);
			params->udp = true;
		} else { /* TCP */
			tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
			tcp->src_port = arfs->tuple.src_port;
			tcp->dst_port = arfs->tuple.dst_port;
			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
			len += sizeof(struct rte_tcp_hdr);
			params->tcp = true;
		}
		break;
	default:
		DP_ERR(edev, "Unsupported eth_proto %u\n",
		       arfs->tuple.eth_proto);
		return 0;
	}

	return len;
}
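
/* For reference, the packet constructed above for an IPv4/TCP tuple is
 * laid out as follows (byte offsets, no VLAN/VXLAN encapsulation):
 *
 *   0             12       14             34            54
 *   | MAC addrs   | etype  | rte_ipv4_hdr | rte_tcp_hdr |
 *
 * Only the tuple fields (addresses, ports, protocol) are filled in; the
 * rest of the buffer stays zeroed from the memset() in
 * qede_config_arfs_filter().
 */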
static int
qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_fdir_filter *fdir;
	int ret;

	fdir = (struct rte_eth_fdir_filter *)arg;
	switch (filter_op) {
	case RTE_ETH_FILTER_NOP:
		/* Typically used to query flowdir support */
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			return -ENOTSUP;
		}
		return 0; /* means supported */
	case RTE_ETH_FILTER_ADD:
		ret = qede_fdir_filter_add(eth_dev, fdir, 1);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = qede_fdir_filter_add(eth_dev, fdir, 0);
		break;
	case RTE_ETH_FILTER_FLUSH:
	case RTE_ETH_FILTER_UPDATE:
	case RTE_ETH_FILTER_INFO:
		return -ENOTSUP;
	default:
		DP_ERR(edev, "unknown operation %u", filter_op);
		ret = -EINVAL;
	}

	return ret;
}
static int
qede_tunnel_update(struct qede_dev *qdev,
		   struct ecore_tunnel_info *tunn_info)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		if (IS_PF(edev)) {
			p_ptt = ecore_ptt_acquire(p_hwfn);
			if (!p_ptt) {
				DP_ERR(p_hwfn, "Can't acquire PTT\n");
				return -EAGAIN;
			}
		} else {
			p_ptt = NULL;
		}
		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
				tunn_info, ECORE_SPQ_MODE_CB, NULL);
		if (IS_PF(edev))
			ecore_ptt_release(p_hwfn, p_ptt);
		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}
static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	if (qdev->vxlan.enable == enable)
		return ECORE_SUCCESS;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.vxlan.b_update_mode = true;
	tunn.vxlan.b_mode_enabled = enable;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;
	tunn.vxlan.tun_cls = clss;

	tunn.vxlan_port.b_update_port = true;
	tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->vxlan.enable = enable;
		qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
		DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.vxlan.tun_cls);
	}

	return rc;
}
static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		   bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.l2_geneve.b_update_mode = true;
	tunn.l2_geneve.b_mode_enabled = enable;
	tunn.ip_geneve.b_update_mode = true;
	tunn.ip_geneve.b_mode_enabled = enable;
	tunn.l2_geneve.tun_cls = clss;
	tunn.ip_geneve.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	tunn.geneve_port.b_update_port = true;
	tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->geneve.enable = enable;
		qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
		DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->geneve.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       clss);
	}

	return rc;
}
int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}

		udp_port = 0;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.vxlan_port.port);
			return rc;
		}

		qdev->vxlan.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * VXLAN filters has reached 0, then VXLAN offload can be
		 * disabled.
		 */
		if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
			return qede_vxlan_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}

		udp_port = 0;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.geneve_port.port);
			return rc;
		}

		qdev->geneve.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * GENEVE filters has reached 0, then GENEVE offload can be
		 * disabled.
		 */
		if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
			return qede_geneve_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;
	default:
		return ECORE_INVAL;
	}

	return 0;
}
int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for VXLAN was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable VXLAN tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding a VXLAN filter before the
		 * UDP port update.
		 */
		if (!qdev->vxlan.enable) {
			rc = qede_vxlan_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable VXLAN "
				       "prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);

		qdev->vxlan.udp_port = udp_port;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for GENEVE was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable GENEVE tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding a GENEVE filter before the
		 * UDP port update.
		 */
		if (!qdev->geneve.enable) {
			rc = qede_geneve_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable GENEVE "
				       "prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);

		qdev->geneve.udp_port = udp_port;
		break;
	default:
		return ECORE_INVAL;
	}

	return 0;
}
static int
qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}

	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}

	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}

	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}

	if (attr->transfer != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
				   "Transfer is not supported");
		return -rte_errno;
	}

	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	return 0;
}
static int
qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
			const struct rte_flow_item pattern[],
			struct rte_flow_error *error,
			struct rte_flow *flow)
{
	bool l3 = false, l4 = false;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		if (!pattern->spec) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern,
					   "Item spec not defined");
			return -rte_errno;
		}
		if (pattern->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern,
					   "Item last not supported");
			return -rte_errno;
		}
		if (pattern->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern,
					   "Item mask not supported");
			return -rte_errno;
		}

		/* Below validation is only for 4 tuple flow
		 * (GFT_PROFILE_TYPE_4_TUPLE)
		 * - src and dst L3 address (IPv4 or IPv6)
		 * - src and dst L4 port (TCP or UDP)
		 */
		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = true;
			if (flow) {
				const struct rte_flow_item_ipv4 *spec;

				spec = pattern->spec;
				flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
				flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
				flow->entry.tuple.eth_proto =
					RTE_ETHER_TYPE_IPV4;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = true;
			if (flow) {
				const struct rte_flow_item_ipv6 *spec;

				spec = pattern->spec;
				rte_memcpy(flow->entry.tuple.src_ipv6,
					   spec->hdr.src_addr,
					   IPV6_ADDR_LEN);
				rte_memcpy(flow->entry.tuple.dst_ipv6,
					   spec->hdr.dst_addr,
					   IPV6_ADDR_LEN);
				flow->entry.tuple.eth_proto =
					RTE_ETHER_TYPE_IPV6;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			l4 = true;
			if (flow) {
				const struct rte_flow_item_udp *spec;

				spec = pattern->spec;
				flow->entry.tuple.src_port =
					spec->hdr.src_port;
				flow->entry.tuple.dst_port =
					spec->hdr.dst_port;
				flow->entry.tuple.ip_proto = IPPROTO_UDP;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			l4 = true;
			if (flow) {
				const struct rte_flow_item_tcp *spec;

				spec = pattern->spec;
				flow->entry.tuple.src_port =
					spec->hdr.src_port;
				flow->entry.tuple.dst_port =
					spec->hdr.dst_port;
				flow->entry.tuple.ip_proto = IPPROTO_TCP;
			}
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern,
					   "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
			return -rte_errno;
		}
	}

	if (!(l3 && l4)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pattern,
				   "Item types need to have both L3 and L4 protocols");
		return -rte_errno;
	}

	return 0;
}
static int
qede_flow_parse_actions(struct rte_eth_dev *dev,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct rte_flow *flow)
{
	const struct rte_flow_action_queue *queue;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			queue = actions->conf;

			if (queue->index >= QEDE_RSS_COUNT(dev)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Bad QUEUE action");
				return -rte_errno;
			}

			if (flow)
				flow->entry.rx_queue = queue->index;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			if (flow)
				flow->entry.is_drop = true;
			break;
		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   actions,
					   "Action is not supported - only ACTION_TYPE_QUEUE and ACTION_TYPE_DROP supported");
			return -rte_errno;
		}
	}

	return 0;
}
static int
qede_flow_parse(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item patterns[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct rte_flow *flow)
{
	int rc = 0;

	rc = qede_flow_validate_attr(dev, attr, error);
	if (rc)
		return rc;

	/* Parse and validate the item pattern and actions.
	 * The given item list and actions will be translated to the qede PMD
	 * specific arfs structure.
	 */
	rc = qede_flow_parse_pattern(dev, patterns, error, flow);
	if (rc)
		return rc;

	rc = qede_flow_parse_actions(dev, actions, error, flow);

	return rc;
}
static int
qede_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item patterns[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
}
static struct rte_flow *
qede_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		return NULL;
	}

	rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
	if (rc < 0) {
		rte_free(flow);
		return NULL;
	}

	rc = qede_config_arfs_filter(dev, &flow->entry, true);
	if (rc < 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to configure flow filter");
		rte_free(flow);
		return NULL;
	}

	return flow;
}
static int
qede_flow_destroy(struct rte_eth_dev *eth_dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	int rc = 0;

	rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
	if (rc < 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to delete flow filter");
		rte_free(flow);
	}
	/* On success the list node freed inside qede_config_arfs_filter() is
	 * &flow->entry, the first member of this PMD's struct rte_flow, so
	 * the handle must not be freed again here.
	 */

	return rc;
}
static int
qede_flow_flush(struct rte_eth_dev *eth_dev,
		struct rte_flow_error *error)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct qede_arfs_entry *tmp = NULL;
	int rc = 0;

	while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
		tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);

		rc = qede_config_arfs_filter(eth_dev, tmp, false);
		if (rc < 0) {
			rte_flow_error_set(error, rc,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to flush flow filter");
			/* Stop here; the entry was not removed from the list
			 * and retrying would loop forever.
			 */
			break;
		}
	}

	return rc;
}
const struct rte_flow_ops qede_flow_ops = {
	.validate = qede_flow_validate,
	.create = qede_flow_create,
	.destroy = qede_flow_destroy,
	.flush = qede_flow_flush,
};
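
/* Usage sketch (illustrative, not part of the driver): steering IPv4/UDP
 * traffic to queue 2 through the generic rte_flow API, which lands in the
 * ops above. Addresses, ports and port_id are hypothetical.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(10, 0, 0, 1)),
 *		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(10, 0, 0, 2)),
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(4000),
 *		.hdr.dst_port = rte_cpu_to_be_16(4001),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue q = { .index = 2 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *
 * Note that qede_flow_parse_pattern() requires both an L3 and an L4 item
 * and rejects masks, so only exact-match 4-tuple flows are accepted.
 */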
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op,
			 void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	switch (filter_type) {
	case RTE_ETH_FILTER_FDIR:
		return qede_fdir_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_GENERIC:
		if (ECORE_IS_CMT(edev)) {
			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
			return -ENOTSUP;
		}

		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;

		*(const void **)arg = &qede_flow_ops;
		return 0;
	case RTE_ETH_FILTER_MAX:
	default:
		DP_ERR(edev, "Unsupported filter type %d\n",
		       filter_type);
		return -EINVAL;
	}
}