1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2017 Cavium Inc.
10 #include <rte_errno.h>
12 #include "qede_ethdev.h"
14 #define IP_VERSION (0x40)
15 #define IP_HDRLEN (0x5)
16 #define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
17 #define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
18 #define QEDE_FDIR_IPV4_DEF_TTL (64)
20 /* Sum of length of header types of L2, L3, L4.
21 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
25 #define QEDE_MAX_FDIR_PKT_LEN (86)
28 #define IPV6_ADDR_LEN (16)
/* True only for the four flow types this flow-director implementation can
 * program: non-fragmented IPv4/IPv6 TCP and UDP (see qede_fdir_filter_add()).
 */
31 #define QEDE_VALID_FLOW(flow_type) \
32 ((flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
33 (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
34 (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
35 (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
37 /* Note: Flowdir support is only partial.
38 * For ex: drop_queue, FDIR masks, flex_conf are not supported.
39 * Parameters like pballoc/status fields are irrelevant here.
41 int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
43 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
44 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
45 struct rte_fdir_conf *fdir = ð_dev->data->dev_conf.fdir_conf;
47 /* check FDIR modes */
49 case RTE_FDIR_MODE_NONE:
50 qdev->fdir_info.arfs.arfs_enable = false;
51 DP_INFO(edev, "flowdir is disabled\n");
53 case RTE_FDIR_MODE_PERFECT:
54 if (ECORE_IS_CMT(edev)) {
55 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
56 qdev->fdir_info.arfs.arfs_enable = false;
59 qdev->fdir_info.arfs.arfs_enable = true;
60 DP_INFO(edev, "flowdir is enabled\n");
62 case RTE_FDIR_MODE_PERFECT_TUNNEL:
63 case RTE_FDIR_MODE_SIGNATURE:
64 case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
65 DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
72 void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
74 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
75 struct qede_fdir_entry *tmp = NULL;
77 SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
80 rte_memzone_free(tmp->mz);
81 SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
82 qede_fdir_entry, list);
/* Common add/delete path for a flow-director filter.  Builds a template
 * packet into a freshly reserved memzone, compares it against the installed
 * filters, then programs or removes the RFS ntuple filter via ecore.
 * NOTE(review): this listing is missing several original lines (returns,
 * gotos, closing braces); comments below annotate only what is visible.
 */
89 qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
90 struct rte_eth_fdir_filter *fdir_filter,
93 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
94 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
95 char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
96 struct qede_fdir_entry *tmp = NULL;
97 struct qede_fdir_entry *fdir = NULL;
98 const struct rte_memzone *mz;
99 struct ecore_hwfn *p_hwfn;
100 enum _ecore_status_t rc;
/* NOTE(review): "== QEDE_RFS_MAX_FLTR - 1" caps usable filters at MAX-1;
 * confirm whether ">=" (or allowing the full MAX) was intended.
 */
105 if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
106 DP_ERR(edev, "Reached max flowdir filter limit\n");
109 fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
110 RTE_CACHE_LINE_SIZE);
112 DP_ERR(edev, "Did not allocate memory for fdir\n");
116 /* soft_id could have been used as memzone string, but soft_id is
117 * not currently used so it has no significance.
/* memzone name is derived from the timer cycle counter for uniqueness */
119 snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
120 (unsigned long)rte_get_timer_cycles());
121 mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
122 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
124 DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
125 rte_strerror(rte_errno));
/* build the template packet the HW will match against */
131 memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
132 pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
133 &qdev->fdir_info.arfs);
138 DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
/* add path: reject a duplicate of an already-installed template */
140 SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
141 if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
142 DP_INFO(edev, "flowdir filter exist\n");
/* delete path: locate the entry whose template matches */
148 SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
149 if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
153 DP_ERR(edev, "flowdir filter does not exist\n");
/* ARFS is only programmed on the leading hwfn */
158 p_hwfn = ECORE_LEADING_HWFN(edev);
160 if (!qdev->fdir_info.arfs.arfs_enable) {
162 eth_dev->data->dev_conf.fdir_conf.mode =
163 RTE_FDIR_MODE_PERFECT;
164 qdev->fdir_info.arfs.arfs_enable = true;
165 DP_INFO(edev, "Force enable flowdir in perfect mode\n");
167 /* Enable ARFS searcher with updated flow_types */
168 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
169 &qdev->fdir_info.arfs);
171 /* configure filter with ECORE_SPQ_MODE_EBLOCK */
172 rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
173 (dma_addr_t)mz->iova,
175 fdir_filter->action.rx_queue,
177 if (rc == ECORE_SUCCESS) {
/* success, add path: record the new entry and keep its memzone */
179 fdir->rx_queue = fdir_filter->action.rx_queue;
180 fdir->pkt_len = pkt_len;
182 SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
184 qdev->fdir_info.filter_count++;
185 DP_INFO(edev, "flowdir filter added, count = %d\n",
186 qdev->fdir_info.filter_count);
/* success, delete path: drop the matched entry and the temp memzone */
188 rte_memzone_free(tmp->mz);
189 SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
190 qede_fdir_entry, list);
191 rte_free(tmp); /* the node deleted */
192 rte_memzone_free(mz); /* temp node allocated */
193 qdev->fdir_info.filter_count--;
194 DP_INFO(edev, "Fdir filter deleted, count = %d\n",
195 qdev->fdir_info.filter_count);
198 DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
199 rc, qdev->fdir_info.filter_count);
202 /* Disable ARFS searcher if there are no more filters */
203 if (qdev->fdir_info.filter_count == 0) {
204 memset(&qdev->fdir_info.arfs, 0,
205 sizeof(struct ecore_arfs_config_params));
206 DP_INFO(edev, "Disabling flowdir\n")_
207 qdev->fdir_info.arfs.arfs_enable = false;
208 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
209 &qdev->fdir_info.arfs);
214 rte_memzone_free(mz);
/* Validate an rte_eth flow-director request, then hand it to the common
 * add/delete path.  Rejects flow types outside QEDE_VALID_FLOW(), an
 * rx_queue beyond the configured RSS queue count, and VF-directed filters.
 * NOTE(review): the error-return lines are missing from this listing.
 */
222 qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
223 struct rte_eth_fdir_filter *fdir,
226 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
227 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
/* only non-frag IPv4/IPv6 TCP/UDP are supported */
229 if (!QEDE_VALID_FLOW(fdir->input.flow_type)) {
230 DP_ERR(edev, "invalid flow_type input\n");
234 if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
235 DP_ERR(edev, "invalid queue number %u\n",
236 fdir->action.rx_queue);
240 if (fdir->input.flow_ext.is_vf) {
241 DP_ERR(edev, "flowdir is not supported over VF\n");
/* all checks passed — perform the actual add/delete */
245 return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
248 /* Fills the L3/L4 headers and returns the actual length of flowdir packet */
250 qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
251 struct rte_eth_fdir_filter *fdir,
253 struct ecore_arfs_config_params *params)
256 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
257 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
258 uint16_t *ether_type;
260 struct rte_eth_fdir_input *input;
261 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
263 struct ipv6_hdr *ip6;
267 static const uint8_t next_proto[] = {
268 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
269 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
270 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
271 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
273 raw_pkt = (uint8_t *)buff;
274 input = &fdir->input;
275 DP_INFO(edev, "flow_type %d\n", input->flow_type);
277 len = 2 * sizeof(struct ether_addr);
278 raw_pkt += 2 * sizeof(struct ether_addr);
279 if (input->flow_ext.vlan_tci) {
280 DP_INFO(edev, "adding VLAN header\n");
281 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
282 rte_memcpy(raw_pkt + sizeof(uint16_t),
283 &input->flow_ext.vlan_tci,
285 raw_pkt += sizeof(vlan_frame);
286 len += sizeof(vlan_frame);
288 ether_type = (uint16_t *)raw_pkt;
289 raw_pkt += sizeof(uint16_t);
290 len += sizeof(uint16_t);
292 switch (input->flow_type) {
293 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
294 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
295 /* fill the common ip header */
296 ip = (struct ipv4_hdr *)raw_pkt;
297 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
298 ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
299 ip->total_length = sizeof(struct ipv4_hdr);
300 ip->next_proto_id = input->flow.ip4_flow.proto ?
301 input->flow.ip4_flow.proto :
302 next_proto[input->flow_type];
303 ip->time_to_live = input->flow.ip4_flow.ttl ?
304 input->flow.ip4_flow.ttl :
305 QEDE_FDIR_IPV4_DEF_TTL;
306 ip->type_of_service = input->flow.ip4_flow.tos;
307 ip->dst_addr = input->flow.ip4_flow.dst_ip;
308 ip->src_addr = input->flow.ip4_flow.src_ip;
309 len += sizeof(struct ipv4_hdr);
312 raw_pkt = (uint8_t *)buff;
314 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
315 udp = (struct udp_hdr *)(raw_pkt + len);
316 udp->dst_port = input->flow.udp4_flow.dst_port;
317 udp->src_port = input->flow.udp4_flow.src_port;
318 udp->dgram_len = sizeof(struct udp_hdr);
319 len += sizeof(struct udp_hdr);
320 /* adjust ip total_length */
321 ip->total_length += sizeof(struct udp_hdr);
324 tcp = (struct tcp_hdr *)(raw_pkt + len);
325 tcp->src_port = input->flow.tcp4_flow.src_port;
326 tcp->dst_port = input->flow.tcp4_flow.dst_port;
327 tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
328 len += sizeof(struct tcp_hdr);
329 /* adjust ip total_length */
330 ip->total_length += sizeof(struct tcp_hdr);
334 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
335 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
336 ip6 = (struct ipv6_hdr *)raw_pkt;
337 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
338 ip6->proto = input->flow.ipv6_flow.proto ?
339 input->flow.ipv6_flow.proto :
340 next_proto[input->flow_type];
341 rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
343 rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
345 len += sizeof(struct ipv6_hdr);
347 raw_pkt = (uint8_t *)buff;
349 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
350 udp = (struct udp_hdr *)(raw_pkt + len);
351 udp->src_port = input->flow.udp6_flow.dst_port;
352 udp->dst_port = input->flow.udp6_flow.src_port;
353 len += sizeof(struct udp_hdr);
356 tcp = (struct tcp_hdr *)(raw_pkt + len);
357 tcp->src_port = input->flow.tcp4_flow.src_port;
358 tcp->dst_port = input->flow.tcp4_flow.dst_port;
359 tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
360 len += sizeof(struct tcp_hdr);
365 DP_ERR(edev, "Unsupported flow_type %u\n",
/* Dispatch an RTE_ETH_FILTER_FDIR operation: NOP probes support, ADD and
 * DELETE map onto qede_fdir_filter_add(); everything else is unsupported.
 * NOTE(review): the switch header, breaks, and returns are missing from
 * this listing.
 */
374 qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
375 enum rte_filter_op filter_op,
378 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
379 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
380 struct rte_eth_fdir_filter *fdir;
383 fdir = (struct rte_eth_fdir_filter *)arg;
385 case RTE_ETH_FILTER_NOP:
386 /* Typically used to query flowdir support */
387 if (ECORE_IS_CMT(edev)) {
388 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
391 return 0; /* means supported */
/* last argument: 1 = add the filter, 0 = delete it */
392 case RTE_ETH_FILTER_ADD:
393 ret = qede_fdir_filter_add(eth_dev, fdir, 1);
395 case RTE_ETH_FILTER_DELETE:
396 ret = qede_fdir_filter_add(eth_dev, fdir, 0);
398 case RTE_ETH_FILTER_FLUSH:
399 case RTE_ETH_FILTER_UPDATE:
400 case RTE_ETH_FILTER_INFO:
404 DP_ERR(edev, "unknown operation %u", filter_op);
411 int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
412 enum rte_filter_op filter_op,
415 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
416 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
417 struct rte_eth_ntuple_filter *ntuple;
418 struct rte_eth_fdir_filter fdir_entry;
419 struct rte_eth_tcpv4_flow *tcpv4_flow;
420 struct rte_eth_udpv4_flow *udpv4_flow;
424 case RTE_ETH_FILTER_NOP:
425 /* Typically used to query fdir support */
426 if (ECORE_IS_CMT(edev)) {
427 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
430 return 0; /* means supported */
431 case RTE_ETH_FILTER_ADD:
434 case RTE_ETH_FILTER_DELETE:
436 case RTE_ETH_FILTER_INFO:
437 case RTE_ETH_FILTER_GET:
438 case RTE_ETH_FILTER_UPDATE:
439 case RTE_ETH_FILTER_FLUSH:
440 case RTE_ETH_FILTER_SET:
441 case RTE_ETH_FILTER_STATS:
442 case RTE_ETH_FILTER_OP_MAX:
443 DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
446 ntuple = (struct rte_eth_ntuple_filter *)arg;
447 /* Internally convert ntuple to fdir entry */
448 memset(&fdir_entry, 0, sizeof(fdir_entry));
449 if (ntuple->proto == IPPROTO_TCP) {
450 fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
451 tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
452 tcpv4_flow->ip.src_ip = ntuple->src_ip;
453 tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
454 tcpv4_flow->ip.proto = IPPROTO_TCP;
455 tcpv4_flow->src_port = ntuple->src_port;
456 tcpv4_flow->dst_port = ntuple->dst_port;
458 fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
459 udpv4_flow = &fdir_entry.input.flow.udp4_flow;
460 udpv4_flow->ip.src_ip = ntuple->src_ip;
461 udpv4_flow->ip.dst_ip = ntuple->dst_ip;
462 udpv4_flow->ip.proto = IPPROTO_TCP;
463 udpv4_flow->src_port = ntuple->src_port;
464 udpv4_flow->dst_port = ntuple->dst_port;
466 return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);