/*
 * Copyright (c) 2017 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
12 #include <rte_errno.h>
14 #include "qede_ethdev.h"
16 #define IP_VERSION (0x40)
17 #define IP_HDRLEN (0x5)
18 #define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
19 #define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
20 #define QEDE_FDIR_IPV4_DEF_TTL (64)
/* Sum of length of header types of L2, L3, L4.
 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
 */
27 #define QEDE_MAX_FDIR_PKT_LEN (86)
30 #define IPV6_ADDR_LEN (16)
33 #define QEDE_VALID_FLOW(flow_type) \
34 ((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
35 (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
36 (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
37 (flow_type) == RTE_ETH_FLOW_FRAG_IPV6 || \
38 (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
39 (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
/* Note: Flowdir support is only partial.
 * For ex: drop_queue, FDIR masks, flex_conf are not supported.
 * Parameters like pballoc/status fields are irrelevant here.
 */
/* qede_check_fdir_support() - validate the requested flow director mode.
 *
 * Reads eth_dev->data->dev_conf.fdir_conf and records the outcome in
 * qdev->fdir_info.arfs.arfs_enable: MODE_NONE disables flowdir,
 * MODE_PERFECT enables it (but is rejected on multi-hwfn / 100G devices),
 * and every other mode is reported as unsupported.
 *
 * NOTE(review): this extract is missing original lines (the switch header,
 * break/return statements and braces), so control flow shown here is
 * partial — verify against the full source.
 */
45 int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
47 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
48 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
/* NOTE(review): "ð" below is a mis-decoded "&eth" HTML entity; the line
 * should read "&eth_dev->data->dev_conf.fdir_conf".
 */
49 struct rte_fdir_conf *fdir = ð_dev->data->dev_conf.fdir_conf;
51 /* check FDIR modes */
53 case RTE_FDIR_MODE_NONE:
54 qdev->fdir_info.arfs.arfs_enable = false;
55 DP_INFO(edev, "flowdir is disabled\n");
57 case RTE_FDIR_MODE_PERFECT:
/* Perfect-match flowdir is not available when the device exposes more
 * than one HW function (100G mode).
 */
58 if (edev->num_hwfns > 1) {
59 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
60 qdev->fdir_info.arfs.arfs_enable = false;
63 qdev->fdir_info.arfs.arfs_enable = true;
64 DP_INFO(edev, "flowdir is enabled\n");
/* Tunnel/signature/MAC-VLAN modes are all rejected. */
66 case RTE_FDIR_MODE_PERFECT_TUNNEL:
67 case RTE_FDIR_MODE_SIGNATURE:
68 case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
69 DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
/* qede_fdir_dealloc_resc() - free all flow director filter resources.
 *
 * Walks qdev->fdir_info.fdir_list_head, releasing each entry's dummy-packet
 * memzone and unlinking the entry from the list.
 *
 * NOTE(review): entries are removed inside SLIST_FOREACH(); if the node is
 * also rte_free()'d before the iterator advances (free not visible in this
 * extract) that is a use-after-free — SLIST_FOREACH_SAFE is the usual
 * pattern for destructive traversal. Confirm against the full source.
 */
76 void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
78 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
79 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
80 struct qede_fdir_entry *tmp = NULL;
81 struct qede_fdir_entry *fdir;
83 SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
/* Release the memzone that holds this filter's constructed packet. */
86 rte_memzone_free(tmp->mz);
87 SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
88 qede_fdir_entry, list);
/* qede_config_cmn_fdir_filter() - common add/delete path for flowdir filters.
 *
 * Builds a dummy packet describing the flow in a freshly reserved memzone,
 * deduplicates it against the list of installed filters by comparing packet
 * bytes, then programs (or removes) the HW RFS ntuple filter through
 * ecore_configure_rfs_ntuple_filter(). Maintains fdir_list_head and
 * filter_count, and disables the ARFS searcher once the last filter is gone.
 *
 * NOTE(review): many original lines (declarations of pkt/pkt_len/add,
 * braces, returns, goto labels) are missing from this extract; annotations
 * below cover the visible logic only.
 */
95 qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
96 struct rte_eth_fdir_filter *fdir_filter,
99 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
100 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
101 char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
102 struct qede_fdir_entry *tmp = NULL;
103 struct qede_fdir_entry *fdir;
104 const struct rte_memzone *mz;
105 struct ecore_hwfn *p_hwfn;
106 enum _ecore_status_t rc;
/* NOTE(review): rejecting at count == QEDE_RFS_MAX_FLTR - 1 looks like an
 * off-by-one (caps usable filters one below the advertised maximum) —
 * confirm intended limit.
 */
112 if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
113 DP_ERR(edev, "Reached max flowdir filter limit\n");
116 fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
117 RTE_CACHE_LINE_SIZE);
119 DP_ERR(edev, "Did not allocate memory for fdir\n");
123 /* soft_id could have been used as memzone string, but soft_id is
124 * not currently used so it has no significance.
 */
/* Memzone name derived from the timer tick — effectively unique per call.
 * NOTE(review): sizeof(mz_name) - 1 is unnecessary; snprintf() already
 * NUL-terminates within the given size.
 */
126 snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
127 (unsigned long)rte_get_timer_cycles());
128 mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
129 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
131 DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
132 rte_strerror(rte_errno));
/* Build the dummy classification packet into the memzone (pkt presumably
 * aliases mz->addr — declaration not visible in this extract).
 */
138 memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
139 pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
140 &qdev->fdir_info.arfs);
145 DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
/* Add path: refuse duplicates (byte-wise compare of constructed packets).
 * NOTE(review): on this path the freshly allocated 'fdir' and 'mz' must be
 * released before returning — frees are not visible in this extract.
 */
147 SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
148 if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
149 DP_ERR(edev, "flowdir filter exist\n");
/* Delete path: locate the matching installed filter. */
155 SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
156 if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
160 DP_ERR(edev, "flowdir filter does not exist\n");
165 p_hwfn = ECORE_LEADING_HWFN(edev);
/* First filter on a port not configured for flowdir: force-enable
 * perfect mode so the searcher can be turned on.
 */
167 if (!qdev->fdir_info.arfs.arfs_enable) {
169 eth_dev->data->dev_conf.fdir_conf.mode =
170 RTE_FDIR_MODE_PERFECT;
171 qdev->fdir_info.arfs.arfs_enable = true;
172 DP_INFO(edev, "Force enable flowdir in perfect mode\n");
174 /* Enable ARFS searcher with updated flow_types */
175 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
176 &qdev->fdir_info.arfs);
178 /* configure filter with ECORE_SPQ_MODE_EBLOCK */
/* Program the HW with the DMA address of the constructed packet. */
179 rc = ecore_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, NULL,
180 (dma_addr_t)mz->phys_addr,
182 fdir_filter->action.rx_queue,
/* Success: add path links the new entry; delete path (below) unlinks the
 * matched entry and frees both its memzone and the temporary one.
 */
184 if (rc == ECORE_SUCCESS) {
186 fdir->rx_queue = fdir_filter->action.rx_queue;
187 fdir->pkt_len = pkt_len;
189 SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
191 qdev->fdir_info.filter_count++;
192 DP_INFO(edev, "flowdir filter added, count = %d\n",
193 qdev->fdir_info.filter_count);
195 rte_memzone_free(tmp->mz);
196 SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
197 qede_fdir_entry, list);
198 rte_free(tmp); /* the node deleted */
199 rte_memzone_free(mz); /* temp node allocated */
200 qdev->fdir_info.filter_count--;
201 DP_INFO(edev, "Fdir filter deleted, count = %d\n",
202 qdev->fdir_info.filter_count);
205 DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
206 rc, qdev->fdir_info.filter_count);
209 /* Disable ARFS searcher if there are no more filters */
210 if (qdev->fdir_info.filter_count == 0) {
211 memset(&qdev->fdir_info.arfs, 0,
212 sizeof(struct ecore_arfs_config_params));
213 DP_INFO(edev, "Disabling flowdir\n");
214 qdev->fdir_info.arfs.arfs_enable = false;
215 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
216 &qdev->fdir_info.arfs);
/* Error path: release the temporary memzone. */
221 rte_memzone_free(mz);
/* qede_fdir_filter_add() - validate a flowdir filter request, then apply it.
 *
 * Checks that the flow type is one of the supported IPv4/IPv6 TCP/UDP/frag
 * types, that the target rx queue index is in range, and that the request
 * is not for a VF (unsupported), then delegates to
 * qede_config_cmn_fdir_filter() for the actual add/delete.
 *
 * NOTE(review): error-return statements are missing from this extract.
 */
229 qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
230 struct rte_eth_fdir_filter *fdir,
233 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
234 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
236 if (!QEDE_VALID_FLOW(fdir->input.flow_type)) {
237 DP_ERR(edev, "invalid flow_type input\n");
/* Target queue must be one of the configured RSS queues. */
241 if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
242 DP_ERR(edev, "invalid queue number %u\n",
243 fdir->action.rx_queue);
247 if (fdir->input.flow_ext.is_vf) {
248 DP_ERR(edev, "flowdir is not supported over VF\n");
252 return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
255 /* Fills the L3/L4 headers and returns the actual length of flowdir packet */
/* qede_fdir_construct_pkt() - build the dummy packet the HW matches against.
 *
 * Writes an Ethernet (+ optional VLAN) header, then the IPv4/IPv6 header,
 * then the TCP/UDP header into 'buff' based on fdir->input, accumulating
 * the written length in 'len' (the return value — return statement not
 * visible in this extract).
 */
257 qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
258 struct rte_eth_fdir_filter *fdir,
260 struct ecore_arfs_config_params *params)
263 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
264 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
265 uint16_t *ether_type;
267 struct rte_eth_fdir_input *input;
/* 802.1Q TPID (0x8100) template; the TCI is patched in below. */
268 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
270 struct ipv6_hdr *ip6;
273 struct sctp_hdr *sctp;
274 uint8_t size, dst = 0;
/* Default L4 protocol per flow type when the user left proto == 0. */
276 static const uint8_t next_proto[] = {
277 [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
278 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
279 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
280 [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
281 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
282 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
284 raw_pkt = (uint8_t *)buff;
285 input = &fdir->input;
286 DP_INFO(edev, "flow_type %d\n", input->flow_type);
/* Skip over dst + src MAC; the match is on L3/L4 fields only. */
288 len = 2 * sizeof(struct ether_addr);
289 raw_pkt += 2 * sizeof(struct ether_addr);
290 if (input->flow_ext.vlan_tci) {
291 DP_INFO(edev, "adding VLAN header\n");
292 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
293 rte_memcpy(raw_pkt + sizeof(uint16_t),
294 &input->flow_ext.vlan_tci,
296 raw_pkt += sizeof(vlan_frame);
297 len += sizeof(vlan_frame);
/* Remember where the EtherType goes; it is filled per flow type below. */
299 ether_type = (uint16_t *)raw_pkt;
300 raw_pkt += sizeof(uint16_t);
301 len += sizeof(uint16_t);
303 /* fill the common ip header */
304 switch (input->flow_type) {
305 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
306 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
307 case RTE_ETH_FLOW_FRAG_IPV4:
308 ip = (struct ipv4_hdr *)raw_pkt;
309 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
310 ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
/* NOTE(review): total_length is kept in host byte order here and when
 * incremented below — verify whether a later conversion exists or the
 * HW does not inspect it.
 */
311 ip->total_length = sizeof(struct ipv4_hdr);
312 ip->next_proto_id = input->flow.ip4_flow.proto ?
313 input->flow.ip4_flow.proto :
314 next_proto[input->flow_type];
315 ip->time_to_live = input->flow.ip4_flow.ttl ?
316 input->flow.ip4_flow.ttl :
317 QEDE_FDIR_IPV4_DEF_TTL;
318 ip->type_of_service = input->flow.ip4_flow.tos;
319 ip->dst_addr = input->flow.ip4_flow.dst_ip;
320 ip->src_addr = input->flow.ip4_flow.src_ip;
321 len += sizeof(struct ipv4_hdr);
324 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
325 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
326 case RTE_ETH_FLOW_FRAG_IPV6:
327 ip6 = (struct ipv6_hdr *)raw_pkt;
328 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
329 ip6->proto = input->flow.ipv6_flow.proto ?
330 input->flow.ipv6_flow.proto :
331 next_proto[input->flow_type];
/* NOTE(review): dst_ip is copied into ip6->src_addr and src_ip into
 * ip6->dst_addr — swapped relative to the IPv4 path above; confirm
 * whether this is intentional or a defect.
 */
332 rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
334 rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
336 len += sizeof(struct ipv6_hdr);
339 DP_ERR(edev, "Unsupported flow_type %u\n",
344 /* fill the L4 header */
345 raw_pkt = (uint8_t *)buff;
346 switch (input->flow_type) {
347 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
348 udp = (struct udp_hdr *)(raw_pkt + len);
349 udp->dst_port = input->flow.udp4_flow.dst_port;
350 udp->src_port = input->flow.udp4_flow.src_port;
351 udp->dgram_len = sizeof(struct udp_hdr);
352 len += sizeof(struct udp_hdr);
353 /* adjust ip total_length */
354 ip->total_length += sizeof(struct udp_hdr);
357 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
358 tcp = (struct tcp_hdr *)(raw_pkt + len);
359 tcp->src_port = input->flow.tcp4_flow.src_port;
360 tcp->dst_port = input->flow.tcp4_flow.dst_port;
361 tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
362 len += sizeof(struct tcp_hdr);
363 /* adjust ip total_length */
364 ip->total_length += sizeof(struct tcp_hdr);
/* NOTE(review): the IPv6 TCP case reads ports through udp6_flow — this
 * works only because the flow union members share layout; tcp6_flow is
 * the matching member and would be clearer.
 */
367 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
368 tcp = (struct tcp_hdr *)(raw_pkt + len);
369 tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
370 tcp->src_port = input->flow.udp6_flow.src_port;
371 tcp->dst_port = input->flow.udp6_flow.dst_port;
372 /* adjust ip total_length */
373 len += sizeof(struct tcp_hdr);
376 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
377 udp = (struct udp_hdr *)(raw_pkt + len);
/* NOTE(review): src_port is assigned from dst_port and vice versa —
 * swapped relative to the IPv4 UDP case; confirm intent.
 */
378 udp->src_port = input->flow.udp6_flow.dst_port;
379 udp->dst_port = input->flow.udp6_flow.src_port;
380 /* adjust ip total_length */
381 len += sizeof(struct udp_hdr);
385 DP_ERR(edev, "Unsupported flow_type %d\n", input->flow_type);
/* qede_fdir_filter_conf() - RTE_ETH_FILTER_FDIR operation dispatcher.
 *
 * NOP is used by applications to probe flowdir support (rejected on
 * multi-hwfn / 100G devices); ADD and DELETE map onto
 * qede_fdir_filter_add() with add=1/0; FLUSH/UPDATE/INFO and anything else
 * are unsupported.
 *
 * NOTE(review): the switch header, break/return statements and the 'arg'
 * parameter line are missing from this extract.
 */
392 qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
393 enum rte_filter_op filter_op,
396 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
397 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
398 struct rte_eth_fdir_filter *fdir;
401 fdir = (struct rte_eth_fdir_filter *)arg;
403 case RTE_ETH_FILTER_NOP:
404 /* Typically used to query flowdir support */
405 if (edev->num_hwfns > 1) {
406 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
409 return 0; /* means supported */
410 case RTE_ETH_FILTER_ADD:
411 ret = qede_fdir_filter_add(eth_dev, fdir, 1);
413 case RTE_ETH_FILTER_DELETE:
414 ret = qede_fdir_filter_add(eth_dev, fdir, 0);
416 case RTE_ETH_FILTER_FLUSH:
417 case RTE_ETH_FILTER_UPDATE:
418 case RTE_ETH_FILTER_INFO:
422 DP_ERR(edev, "unknown operation %u", filter_op);
429 int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
430 enum rte_filter_op filter_op,
433 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
434 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
435 struct rte_eth_ntuple_filter *ntuple;
436 struct rte_eth_fdir_filter fdir_entry;
437 struct rte_eth_tcpv4_flow *tcpv4_flow;
438 struct rte_eth_udpv4_flow *udpv4_flow;
439 struct ecore_hwfn *p_hwfn;
443 case RTE_ETH_FILTER_NOP:
444 /* Typically used to query fdir support */
445 if (edev->num_hwfns > 1) {
446 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
449 return 0; /* means supported */
450 case RTE_ETH_FILTER_ADD:
453 case RTE_ETH_FILTER_DELETE:
456 case RTE_ETH_FILTER_INFO:
457 case RTE_ETH_FILTER_GET:
458 case RTE_ETH_FILTER_UPDATE:
459 case RTE_ETH_FILTER_FLUSH:
460 case RTE_ETH_FILTER_SET:
461 case RTE_ETH_FILTER_STATS:
462 case RTE_ETH_FILTER_OP_MAX:
463 DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
466 ntuple = (struct rte_eth_ntuple_filter *)arg;
467 /* Internally convert ntuple to fdir entry */
468 memset(&fdir_entry, 0, sizeof(fdir_entry));
469 if (ntuple->proto == IPPROTO_TCP) {
470 fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
471 tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
472 tcpv4_flow->ip.src_ip = ntuple->src_ip;
473 tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
474 tcpv4_flow->ip.proto = IPPROTO_TCP;
475 tcpv4_flow->src_port = ntuple->src_port;
476 tcpv4_flow->dst_port = ntuple->dst_port;
478 fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
479 udpv4_flow = &fdir_entry.input.flow.udp4_flow;
480 udpv4_flow->ip.src_ip = ntuple->src_ip;
481 udpv4_flow->ip.dst_ip = ntuple->dst_ip;
482 udpv4_flow->ip.proto = IPPROTO_TCP;
483 udpv4_flow->src_port = ntuple->src_port;
484 udpv4_flow->dst_port = ntuple->dst_port;
486 return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);