net/mlx5: add new memory region support
[dpdk.git] / drivers / net / mlx5 / mlx5_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5
6 #define _GNU_SOURCE
7
8 #include <stddef.h>
9 #include <assert.h>
10 #include <inttypes.h>
11 #include <unistd.h>
12 #include <stdint.h>
13 #include <stdio.h>
14 #include <string.h>
15 #include <stdlib.h>
16 #include <errno.h>
17 #include <dirent.h>
18 #include <net/if.h>
19 #include <sys/ioctl.h>
20 #include <sys/socket.h>
21 #include <netinet/in.h>
22 #include <linux/ethtool.h>
23 #include <linux/sockios.h>
24 #include <fcntl.h>
25 #include <stdalign.h>
26 #include <sys/un.h>
27 #include <time.h>
28
29 #include <rte_atomic.h>
30 #include <rte_ethdev_driver.h>
31 #include <rte_bus_pci.h>
32 #include <rte_mbuf.h>
33 #include <rte_common.h>
34 #include <rte_interrupts.h>
35 #include <rte_malloc.h>
36 #include <rte_string_fns.h>
37 #include <rte_rwlock.h>
38
39 #include "mlx5.h"
40 #include "mlx5_glue.h"
41 #include "mlx5_rxtx.h"
42 #include "mlx5_utils.h"
43
44 /* Add defines in case they are missing from the user-space kernel headers. */
45 #ifndef ETHTOOL_GLINKSETTINGS
46 struct ethtool_link_settings {
47         uint32_t cmd;
48         uint32_t speed;
49         uint8_t duplex;
50         uint8_t port;
51         uint8_t phy_address;
52         uint8_t autoneg;
53         uint8_t mdio_support;
54         uint8_t eth_tp_mdix;
55         uint8_t eth_tp_mdix_ctrl;
56         int8_t link_mode_masks_nwords;
57         uint32_t reserved[8];
58         uint32_t link_mode_masks[];
59 };
60
61 #define ETHTOOL_GLINKSETTINGS 0x0000004c
62 #define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
63 #define ETHTOOL_LINK_MODE_Autoneg_BIT 6
64 #define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
65 #define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
66 #define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
67 #define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
68 #define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
69 #define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
70 #define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
71 #define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
72 #define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
73 #define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
74 #define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
75 #define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
76 #define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
77 #define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
78 #endif
79 #ifndef HAVE_ETHTOOL_LINK_MODE_25G
80 #define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
81 #define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
82 #define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
83 #endif
84 #ifndef HAVE_ETHTOOL_LINK_MODE_50G
85 #define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
86 #define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
87 #endif
88 #ifndef HAVE_ETHTOOL_LINK_MODE_100G
89 #define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
90 #define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
91 #define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
92 #define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
93 #endif
94
95 /**
96  * Get interface name from private structure.
97  *
98  * @param[in] dev
99  *   Pointer to Ethernet device.
100  * @param[out] ifname
101  *   Interface name output buffer.
102  *
103  * @return
104  *   0 on success, a negative errno value otherwise and rte_errno is set.
105  */
106 int
107 mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
108 {
109         struct priv *priv = dev->data->dev_private;
110         DIR *dir;
111         struct dirent *dent;
112         unsigned int dev_type = 0;
113         unsigned int dev_port_prev = ~0u;
114         char match[IF_NAMESIZE] = "";
115
116         {
117                 MKSTR(path, "%s/device/net", priv->ibdev_path);
118
119                 dir = opendir(path);
120                 if (dir == NULL) {
121                         rte_errno = errno;
122                         return -rte_errno;
123                 }
124         }
125         while ((dent = readdir(dir)) != NULL) {
126                 char *name = dent->d_name;
127                 FILE *file;
128                 unsigned int dev_port;
129                 int r;
130
131                 if ((name[0] == '.') &&
132                     ((name[1] == '\0') ||
133                      ((name[1] == '.') && (name[2] == '\0'))))
134                         continue;
135
136                 MKSTR(path, "%s/device/net/%s/%s",
137                       priv->ibdev_path, name,
138                       (dev_type ? "dev_id" : "dev_port"));
139
140                 file = fopen(path, "rb");
141                 if (file == NULL) {
142                         if (errno != ENOENT)
143                                 continue;
144                         /*
145                          * Switch to dev_id when dev_port does not exist as
146                          * is the case with Linux kernel versions < 3.15.
147                          */
148 try_dev_id:
149                         match[0] = '\0';
150                         if (dev_type)
151                                 break;
152                         dev_type = 1;
153                         dev_port_prev = ~0u;
154                         rewinddir(dir);
155                         continue;
156                 }
157                 r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
158                 fclose(file);
159                 if (r != 1)
160                         continue;
161                 /*
162                  * Switch to dev_id when dev_port returns the same value for
163                  * all ports. May happen when using a MOFED release older than
164                  * 3.0 with a Linux kernel >= 3.15.
165                  */
166                 if (dev_port == dev_port_prev)
167                         goto try_dev_id;
168                 dev_port_prev = dev_port;
169                 if (dev_port == (priv->port - 1u))
170                         strlcpy(match, name, sizeof(match));
171         }
172         closedir(dir);
173         if (match[0] == '\0') {
174                 rte_errno = ENOENT;
175                 return -rte_errno;
176         }
177         strncpy(*ifname, match, sizeof(*ifname));
178         return 0;
179 }
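/*
 * Example: assuming priv->ibdev_path is "/sys/class/infiniband/mlx5_0" and
 * the kernel exposes dev_port files, the loop above reads e.g.
 *
 *   /sys/class/infiniband/mlx5_0/device/net/enp3s0f0/dev_port -> 0
 *   /sys/class/infiniband/mlx5_0/device/net/enp3s0f1/dev_port -> 1
 *
 * so a device with priv->port == 1 resolves to "enp3s0f0". The interface
 * names and the path shown here are hypothetical.
 */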
180
181 /**
182  * Get the interface index from device name.
183  *
184  * @param[in] dev
185  *   Pointer to Ethernet device.
186  *
187  * @return
188  *   Interface index on success, a negative errno value otherwise and
189  *   rte_errno is set.
190  */
191 int
192 mlx5_ifindex(const struct rte_eth_dev *dev)
193 {
194         char ifname[IF_NAMESIZE];
195         int ret;
196
197         ret = mlx5_get_ifname(dev, &ifname);
198         if (ret)
199                 return ret;
200         ret = if_nametoindex(ifname);
201         if (ret == 0) {
202                 rte_errno = errno;
203                 return -rte_errno;
204         }
205         return ret;
206 }
207
208 /**
209  * Perform ifreq ioctl() on associated Ethernet device.
210  *
211  * @param[in] dev
212  *   Pointer to Ethernet device.
213  * @param req
214  *   Request number to pass to ioctl().
215  * @param[out] ifr
216  *   Interface request structure output buffer.
217  *
218  * @return
219  *   0 on success, a negative errno value otherwise and rte_errno is set.
220  */
221 int
222 mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
223 {
224         int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
225         int ret = 0;
226
227         if (sock == -1) {
228                 rte_errno = errno;
229                 return -rte_errno;
230         }
231         ret = mlx5_get_ifname(dev, &ifr->ifr_name);
232         if (ret)
233                 goto error;
234         ret = ioctl(sock, req, ifr);
235         if (ret == -1) {
236                 rte_errno = errno;
237                 goto error;
238         }
239         close(sock);
240         return 0;
241 error:
242         close(sock);
243         return -rte_errno;
244 }
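/*
 * Usage sketch (hypothetical values): plain requests such as SIOCGIFMTU fill
 * the ifreq structure directly, while SIOCETHTOOL requests point ifr_data at
 * an ethtool command before calling this helper, e.g.
 *
 *   struct ethtool_pauseparam pause = { .cmd = ETHTOOL_GPAUSEPARAM };
 *   struct ifreq ifr;
 *
 *   ifr.ifr_data = (void *)&pause;
 *   if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) == 0)
 *           use(pause.rx_pause, pause.tx_pause);
 *
 * The flow control and link update helpers below use the SIOCETHTOOL form.
 */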
245
246 /**
247  * Get device MTU.
248  *
249  * @param dev
250  *   Pointer to Ethernet device.
251  * @param[out] mtu
252  *   MTU value output buffer.
253  *
254  * @return
255  *   0 on success, a negative errno value otherwise and rte_errno is set.
256  */
257 int
258 mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
259 {
260         struct ifreq request;
261         int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);
262
263         if (ret)
264                 return ret;
265         *mtu = request.ifr_mtu;
266         return 0;
267 }
268
269 /**
270  * Set device MTU.
271  *
272  * @param dev
273  *   Pointer to Ethernet device.
274  * @param mtu
275  *   MTU value to set.
276  *
277  * @return
278  *   0 on success, a negative errno value otherwise and rte_errno is set.
279  */
280 static int
281 mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
282 {
283         struct ifreq request = { .ifr_mtu = mtu, };
284
285         return mlx5_ifreq(dev, SIOCSIFMTU, &request);
286 }
287
288 /**
289  * Set device flags.
290  *
291  * @param dev
292  *   Pointer to Ethernet device.
293  * @param keep
294  *   Bitmask for flags that must remain untouched.
295  * @param flags
296  *   Bitmask for flags to modify.
297  *
298  * @return
299  *   0 on success, a negative errno value otherwise and rte_errno is set.
300  */
301 int
302 mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
303 {
304         struct ifreq request;
305         int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);
306
307         if (ret)
308                 return ret;
309         request.ifr_flags &= keep;
310         request.ifr_flags |= flags & ~keep;
311         return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
312 }
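/*
 * For instance, the link control callbacks at the end of this file pass
 * keep = ~IFF_UP so that only the IFF_UP bit is modified:
 *
 *   mlx5_set_flags(dev, ~IFF_UP, IFF_UP);   (bring the interface up)
 *   mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);  (bring the interface down)
 */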
313
314 /**
315  * DPDK callback for Ethernet device configuration.
316  *
317  * @param dev
318  *   Pointer to Ethernet device structure.
319  *
320  * @return
321  *   0 on success, a negative errno value otherwise and rte_errno is set.
322  */
323 int
324 mlx5_dev_configure(struct rte_eth_dev *dev)
325 {
326         struct priv *priv = dev->data->dev_private;
327         unsigned int rxqs_n = dev->data->nb_rx_queues;
328         unsigned int txqs_n = dev->data->nb_tx_queues;
329         unsigned int i;
330         unsigned int j;
331         unsigned int reta_idx_n;
332         const uint8_t use_app_rss_key =
333                 !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
334         int ret = 0;
335
336         if (use_app_rss_key &&
337             (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
338              rss_hash_default_key_len)) {
339                 DRV_LOG(ERR, "port %u RSS key len must be %zu Bytes long",
340                         dev->data->port_id, rss_hash_default_key_len);
341                 rte_errno = EINVAL;
342                 return -rte_errno;
343         }
344         priv->rss_conf.rss_key =
345                 rte_realloc(priv->rss_conf.rss_key,
346                             rss_hash_default_key_len, 0);
347         if (!priv->rss_conf.rss_key) {
348                 DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
349                         dev->data->port_id, rxqs_n);
350                 rte_errno = ENOMEM;
351                 return -rte_errno;
352         }
353         memcpy(priv->rss_conf.rss_key,
354                use_app_rss_key ?
355                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
356                rss_hash_default_key,
357                rss_hash_default_key_len);
358         priv->rss_conf.rss_key_len = rss_hash_default_key_len;
359         priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
360         priv->rxqs = (void *)dev->data->rx_queues;
361         priv->txqs = (void *)dev->data->tx_queues;
362         if (txqs_n != priv->txqs_n) {
363                 DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
364                         dev->data->port_id, priv->txqs_n, txqs_n);
365                 priv->txqs_n = txqs_n;
366         }
367         if (rxqs_n > priv->config.ind_table_max_size) {
368                 DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
369                         dev->data->port_id, rxqs_n);
370                 rte_errno = EINVAL;
371                 return -rte_errno;
372         }
373         if (rxqs_n == priv->rxqs_n)
374                 return 0;
375         DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
376                 dev->data->port_id, priv->rxqs_n, rxqs_n);
377         priv->rxqs_n = rxqs_n;
378         /* If the requested number of RX queues is not a power of two, use the
379          * maximum indirection table size for better balancing.
380          * The result is always rounded to the next power of two. */
381         reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
382                                      priv->config.ind_table_max_size :
383                                      rxqs_n));
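        /*
         * Worked example (hypothetical sizes): with rxqs_n = 6 and
         * ind_table_max_size = 512, 6 is not a power of two, so
         * reta_idx_n = 1 << log2above(512) = 512; with rxqs_n = 8,
         * reta_idx_n = 1 << log2above(8) = 8.
         */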
384         ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
385         if (ret)
386                 return ret;
387         /* When the number of RX queues is not a power of two, the remaining
388          * table entries are padded with reused WQs and hashes are not spread
389          * uniformly. */
390         for (i = 0, j = 0; (i != reta_idx_n); ++i) {
391                 (*priv->reta_idx)[i] = j;
392                 if (++j == rxqs_n)
393                         j = 0;
394         }
395         /*
396          * Once the device is added to the list of memory event callback, its
397          * global MR cache table cannot be expanded on the fly because of
398          * deadlock. If it overflows, lookup should be done by searching MR list
399          * linearly, which is slow.
400          */
401         if (mlx5_mr_btree_init(&priv->mr.cache, MLX5_MR_BTREE_CACHE_N * 2,
402                                dev->device->numa_node)) {
403                 /* rte_errno is already set. */
404                 return -rte_errno;
405         }
406         rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
407         LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
408                          priv, mem_event_cb);
409         rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
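        /*
         * Being on this list means the device is visited by the driver's
         * global memory event callback (see mlx5_mr.c), so stale entries in
         * its MR cache can be invalidated when DPDK memory is freed.
         */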
410         return 0;
411 }
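/*
 * Application-side sketch of the RSS part of this callback (hypothetical,
 * abridged): an application-provided key must match rss_hash_default_key_len
 * (40 bytes for this PMD), otherwise the check above rejects the
 * configuration with EINVAL.
 *
 *   static uint8_t rss_key[40] = { 0 };
 *   struct rte_eth_conf conf = {
 *           .rx_adv_conf.rss_conf = {
 *                   .rss_key = rss_key,
 *                   .rss_key_len = sizeof(rss_key),
 *                   .rss_hf = ETH_RSS_IP,
 *           },
 *   };
 *
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */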
412
413 /**
414  * Sets default tuning parameters.
415  *
416  * @param dev
417  *   Pointer to Ethernet device.
418  * @param[out] info
419  *   Info structure output buffer.
420  */
421 static void
422 mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
423 {
424         struct priv *priv = dev->data->dev_private;
425
426         /* Minimum CPU utilization. */
427         info->default_rxportconf.ring_size = 256;
428         info->default_txportconf.ring_size = 256;
429         info->default_rxportconf.burst_size = 64;
430         info->default_txportconf.burst_size = 64;
431         if (priv->link_speed_capa & ETH_LINK_SPEED_100G) {
432                 info->default_rxportconf.nb_queues = 16;
433                 info->default_txportconf.nb_queues = 16;
434                 if (dev->data->nb_rx_queues > 2 ||
435                     dev->data->nb_tx_queues > 2) {
436                         /* Max Throughput. */
437                         info->default_rxportconf.ring_size = 2048;
438                         info->default_txportconf.ring_size = 2048;
439                 }
440         } else {
441                 info->default_rxportconf.nb_queues = 8;
442                 info->default_txportconf.nb_queues = 8;
443                 if (dev->data->nb_rx_queues > 2 ||
444                     dev->data->nb_tx_queues > 2) {
445                         /* Max Throughput. */
446                         info->default_rxportconf.ring_size = 4096;
447                         info->default_txportconf.ring_size = 4096;
448                 }
449         }
450 }
451
452 /**
453  * DPDK callback to get information about the device.
454  *
455  * @param dev
456  *   Pointer to Ethernet device structure.
457  * @param[out] info
458  *   Info structure output buffer.
459  */
460 void
461 mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
462 {
463         struct priv *priv = dev->data->dev_private;
464         struct mlx5_dev_config *config = &priv->config;
465         unsigned int max;
466         char ifname[IF_NAMESIZE];
467
468         /* FIXME: we should ask the device for these values. */
469         info->min_rx_bufsize = 32;
470         info->max_rx_pktlen = 65536;
471         /*
472          * Since we need one CQ per QP, the limit is the smaller of
473          * the two values.
474          */
475         max = RTE_MIN(priv->device_attr.orig_attr.max_cq,
476                       priv->device_attr.orig_attr.max_qp);
477         /* Cap at 65535 since max_rx_queues is a uint16_t and would otherwise wrap. */
478         if (max >= 65535)
479                 max = 65535;
480         info->max_rx_queues = max;
481         info->max_tx_queues = max;
482         info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
483         info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
484         info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
485                                  info->rx_queue_offload_capa);
486         info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
487         if (mlx5_get_ifname(dev, &ifname) == 0)
488                 info->if_index = if_nametoindex(ifname);
489         info->reta_size = priv->reta_idx_n ?
490                 priv->reta_idx_n : config->ind_table_max_size;
491         info->hash_key_size = rss_hash_default_key_len;
492         info->speed_capa = priv->link_speed_capa;
493         info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
494         mlx5_set_default_params(dev, info);
495 }
496
497 /**
498  * Get supported packet types.
499  *
500  * @param dev
501  *   Pointer to Ethernet device structure.
502  *
503  * @return
504  *   A pointer to the supported Packet types array.
505  */
506 const uint32_t *
507 mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
508 {
509         static const uint32_t ptypes[] = {
510                 /* refers to rxq_cq_to_pkt_type() */
511                 RTE_PTYPE_L2_ETHER,
512                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
513                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
514                 RTE_PTYPE_L4_NONFRAG,
515                 RTE_PTYPE_L4_FRAG,
516                 RTE_PTYPE_L4_TCP,
517                 RTE_PTYPE_L4_UDP,
518                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
519                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
520                 RTE_PTYPE_INNER_L4_NONFRAG,
521                 RTE_PTYPE_INNER_L4_FRAG,
522                 RTE_PTYPE_INNER_L4_TCP,
523                 RTE_PTYPE_INNER_L4_UDP,
524                 RTE_PTYPE_UNKNOWN
525         };
526
527         if (dev->rx_pkt_burst == mlx5_rx_burst ||
528             dev->rx_pkt_burst == mlx5_rx_burst_vec)
529                 return ptypes;
530         return NULL;
531 }
532
533 /**
534  * Retrieve physical link information (unlocked version using legacy ioctl).
535  *
536  * @param dev
537  *   Pointer to Ethernet device structure.
538  * @param[out] link
539  *   Storage for current link status.
540  *
541  * @return
542  *   0 on success, a negative errno value otherwise and rte_errno is set.
543  */
544 static int
545 mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
546                                struct rte_eth_link *link)
547 {
548         struct priv *priv = dev->data->dev_private;
549         struct ethtool_cmd edata = {
550                 .cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
551         };
552         struct ifreq ifr;
553         struct rte_eth_link dev_link;
554         int link_speed = 0;
555         int ret;
556
557         ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
558         if (ret) {
559                 DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
560                         dev->data->port_id, strerror(rte_errno));
561                 return ret;
562         }
563         memset(&dev_link, 0, sizeof(dev_link));
564         dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
565                                 (ifr.ifr_flags & IFF_RUNNING));
566         ifr.ifr_data = (void *)&edata;
567         ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
568         if (ret) {
569                 DRV_LOG(WARNING,
570                         "port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
571                         dev->data->port_id, strerror(rte_errno));
572                 return ret;
573         }
574         link_speed = ethtool_cmd_speed(&edata);
575         if (link_speed == -1)
576                 dev_link.link_speed = ETH_SPEED_NUM_NONE;
577         else
578                 dev_link.link_speed = link_speed;
579         priv->link_speed_capa = 0;
580         if (edata.supported & SUPPORTED_Autoneg)
581                 priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
582         if (edata.supported & (SUPPORTED_1000baseT_Full |
583                                SUPPORTED_1000baseKX_Full))
584                 priv->link_speed_capa |= ETH_LINK_SPEED_1G;
585         if (edata.supported & SUPPORTED_10000baseKR_Full)
586                 priv->link_speed_capa |= ETH_LINK_SPEED_10G;
587         if (edata.supported & (SUPPORTED_40000baseKR4_Full |
588                                SUPPORTED_40000baseCR4_Full |
589                                SUPPORTED_40000baseSR4_Full |
590                                SUPPORTED_40000baseLR4_Full))
591                 priv->link_speed_capa |= ETH_LINK_SPEED_40G;
592         dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
593                                 ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
594         dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
595                         ETH_LINK_SPEED_FIXED);
596         if ((dev_link.link_speed && !dev_link.link_status) ||
597             (!dev_link.link_speed && dev_link.link_status)) {
598                 rte_errno = EAGAIN;
599                 return -rte_errno;
600         }
601         *link = dev_link;
602         return 0;
603 }
604
605 /**
606  * Retrieve physical link information (unlocked version using new ioctl).
607  *
608  * @param dev
609  *   Pointer to Ethernet device structure.
610  * @param[out] link
611  *   Storage for current link status.
612  *
613  * @return
614  *   0 on success, a negative errno value otherwise and rte_errno is set.
615  */
616 static int
617 mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
618                              struct rte_eth_link *link)
619
620 {
621         struct priv *priv = dev->data->dev_private;
622         struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
623         struct ifreq ifr;
624         struct rte_eth_link dev_link;
625         uint64_t sc;
626         int ret;
627
628         ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
629         if (ret) {
630                 DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
631                         dev->data->port_id, strerror(rte_errno));
632                 return ret;
633         }
634         memset(&dev_link, 0, sizeof(dev_link));
635         dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
636                                 (ifr.ifr_flags & IFF_RUNNING));
637         ifr.ifr_data = (void *)&gcmd;
638         ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
639         if (ret) {
640                 DRV_LOG(DEBUG,
641                         "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
642                         " failed: %s",
643                         dev->data->port_id, strerror(rte_errno));
644                 return ret;
645         }
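        /*
         * ETHTOOL_GLINKSETTINGS handshake: the first request is sent with
         * link_mode_masks_nwords == 0, so the kernel copies no masks and
         * instead returns the required number of 32-bit mask words negated.
         * The negation below recovers that count; the buffer that follows
         * holds the three mask sets (supported, advertising,
         * lp_advertising), hence the factor of 3.
         */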
646         gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
647
648         alignas(struct ethtool_link_settings)
649         uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
650                      sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
651         struct ethtool_link_settings *ecmd = (void *)data;
652
653         *ecmd = gcmd;
654         ifr.ifr_data = (void *)ecmd;
655         ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
656         if (ret) {
657                 DRV_LOG(DEBUG,
658                         "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
659                         " failed: %s",
660                         dev->data->port_id, strerror(rte_errno));
661                 return ret;
662         }
663         dev_link.link_speed = ecmd->speed;
664         sc = ecmd->link_mode_masks[0] |
665                 ((uint64_t)ecmd->link_mode_masks[1] << 32);
666         priv->link_speed_capa = 0;
667         if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
668                 priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
669         if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
670                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
671                 priv->link_speed_capa |= ETH_LINK_SPEED_1G;
672         if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
673                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
674                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
675                 priv->link_speed_capa |= ETH_LINK_SPEED_10G;
676         if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
677                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
678                 priv->link_speed_capa |= ETH_LINK_SPEED_20G;
679         if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
680                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
681                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
682                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
683                 priv->link_speed_capa |= ETH_LINK_SPEED_40G;
684         if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
685                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
686                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
687                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
688                 priv->link_speed_capa |= ETH_LINK_SPEED_56G;
689         if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
690                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
691                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
692                 priv->link_speed_capa |= ETH_LINK_SPEED_25G;
693         if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
694                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
695                 priv->link_speed_capa |= ETH_LINK_SPEED_50G;
696         if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
697                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
698                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
699                   MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
700                 priv->link_speed_capa |= ETH_LINK_SPEED_100G;
701         dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
702                                 ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
703         dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
704                                   ETH_LINK_SPEED_FIXED);
705         if ((dev_link.link_speed && !dev_link.link_status) ||
706             (!dev_link.link_speed && dev_link.link_status)) {
707                 rte_errno = EAGAIN;
708                 return -rte_errno;
709         }
710         *link = dev_link;
711         return 0;
712 }
713
714 /**
715  * DPDK callback to retrieve physical link information.
716  *
717  * @param dev
718  *   Pointer to Ethernet device structure.
719  * @param wait_to_complete
720  *   Wait for request completion.
721  *
722  * @return
723  *   0 if link status was not updated, positive if it was, a negative errno
724  *   value otherwise and rte_errno is set.
725  */
726 int
727 mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
728 {
729         int ret;
730         struct rte_eth_link dev_link;
731         time_t start_time = time(NULL);
732
733         do {
734                 ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
735                 if (ret)
736                         ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
737                 if (ret == 0)
738                         break;
739                 /* Handle wait to complete situation. */
740                 if (wait_to_complete && ret == -EAGAIN) {
741                         if (abs((int)difftime(time(NULL), start_time)) <
742                             MLX5_LINK_STATUS_TIMEOUT) {
743                                 usleep(0);
744                                 continue;
745                         } else {
746                                 rte_errno = EBUSY;
747                                 return -rte_errno;
748                         }
749                 } else if (ret < 0) {
750                         return ret;
751                 }
752         } while (wait_to_complete);
753         ret = !!memcmp(&dev->data->dev_link, &dev_link,
754                        sizeof(struct rte_eth_link));
755         dev->data->dev_link = dev_link;
756         return ret;
757 }
758
759 /**
760  * DPDK callback to change the MTU.
761  *
762  * @param dev
763  *   Pointer to Ethernet device structure.
764  * @param mtu
765  *   New MTU.
766  *
767  * @return
768  *   0 on success, a negative errno value otherwise and rte_errno is set.
769  */
770 int
771 mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
772 {
773         struct priv *priv = dev->data->dev_private;
774         uint16_t kern_mtu = 0;
775         int ret;
776
777         ret = mlx5_get_mtu(dev, &kern_mtu);
778         if (ret)
779                 return ret;
780         /* Set kernel interface MTU first. */
781         ret = mlx5_set_mtu(dev, mtu);
782         if (ret)
783                 return ret;
784         ret = mlx5_get_mtu(dev, &kern_mtu);
785         if (ret)
786                 return ret;
787         if (kern_mtu == mtu) {
788                 priv->mtu = mtu;
789                 DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
790                         dev->data->port_id, mtu);
791                 return 0;
792         }
793         rte_errno = EAGAIN;
794         return -rte_errno;
795 }
796
797 /**
798  * DPDK callback to get flow control status.
799  *
800  * @param dev
801  *   Pointer to Ethernet device structure.
802  * @param[out] fc_conf
803  *   Flow control output buffer.
804  *
805  * @return
806  *   0 on success, a negative errno value otherwise and rte_errno is set.
807  */
808 int
809 mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
810 {
811         struct ifreq ifr;
812         struct ethtool_pauseparam ethpause = {
813                 .cmd = ETHTOOL_GPAUSEPARAM
814         };
815         int ret;
816
817         ifr.ifr_data = (void *)&ethpause;
818         ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
819         if (ret) {
820                 DRV_LOG(WARNING,
821                         "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
822                         " %s",
823                         dev->data->port_id, strerror(rte_errno));
824                 return ret;
825         }
826         fc_conf->autoneg = ethpause.autoneg;
827         if (ethpause.rx_pause && ethpause.tx_pause)
828                 fc_conf->mode = RTE_FC_FULL;
829         else if (ethpause.rx_pause)
830                 fc_conf->mode = RTE_FC_RX_PAUSE;
831         else if (ethpause.tx_pause)
832                 fc_conf->mode = RTE_FC_TX_PAUSE;
833         else
834                 fc_conf->mode = RTE_FC_NONE;
835         return 0;
836 }
837
838 /**
839  * DPDK callback to modify flow control parameters.
840  *
841  * @param dev
842  *   Pointer to Ethernet device structure.
843  * @param[in] fc_conf
844  *   Flow control parameters.
845  *
846  * @return
847  *   0 on success, a negative errno value otherwise and rte_errno is set.
848  */
849 int
850 mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
851 {
852         struct ifreq ifr;
853         struct ethtool_pauseparam ethpause = {
854                 .cmd = ETHTOOL_SPAUSEPARAM
855         };
856         int ret;
857
858         ifr.ifr_data = (void *)&ethpause;
859         ethpause.autoneg = fc_conf->autoneg;
860         if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
861             (fc_conf->mode & RTE_FC_RX_PAUSE))
862                 ethpause.rx_pause = 1;
863         else
864                 ethpause.rx_pause = 0;
865
866         if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
867             (fc_conf->mode & RTE_FC_TX_PAUSE))
868                 ethpause.tx_pause = 1;
869         else
870                 ethpause.tx_pause = 0;
871         ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
872         if (ret) {
873                 DRV_LOG(WARNING,
874                         "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
875                         " failed: %s",
876                         dev->data->port_id, strerror(rte_errno));
877                 return ret;
878         }
879         return 0;
880 }
881
882 /**
883  * Get PCI information from struct ibv_device.
884  *
885  * @param device
886  *   Pointer to IB device structure.
887  * @param[out] pci_addr
888  *   PCI bus address output buffer.
889  *
890  * @return
891  *   0 on success, a negative errno value otherwise and rte_errno is set.
892  */
893 int
894 mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
895                             struct rte_pci_addr *pci_addr)
896 {
897         FILE *file;
898         char line[32];
899         MKSTR(path, "%s/device/uevent", device->ibdev_path);
900
901         file = fopen(path, "rb");
902         if (file == NULL) {
903                 rte_errno = errno;
904                 return -rte_errno;
905         }
906         while (fgets(line, sizeof(line), file) == line) {
907                 size_t len = strlen(line);
908                 int ret;
909
910                 /* Truncate long lines. */
911                 if (len == (sizeof(line) - 1))
912                         while (line[(len - 1)] != '\n') {
913                                 ret = fgetc(file);
914                                 if (ret == EOF)
915                                         break;
916                                 line[(len - 1)] = ret;
917                         }
918                 /* Extract information. */
919                 if (sscanf(line,
920                            "PCI_SLOT_NAME="
921                            "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
922                            &pci_addr->domain,
923                            &pci_addr->bus,
924                            &pci_addr->devid,
925                            &pci_addr->function) == 4) {
926                         fclose(file);
927                         return 0;
928                 }
929         }
930         fclose(file);
931         /* The uevent file had no PCI_SLOT_NAME line. */
932         rte_errno = ENOENT;
933         return -rte_errno;
934 }
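/*
 * The uevent file parsed above contains KEY=value lines; the relevant one
 * looks like (hypothetical address):
 *
 *   PCI_SLOT_NAME=0000:08:00.0
 *
 * which yields domain 0x0000, bus 0x08, devid 0x00 and function 0x0.
 */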
933
934 /**
935  * Device status handler.
936  *
937  * @param dev
938  *   Pointer to Ethernet device.
939  *
940  * @return
941  *   Bitmap of events (1 << RTE_ETH_EVENT_INTR_LSC and/or
942  *   1 << RTE_ETH_EVENT_INTR_RMV) whose callbacks can be invoked
943  *   immediately.
944  */
945 static uint32_t
946 mlx5_dev_status_handler(struct rte_eth_dev *dev)
947 {
948         struct priv *priv = dev->data->dev_private;
949         struct ibv_async_event event;
950         uint32_t ret = 0;
951
952         if (mlx5_link_update(dev, 0) == -EAGAIN) {
953                 usleep(0);
954                 return 0;
955         }
956         /* Read all messages and acknowledge them. */
957         for (;;) {
958                 if (mlx5_glue->get_async_event(priv->ctx, &event))
959                         break;
960                 if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
961                         event.event_type == IBV_EVENT_PORT_ERR) &&
962                         (dev->data->dev_conf.intr_conf.lsc == 1))
963                         ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
964                 else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
965                         dev->data->dev_conf.intr_conf.rmv == 1)
966                         ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
967                 else
968                         DRV_LOG(DEBUG,
969                                 "port %u event type %d not handled",
970                                 dev->data->port_id, event.event_type);
971                 mlx5_glue->ack_async_event(&event);
972         }
973         return ret;
974 }
975
976 /**
977  * Handle interrupts from the NIC.
978  *
979  * @param[in] intr_handle
980  *   Interrupt handler.
981  * @param cb_arg
982  *   Callback argument.
983  */
984 void
985 mlx5_dev_interrupt_handler(void *cb_arg)
986 {
987         struct rte_eth_dev *dev = cb_arg;
988         uint32_t events;
989
990         events = mlx5_dev_status_handler(dev);
991         if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
992                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
993         if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
994                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
995 }
996
997 /**
998  * Handle interrupts from the socket.
999  *
1000  * @param cb_arg
1001  *   Callback argument.
1002  */
1003 static void
1004 mlx5_dev_handler_socket(void *cb_arg)
1005 {
1006         struct rte_eth_dev *dev = cb_arg;
1007
1008         mlx5_socket_handle(dev);
1009 }
1010
1011 /**
1012  * Uninstall interrupt handler.
1013  *
1014  * @param dev
1015  *   Pointer to Ethernet device.
1016  */
1017 void
1018 mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
1019 {
1020         struct priv *priv = dev->data->dev_private;
1021
1022         if (dev->data->dev_conf.intr_conf.lsc ||
1023             dev->data->dev_conf.intr_conf.rmv)
1024                 rte_intr_callback_unregister(&priv->intr_handle,
1025                                              mlx5_dev_interrupt_handler, dev);
1026         if (priv->primary_socket)
1027                 rte_intr_callback_unregister(&priv->intr_handle_socket,
1028                                              mlx5_dev_handler_socket, dev);
1029         priv->intr_handle.fd = 0;
1030         priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
1031         priv->intr_handle_socket.fd = 0;
1032         priv->intr_handle_socket.type = RTE_INTR_HANDLE_UNKNOWN;
1033 }
1034
1035 /**
1036  * Install interrupt handler.
1037  *
1038  * @param dev
1039  *   Pointer to Ethernet device.
1040  */
1041 void
1042 mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
1043 {
1044         struct priv *priv = dev->data->dev_private;
1045         int ret;
1046         int flags;
1047
1048         assert(priv->ctx->async_fd > 0);
1049         flags = fcntl(priv->ctx->async_fd, F_GETFL);
1050         ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
1051         if (ret) {
1052                 DRV_LOG(INFO,
1053                         "port %u failed to make the async event queue file"
1054                         " descriptor non-blocking",
1055                         dev->data->port_id);
1056                 dev->data->dev_conf.intr_conf.lsc = 0;
1057                 dev->data->dev_conf.intr_conf.rmv = 0;
1058         }
1059         if (dev->data->dev_conf.intr_conf.lsc ||
1060             dev->data->dev_conf.intr_conf.rmv) {
1061                 priv->intr_handle.fd = priv->ctx->async_fd;
1062                 priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
1063                 rte_intr_callback_register(&priv->intr_handle,
1064                                            mlx5_dev_interrupt_handler, dev);
1065         }
1066         ret = mlx5_socket_init(dev);
1067         if (ret)
1068                 DRV_LOG(ERR, "port %u cannot initialise socket: %s",
1069                         dev->data->port_id, strerror(rte_errno));
1070         else if (priv->primary_socket) {
1071                 priv->intr_handle_socket.fd = priv->primary_socket;
1072                 priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
1073                 rte_intr_callback_register(&priv->intr_handle_socket,
1074                                            mlx5_dev_handler_socket, dev);
1075         }
1076 }
1077
1078 /**
1079  * DPDK callback to bring the link DOWN.
1080  *
1081  * @param dev
1082  *   Pointer to Ethernet device structure.
1083  *
1084  * @return
1085  *   0 on success, a negative errno value otherwise and rte_errno is set.
1086  */
1087 int
1088 mlx5_set_link_down(struct rte_eth_dev *dev)
1089 {
1090         return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
1091 }
1092
1093 /**
1094  * DPDK callback to bring the link UP.
1095  *
1096  * @param dev
1097  *   Pointer to Ethernet device structure.
1098  *
1099  * @return
1100  *   0 on success, a negative errno value otherwise and rte_errno is set.
1101  */
1102 int
1103 mlx5_set_link_up(struct rte_eth_dev *dev)
1104 {
1105         return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
1106 }
1107
1108 /**
1109  * Select the Tx burst function to use.
1110  *
1111  * @param dev
1112  *   Pointer to Ethernet device structure.
1113  *
1114  * @return
1115  *   Pointer to selected Tx burst function.
1116  */
1117 eth_tx_burst_t
1118 mlx5_select_tx_function(struct rte_eth_dev *dev)
1119 {
1120         struct priv *priv = dev->data->dev_private;
1121         eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
1122         struct mlx5_dev_config *config = &priv->config;
1123         uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
1124         int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
1125                                     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1126                                     DEV_TX_OFFLOAD_GRE_TNL_TSO |
1127                                     DEV_TX_OFFLOAD_IP_TNL_TSO |
1128                                     DEV_TX_OFFLOAD_UDP_TNL_TSO));
1129         int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
1130                                     DEV_TX_OFFLOAD_UDP_TNL_TSO |
1131                                     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
1132         int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
1133
1134         assert(priv != NULL);
1135         /* Select appropriate TX function. */
1136         if (vlan_insert || tso || swp)
1137                 return tx_pkt_burst;
1138         if (config->mps == MLX5_MPW_ENHANCED) {
1139                 if (mlx5_check_vec_tx_support(dev) > 0) {
1140                         if (mlx5_check_raw_vec_tx_support(dev) > 0)
1141                                 tx_pkt_burst = mlx5_tx_burst_raw_vec;
1142                         else
1143                                 tx_pkt_burst = mlx5_tx_burst_vec;
1144                         DRV_LOG(DEBUG,
1145                                 "port %u selected enhanced MPW Tx vectorized"
1146                                 " function",
1147                                 dev->data->port_id);
1148                 } else {
1149                         tx_pkt_burst = mlx5_tx_burst_empw;
1150                         DRV_LOG(DEBUG,
1151                                 "port %u selected enhanced MPW Tx function",
1152                                 dev->data->port_id);
1153                 }
1154         } else if (config->mps && (config->txq_inline > 0)) {
1155                 tx_pkt_burst = mlx5_tx_burst_mpw_inline;
1156                 DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
1157                         dev->data->port_id);
1158         } else if (config->mps) {
1159                 tx_pkt_burst = mlx5_tx_burst_mpw;
1160                 DRV_LOG(DEBUG, "port %u selected MPW Tx function",
1161                         dev->data->port_id);
1162         }
1163         return tx_pkt_burst;
1164 }
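/*
 * Summary of the selection above (illustrative): any of VLAN insertion, TSO
 * or SWP offloads forces the default mlx5_tx_burst; enhanced MPW prefers a
 * vectorized variant when supported; plain MPW uses mlx5_tx_burst_mpw_inline
 * only when txq_inline is configured, and mlx5_tx_burst_mpw otherwise.
 */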
1165
1166 /**
1167  * Select the Rx burst function to use.
1168  *
1169  * @param dev
1170  *   Pointer to Ethernet device structure.
1171  *
1172  * @return
1173  *   Pointer to selected Rx burst function.
1174  */
1175 eth_rx_burst_t
1176 mlx5_select_rx_function(struct rte_eth_dev *dev)
1177 {
1178         eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
1179
1180         assert(dev != NULL);
1181         if (mlx5_check_vec_rx_support(dev) > 0) {
1182                 rx_pkt_burst = mlx5_rx_burst_vec;
1183                 DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
1184                         dev->data->port_id);
1185         }
1186         return rx_pkt_burst;
1187 }
1188
1189 /**
1190  * Check if mlx5 device was removed.
1191  *
1192  * @param dev
1193  *   Pointer to Ethernet device structure.
1194  *
1195  * @return
1196  *   1 when device is removed, otherwise 0.
1197  */
1198 int
1199 mlx5_is_removed(struct rte_eth_dev *dev)
1200 {
1201         struct ibv_device_attr device_attr;
1202         struct priv *priv = dev->data->dev_private;
1203
1204         if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO)
1205                 return 1;
1206         return 0;
1207 }