/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of 6WIND S.A. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stddef.h>
#include <assert.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <errno.h>
#include <dirent.h>
#include <fcntl.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
71 * Get interface name from private structure.
74 * Pointer to private structure.
76 * Interface name output buffer.
79 * 0 on success, -1 on failure and errno is set.
82 priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
86 unsigned int dev_type = 0;
87 unsigned int dev_port_prev = ~0u;
88 char match[IF_NAMESIZE] = "";
91 MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path);
97 while ((dent = readdir(dir)) != NULL) {
98 char *name = dent->d_name;
100 unsigned int dev_port;
103 if ((name[0] == '.') &&
104 ((name[1] == '\0') ||
105 ((name[1] == '.') && (name[2] == '\0'))))
108 MKSTR(path, "%s/device/net/%s/%s",
109 priv->ctx->device->ibdev_path, name,
110 (dev_type ? "dev_id" : "dev_port"));
112 file = fopen(path, "rb");
117 * Switch to dev_id when dev_port does not exist as
118 * is the case with Linux kernel versions < 3.15.
129 r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
134 * Switch to dev_id when dev_port returns the same value for
135 * all ports. May happen when using a MOFED release older than
136 * 3.0 with a Linux kernel >= 3.15.
138 if (dev_port == dev_port_prev)
140 dev_port_prev = dev_port;
141 if (dev_port == (priv->port - 1u))
142 snprintf(match, sizeof(match), "%s", name);
145 if (match[0] == '\0')
147 strncpy(*ifname, match, sizeof(*ifname));
152 * Read from sysfs entry.
155 * Pointer to private structure.
157 * Entry name relative to sysfs path.
159 * Data output buffer.
164 * 0 on success, -1 on failure and errno is set.
167 priv_sysfs_read(const struct priv *priv, const char *entry,
168 char *buf, size_t size)
170 char ifname[IF_NAMESIZE];
175 if (priv_get_ifname(priv, &ifname))
178 MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
181 file = fopen(path, "rb");
184 ret = fread(buf, 1, size, file);
186 if (((size_t)ret < size) && (ferror(file)))
196 * Write to sysfs entry.
199 * Pointer to private structure.
201 * Entry name relative to sysfs path.
208 * 0 on success, -1 on failure and errno is set.
211 priv_sysfs_write(const struct priv *priv, const char *entry,
212 char *buf, size_t size)
214 char ifname[IF_NAMESIZE];
219 if (priv_get_ifname(priv, &ifname))
222 MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
225 file = fopen(path, "wb");
228 ret = fwrite(buf, 1, size, file);
230 if (((size_t)ret < size) || (ferror(file)))
/**
 * Get unsigned long sysfs property.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] name
 *   Entry name relative to sysfs path.
 * @param[out] value
 *   Value output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value)
{
	int ret;
	unsigned long value_ret;
	char value_str[32];

	ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
	if (ret == -1) {
		DEBUG("cannot read %s value from sysfs: %s",
		      name, strerror(errno));
		return -1;
	}
	value_str[ret] = '\0';
	/* Reset errno so a stale value cannot fake a conversion failure. */
	errno = 0;
	value_ret = strtoul(value_str, NULL, 0);
	if (errno) {
		DEBUG("invalid %s value `%s': %s", name, value_str,
		      strerror(errno));
		return -1;
	}
	*value = value_ret;
	return 0;
}
278 * Set unsigned long sysfs property.
281 * Pointer to private structure.
283 * Entry name relative to sysfs path.
288 * 0 on success, -1 on failure and errno is set.
291 priv_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value)
294 MKSTR(value_str, "%lu", value);
296 ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
298 DEBUG("cannot write %s `%s' (%lu) to sysfs: %s",
299 name, value_str, value, strerror(errno));
306 * Perform ifreq ioctl() on associated Ethernet device.
309 * Pointer to private structure.
311 * Request number to pass to ioctl().
313 * Interface request structure output buffer.
316 * 0 on success, -1 on failure and errno is set.
319 priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
321 int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
326 if (priv_get_ifname(priv, &ifr->ifr_name) == 0)
327 ret = ioctl(sock, req, ifr);
/**
 * Get device MTU.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] mtu
 *   MTU value output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_mtu(struct priv *priv, uint16_t *mtu)
{
	unsigned long ulong_mtu;

	if (priv_get_sysfs_ulong(priv, "mtu", &ulong_mtu) == -1)
		return -1;
	*mtu = ulong_mtu;
	return 0;
}
/**
 * Set device MTU.
 *
 * @param priv
 *   Pointer to private structure.
 * @param mtu
 *   MTU value to set.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_set_mtu(struct priv *priv, uint16_t mtu)
{
	return priv_set_sysfs_ulong(priv, "mtu", mtu);
}
/**
 * Set device flags.
 *
 * @param priv
 *   Pointer to private structure.
 * @param keep
 *   Bitmask for flags that must remain untouched.
 * @param flags
 *   Bitmask for flags to modify.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
{
	unsigned long tmp;

	if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1)
		return -1;
	/* Keep protected bits, replace the rest with the requested flags. */
	tmp &= keep;
	tmp |= (flags & (~keep));
	return priv_set_sysfs_ulong(priv, "flags", tmp);
}
397 * Ethernet device configuration.
399 * Prepare the driver for a given number of TX and RX queues.
402 * Pointer to Ethernet device structure.
405 * 0 on success, errno value on failure.
408 dev_configure(struct rte_eth_dev *dev)
410 struct priv *priv = dev->data->dev_private;
411 unsigned int rxqs_n = dev->data->nb_rx_queues;
412 unsigned int txqs_n = dev->data->nb_tx_queues;
415 unsigned int reta_idx_n;
417 priv->rxqs = (void *)dev->data->rx_queues;
418 priv->txqs = (void *)dev->data->tx_queues;
419 if (txqs_n != priv->txqs_n) {
420 INFO("%p: TX queues number update: %u -> %u",
421 (void *)dev, priv->txqs_n, txqs_n);
422 priv->txqs_n = txqs_n;
424 if (rxqs_n > priv->ind_table_max_size) {
425 ERROR("cannot handle this many RX queues (%u)", rxqs_n);
428 if (rxqs_n == priv->rxqs_n)
430 INFO("%p: RX queues number update: %u -> %u",
431 (void *)dev, priv->rxqs_n, rxqs_n);
432 priv->rxqs_n = rxqs_n;
433 /* If the requested number of RX queues is not a power of two, use the
434 * maximum indirection table size for better balancing.
435 * The result is always rounded to the next power of two. */
436 reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
437 priv->ind_table_max_size :
439 if (priv_rss_reta_index_resize(priv, reta_idx_n))
441 /* When the number of RX queues is not a power of two, the remaining
442 * table entries are padded with reused WQs and hashes are not spread
444 for (i = 0, j = 0; (i != reta_idx_n); ++i) {
445 (*priv->reta_idx)[i] = j;
453 * DPDK callback for Ethernet device configuration.
456 * Pointer to Ethernet device structure.
459 * 0 on success, negative errno value on failure.
462 mlx5_dev_configure(struct rte_eth_dev *dev)
464 struct priv *priv = dev->data->dev_private;
468 ret = dev_configure(dev);
475 * DPDK callback to get information about the device.
478 * Pointer to Ethernet device structure.
480 * Info structure output buffer.
483 mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
485 struct priv *priv = dev->data->dev_private;
487 char ifname[IF_NAMESIZE];
490 /* FIXME: we should ask the device for these values. */
491 info->min_rx_bufsize = 32;
492 info->max_rx_pktlen = 65536;
494 * Since we need one CQ per QP, the limit is the minimum number
495 * between the two values.
497 max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?
498 priv->device_attr.max_qp : priv->device_attr.max_cq);
499 /* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */
502 info->max_rx_queues = max;
503 info->max_tx_queues = max;
504 info->max_mac_addrs = RTE_DIM(priv->mac);
505 info->rx_offload_capa =
507 (DEV_RX_OFFLOAD_IPV4_CKSUM |
508 DEV_RX_OFFLOAD_UDP_CKSUM |
509 DEV_RX_OFFLOAD_TCP_CKSUM) :
511 info->tx_offload_capa =
513 (DEV_TX_OFFLOAD_IPV4_CKSUM |
514 DEV_TX_OFFLOAD_UDP_CKSUM |
515 DEV_TX_OFFLOAD_TCP_CKSUM) :
517 if (priv_get_ifname(priv, &ifname) == 0)
518 info->if_index = if_nametoindex(ifname);
519 /* FIXME: RETA update/query API expects the callee to know the size of
520 * the indirection table, for this PMD the size varies depending on
521 * the number of RX queues, it becomes impossible to find the correct
522 * size if it is not fixed.
523 * The API should be updated to solve this problem. */
524 info->reta_size = priv->ind_table_max_size;
529 * DPDK callback to retrieve physical link information (unlocked version).
532 * Pointer to Ethernet device structure.
533 * @param wait_to_complete
534 * Wait for request completion (ignored).
537 mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
539 struct priv *priv = dev->data->dev_private;
540 struct ethtool_cmd edata = {
544 struct rte_eth_link dev_link;
547 (void)wait_to_complete;
548 if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
549 WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
552 memset(&dev_link, 0, sizeof(dev_link));
553 dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
554 (ifr.ifr_flags & IFF_RUNNING));
555 ifr.ifr_data = &edata;
556 if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
557 WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
561 link_speed = ethtool_cmd_speed(&edata);
562 if (link_speed == -1)
563 dev_link.link_speed = 0;
565 dev_link.link_speed = link_speed;
566 dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
567 ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
568 if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
569 /* Link status changed. */
570 dev->data->dev_link = dev_link;
573 /* Link status is still the same. */
578 * DPDK callback to retrieve physical link information.
581 * Pointer to Ethernet device structure.
582 * @param wait_to_complete
583 * Wait for request completion (ignored).
586 mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
588 struct priv *priv = dev->data->dev_private;
592 ret = mlx5_link_update_unlocked(dev, wait_to_complete);
598 * DPDK callback to change the MTU.
600 * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
601 * received). Use this as a hint to enable/disable scattered packets support
602 * and improve performance when not needed.
603 * Since failure is not an option, reconfiguring queues on the fly is not
607 * Pointer to Ethernet device structure.
612 * 0 on success, negative errno value on failure.
615 mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
617 struct priv *priv = dev->data->dev_private;
620 uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) =
624 /* Set kernel interface MTU first. */
625 if (priv_set_mtu(priv, mtu)) {
627 WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
631 DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
633 /* Temporarily replace RX handler with a fake one, assuming it has not
634 * been copied elsewhere. */
635 dev->rx_pkt_burst = removed_rx_burst;
636 /* Make sure everyone has left mlx5_rx_burst() and uses
637 * removed_rx_burst() instead. */
640 /* Reconfigure each RX queue. */
641 for (i = 0; (i != priv->rxqs_n); ++i) {
642 struct rxq *rxq = (*priv->rxqs)[i];
643 unsigned int max_frame_len;
648 /* Calculate new maximum frame length according to MTU and
649 * toggle scattered support (sp) if necessary. */
650 max_frame_len = (priv->mtu + ETHER_HDR_LEN +
651 (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
652 sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM));
653 /* Provide new values to rxq_setup(). */
654 dev->data->dev_conf.rxmode.jumbo_frame = sp;
655 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
656 ret = rxq_rehash(dev, rxq);
658 /* Force SP RX if that queue requires it and abort. */
660 rx_func = mlx5_rx_burst_sp;
663 /* Scattered burst function takes priority. */
665 rx_func = mlx5_rx_burst_sp;
667 /* Burst functions can now be called again. */
669 dev->rx_pkt_burst = rx_func;
677 * DPDK callback to get flow control status.
680 * Pointer to Ethernet device structure.
681 * @param[out] fc_conf
682 * Flow control output buffer.
685 * 0 on success, negative errno value on failure.
688 mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
690 struct priv *priv = dev->data->dev_private;
692 struct ethtool_pauseparam ethpause = {
693 .cmd = ETHTOOL_GPAUSEPARAM
697 ifr.ifr_data = ðpause;
699 if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
701 WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
707 fc_conf->autoneg = ethpause.autoneg;
708 if (ethpause.rx_pause && ethpause.tx_pause)
709 fc_conf->mode = RTE_FC_FULL;
710 else if (ethpause.rx_pause)
711 fc_conf->mode = RTE_FC_RX_PAUSE;
712 else if (ethpause.tx_pause)
713 fc_conf->mode = RTE_FC_TX_PAUSE;
715 fc_conf->mode = RTE_FC_NONE;
725 * DPDK callback to modify flow control parameters.
728 * Pointer to Ethernet device structure.
730 * Flow control parameters.
733 * 0 on success, negative errno value on failure.
736 mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
738 struct priv *priv = dev->data->dev_private;
740 struct ethtool_pauseparam ethpause = {
741 .cmd = ETHTOOL_SPAUSEPARAM
745 ifr.ifr_data = ðpause;
746 ethpause.autoneg = fc_conf->autoneg;
747 if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
748 (fc_conf->mode & RTE_FC_RX_PAUSE))
749 ethpause.rx_pause = 1;
751 ethpause.rx_pause = 0;
753 if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
754 (fc_conf->mode & RTE_FC_TX_PAUSE))
755 ethpause.tx_pause = 1;
757 ethpause.tx_pause = 0;
760 if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
762 WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
776 * Get PCI information from struct ibv_device.
779 * Pointer to Ethernet device structure.
780 * @param[out] pci_addr
781 * PCI bus address output buffer.
784 * 0 on success, -1 on failure and errno is set.
787 mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
788 struct rte_pci_addr *pci_addr)
792 MKSTR(path, "%s/device/uevent", device->ibdev_path);
794 file = fopen(path, "rb");
797 while (fgets(line, sizeof(line), file) == line) {
798 size_t len = strlen(line);
801 /* Truncate long lines. */
802 if (len == (sizeof(line) - 1))
803 while (line[(len - 1)] != '\n') {
807 line[(len - 1)] = ret;
809 /* Extract information. */
812 "%" SCNx16 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
816 &pci_addr->function) == 4) {
826 * Link status handler.
829 * Pointer to private structure.
831 * Pointer to the rte_eth_dev structure.
834 * Nonzero if the callback process can be called immediately.
837 priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
839 struct ibv_async_event event;
843 /* Read all message and acknowledge them. */
845 if (ibv_get_async_event(priv->ctx, &event))
848 if (event.event_type == IBV_EVENT_PORT_ACTIVE ||
849 event.event_type == IBV_EVENT_PORT_ERR)
852 DEBUG("event type %d on port %d not handled",
853 event.event_type, event.element.port_num);
854 ibv_ack_async_event(&event);
857 if (port_change ^ priv->pending_alarm) {
858 struct rte_eth_link *link = &dev->data->dev_link;
860 priv->pending_alarm = 0;
861 mlx5_link_update_unlocked(dev, 0);
862 if (((link->link_speed == 0) && link->link_status) ||
863 ((link->link_speed != 0) && !link->link_status)) {
864 /* Inconsistent status, check again later. */
865 priv->pending_alarm = 1;
866 rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
867 mlx5_dev_link_status_handler,
876 * Handle delayed link status event.
879 * Registered argument.
882 mlx5_dev_link_status_handler(void *arg)
884 struct rte_eth_dev *dev = arg;
885 struct priv *priv = dev->data->dev_private;
889 assert(priv->pending_alarm == 1);
890 ret = priv_dev_link_status_handler(priv, dev);
893 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
897 * Handle interrupts from the NIC.
899 * @param[in] intr_handle
905 mlx5_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
907 struct rte_eth_dev *dev = cb_arg;
908 struct priv *priv = dev->data->dev_private;
913 ret = priv_dev_link_status_handler(priv, dev);
916 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
920 * Uninstall interrupt handler.
923 * Pointer to private structure.
925 * Pointer to the rte_eth_dev structure.
928 priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
930 if (!dev->data->dev_conf.intr_conf.lsc)
932 rte_intr_callback_unregister(&priv->intr_handle,
933 mlx5_dev_interrupt_handler,
935 if (priv->pending_alarm)
936 rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
937 priv->pending_alarm = 0;
938 priv->intr_handle.fd = 0;
939 priv->intr_handle.type = 0;
943 * Install interrupt handler.
946 * Pointer to private structure.
948 * Pointer to the rte_eth_dev structure.
951 priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
955 if (!dev->data->dev_conf.intr_conf.lsc)
957 assert(priv->ctx->async_fd > 0);
958 flags = fcntl(priv->ctx->async_fd, F_GETFL);
959 rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
961 INFO("failed to change file descriptor async event queue");
962 dev->data->dev_conf.intr_conf.lsc = 0;
964 priv->intr_handle.fd = priv->ctx->async_fd;
965 priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
966 rte_intr_callback_register(&priv->intr_handle,
967 mlx5_dev_interrupt_handler,