* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#define _GNU_SOURCE
+
#include <stddef.h>
#include <assert.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
+#include <sys/utsname.h>
#include <netinet/in.h>
-#include <linux/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
+#include <linux/version.h>
+#include <fcntl.h>
+#include <stdalign.h>
+#include <sys/un.h>
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
-#endif
#include <rte_atomic.h>
#include <rte_ethdev.h>
+#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_common.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
-#endif
+#include <rte_interrupts.h>
+#include <rte_alarm.h>
+#include <rte_malloc.h>
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
+/* Add defines in case the running kernel is not the same as the user-space headers. */
+#ifndef ETHTOOL_GLINKSETTINGS
+struct ethtool_link_settings {
+ uint32_t cmd;
+ uint32_t speed;
+ uint8_t duplex;
+ uint8_t port;
+ uint8_t phy_address;
+ uint8_t autoneg;
+ uint8_t mdio_support;
+	uint8_t eth_tp_mdix;
+ uint8_t eth_tp_mdix_ctrl;
+ int8_t link_mode_masks_nwords;
+ uint32_t reserved[8];
+ uint32_t link_mode_masks[];
+};
+
+#define ETHTOOL_GLINKSETTINGS 0x0000004c
+#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
+#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
+#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
+#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
+#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
+#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
+#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
+#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
+#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
+#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
+#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
+#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
+#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
+#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
+#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
+#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_25G
+#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
+#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
+#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_50G
+#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
+#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_100G
+#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
+#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
+#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
+#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
+#endif
+
/**
* Get interface name from private structure.
*
char match[IF_NAMESIZE] = "";
{
- MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path);
+ MKSTR(path, "%s/device/net", priv->ibdev_path);
dir = opendir(path);
if (dir == NULL)
continue;
MKSTR(path, "%s/device/net/%s/%s",
- priv->ctx->device->ibdev_path, name,
+ priv->ibdev_path, name,
(dev_type ? "dev_id" : "dev_port"));
file = fopen(path, "rb");
return 0;
}
+/**
+ * Check whether the counter is located in the ib counters file.
+ *
+ * @param[in] cntr
+ * Counter name.
+ *
+ * @return
+ *   1 if the counter is located in the ib counters file, 0 otherwise.
+ */
+int
+priv_is_ib_cntr(const char *cntr)
+{
+ if (!strcmp(cntr, "out_of_buffer"))
+ return 1;
+ return 0;
+}
+
/**
* Read from sysfs entry.
*
if (priv_get_ifname(priv, &ifname))
return -1;
- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
- ifname, entry);
-
- file = fopen(path, "rb");
+ if (priv_is_ib_cntr(entry)) {
+ MKSTR(path, "%s/ports/1/hw_counters/%s",
+ priv->ibdev_path, entry);
+ file = fopen(path, "rb");
+ } else {
+ MKSTR(path, "%s/device/net/%s/%s",
+ priv->ibdev_path, ifname, entry);
+ file = fopen(path, "rb");
+ }
if (file == NULL)
return -1;
ret = fread(buf, 1, size, file);
if (priv_get_ifname(priv, &ifname))
return -1;
- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
- ifname, entry);
+ MKSTR(path, "%s/device/net/%s/%s", priv->ibdev_path, ifname, entry);
file = fopen(path, "wb");
if (file == NULL)
return ret;
}
+/**
+ * Return the number of active VFs for the current device.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param[out] num_vfs
+ * Number of active VFs.
+ *
+ * @return
+ * 0 on success, -1 on failure and errno is set.
+ */
+int
+priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs)
+{
+ /* The sysfs entry name depends on the operating system. */
+ const char **name = (const char *[]){
+ "device/sriov_numvfs",
+ "device/mlx5_num_vfs",
+ NULL,
+ };
+ int ret;
+
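+	/* Try each known entry until one can be read successfully. */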
+ do {
+ unsigned long ulong_num_vfs;
+
+ ret = priv_get_sysfs_ulong(priv, *name, &ulong_num_vfs);
+ if (!ret)
+ *num_vfs = ulong_num_vfs;
+ } while (*(++name) && ret);
+ return ret;
+}
+
/**
* Get device MTU.
*
return 0;
}
+/**
+ * Read device counter from sysfs.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param name
+ * Counter name.
+ * @param[out] cntr
+ * Counter output buffer.
+ *
+ * @return
+ * 0 on success, -1 on failure and errno is set.
+ */
+int
+priv_get_cntr_sysfs(struct priv *priv, const char *name, uint64_t *cntr)
+{
+ unsigned long ulong_ctr;
+
+ if (priv_get_sysfs_ulong(priv, name, &ulong_ctr) == -1)
+ return -1;
+ *cntr = ulong_ctr;
+ return 0;
+}
+
/**
* Set device MTU.
*
static int
priv_set_mtu(struct priv *priv, uint16_t mtu)
{
- return priv_set_sysfs_ulong(priv, "mtu", mtu);
+ uint16_t new_mtu;
+
+ if (priv_set_sysfs_ulong(priv, "mtu", mtu) ||
+ priv_get_mtu(priv, &new_mtu))
+ return -1;
+ if (new_mtu == mtu)
+ return 0;
+ errno = EINVAL;
+ return -1;
}
/**
if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1)
return -1;
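+	/* Bits within "keep" are preserved; "flags" may only affect the rest. */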
tmp &= keep;
- tmp |= flags;
+ tmp |= (flags & (~keep));
return priv_set_sysfs_ulong(priv, "flags", tmp);
}
struct priv *priv = dev->data->dev_private;
unsigned int rxqs_n = dev->data->nb_rx_queues;
unsigned int txqs_n = dev->data->nb_tx_queues;
+ unsigned int i;
+ unsigned int j;
+ unsigned int reta_idx_n;
+ const uint8_t use_app_rss_key =
+ !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+ if (use_app_rss_key &&
+ (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
+ rss_hash_default_key_len)) {
+		/* MLX5 RSS only supports a 40-byte key. */
+ return EINVAL;
+ }
+ priv->rss_conf.rss_key =
+ rte_realloc(priv->rss_conf.rss_key,
+ rss_hash_default_key_len, 0);
+ if (!priv->rss_conf.rss_key) {
+ ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n);
+ return ENOMEM;
+ }
+ memcpy(priv->rss_conf.rss_key,
+ use_app_rss_key ?
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
+ rss_hash_default_key,
+ rss_hash_default_key_len);
+ priv->rss_conf.rss_key_len = rss_hash_default_key_len;
+ priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
priv->rxqs = (void *)dev->data->rx_queues;
priv->txqs = (void *)dev->data->tx_queues;
if (txqs_n != priv->txqs_n) {
(void *)dev, priv->txqs_n, txqs_n);
priv->txqs_n = txqs_n;
}
+ if (rxqs_n > priv->ind_table_max_size) {
+ ERROR("cannot handle this many RX queues (%u)", rxqs_n);
+ return EINVAL;
+ }
if (rxqs_n == priv->rxqs_n)
return 0;
INFO("%p: RX queues number update: %u -> %u",
(void *)dev, priv->rxqs_n, rxqs_n);
priv->rxqs_n = rxqs_n;
+ /* If the requested number of RX queues is not a power of two, use the
+ * maximum indirection table size for better balancing.
+ * The result is always rounded to the next power of two. */
+ reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
+ priv->ind_table_max_size :
+ rxqs_n));
+ if (priv_rss_reta_index_resize(priv, reta_idx_n))
+ return ENOMEM;
+ /* When the number of RX queues is not a power of two, the remaining
+ * table entries are padded with reused WQs and hashes are not spread
+ * uniformly. */
+ for (i = 0, j = 0; (i != reta_idx_n); ++i) {
+ (*priv->reta_idx)[i] = j;
+ if (++j == rxqs_n)
+ j = 0;
+ }
return 0;
}
unsigned int max;
char ifname[IF_NAMESIZE];
+ info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
priv_lock(priv);
/* FIXME: we should ask the device for these values. */
info->min_rx_bufsize = 32;
* Since we need one CQ per QP, the limit is the minimum number
* between the two values.
*/
- max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?
- priv->device_attr.max_qp : priv->device_attr.max_cq);
+ max = RTE_MIN(priv->device_attr.orig_attr.max_cq,
+ priv->device_attr.orig_attr.max_qp);
/* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */
if (max >= 65535)
max = 65535;
info->max_rx_queues = max;
info->max_tx_queues = max;
- /* Last array entry is reserved for broadcast. */
- info->max_mac_addrs = (RTE_DIM(priv->mac) - 1);
+ info->max_mac_addrs = RTE_DIM(priv->mac);
info->rx_offload_capa =
(priv->hw_csum ?
(DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM) :
- 0);
- info->tx_offload_capa =
- (priv->hw_csum ?
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM) :
- 0);
+ 0) |
+ (priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
+ DEV_RX_OFFLOAD_TIMESTAMP;
+
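+	/* TX VLAN insertion is only advertised when multi-packet send is off. */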
+ if (!priv->mps)
+ info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+ if (priv->hw_csum)
+ info->tx_offload_capa |=
+ (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ if (priv->tso)
+ info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (priv->tunnel_en)
+ info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
+ info->reta_size = priv->reta_idx_n ?
+ priv->reta_idx_n : priv->ind_table_max_size;
+ info->hash_key_size = priv->rss_conf.rss_key_len;
+ info->speed_capa = priv->link_speed_capa;
priv_unlock(priv);
}
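+
+/**
+ * DPDK callback to get supported packet types.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   A pointer to the supported packet types array, or NULL if the current
+ *   RX burst function is not covered by it.
+ */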
+const uint32_t *
+mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to rxq_cq_to_pkt_type() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == mlx5_rx_burst ||
+ dev->rx_pkt_burst == mlx5_rx_burst_vec)
+ return ptypes;
+ return NULL;
+}
+
/**
- * DPDK callback to retrieve physical link information (unlocked version).
+ * Retrieve physical link information (unlocked version using legacy ioctl).
*
* @param dev
* Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
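+ *
+ * @return
+ *   0 when the link status has changed, -1 otherwise.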
*/
static int
-mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
{
struct priv *priv = dev->data->dev_private;
struct ethtool_cmd edata = {
- .cmd = ETHTOOL_GSET
+ .cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
};
struct ifreq ifr;
struct rte_eth_link dev_link;
int link_speed = 0;
+ /* priv_lock() is not taken to allow concurrent calls. */
+
(void)wait_to_complete;
if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
- ifr.ifr_data = &edata;
+ ifr.ifr_data = (void *)&edata;
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
strerror(errno));
dev_link.link_speed = 0;
else
dev_link.link_speed = link_speed;
+ priv->link_speed_capa = 0;
+ if (edata.supported & SUPPORTED_Autoneg)
+ priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
+ if (edata.supported & (SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseKX_Full))
+ priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+ if (edata.supported & SUPPORTED_10000baseKR_Full)
+ priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+ if (edata.supported & (SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_40000baseCR4_Full |
+ SUPPORTED_40000baseSR4_Full |
+ SUPPORTED_40000baseLR4_Full))
+ priv->link_speed_capa |= ETH_LINK_SPEED_40G;
dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+ dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
+ /* Link status changed. */
+ dev->data->dev_link = dev_link;
+ return 0;
+ }
+ /* Link status is still the same. */
+ return -1;
+}
+
+/**
+ * Retrieve physical link information (unlocked version using new ioctl).
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ * @param wait_to_complete
+ *   Wait for request completion (ignored).
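+ *
+ * @return
+ *   0 when the link status has changed, -1 otherwise.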
+ */
+static int
+mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
+ struct ifreq ifr;
+ struct rte_eth_link dev_link;
+ uint64_t sc;
+
+ (void)wait_to_complete;
+ if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
+ WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
+ return -1;
+ }
+ memset(&dev_link, 0, sizeof(dev_link));
+ dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
+ (ifr.ifr_flags & IFF_RUNNING));
+ ifr.ifr_data = (void *)&gcmd;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
+ strerror(errno));
+ return -1;
+ }
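+	/*
+	 * The kernel answers the initial request with the number of supported
+	 * 32-bit link mode words as a negative value; make it positive to
+	 * size the full request below.
+	 */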
+ gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
+
+ alignas(struct ethtool_link_settings)
+ uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
+ sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
+ struct ethtool_link_settings *ecmd = (void *)data;
+
+ *ecmd = gcmd;
+ ifr.ifr_data = (void *)ecmd;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
+ strerror(errno));
+ return -1;
+ }
+ dev_link.link_speed = ecmd->speed;
+ sc = ecmd->link_mode_masks[0] |
+ ((uint64_t)ecmd->link_mode_masks[1] << 32);
+ priv->link_speed_capa = 0;
+ if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
+ priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_20G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_56G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_25G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_50G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_100G;
+ dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
+ ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+ dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
/* Link status changed. */
dev->data->dev_link = dev_link;
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
- struct priv *priv = dev->data->dev_private;
- int ret;
+ struct utsname utsname;
+ int ver[3];
- priv_lock(priv);
- ret = mlx5_link_update_unlocked(dev, wait_to_complete);
- priv_unlock(priv);
- return ret;
+ if (uname(&utsname) == -1 ||
+ sscanf(utsname.release, "%d.%d.%d",
+ &ver[0], &ver[1], &ver[2]) != 3 ||
+ KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
+ return mlx5_link_update_unlocked_gset(dev, wait_to_complete);
+ return mlx5_link_update_unlocked_gs(dev, wait_to_complete);
}
/**
* DPDK callback to change the MTU.
*
- * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
- * received). Use this as a hint to enable/disable scattered packets support
- * and improve performance when not needed.
- * Since failure is not an option, reconfiguring queues on the fly is not
- * recommended.
- *
* @param dev
* Pointer to Ethernet device structure.
 * @param mtu
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct priv *priv = dev->data->dev_private;
+ uint16_t kern_mtu;
int ret = 0;
- unsigned int i;
- uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) =
- mlx5_rx_burst;
priv_lock(priv);
+ ret = priv_get_mtu(priv, &kern_mtu);
+ if (ret)
+ goto out;
/* Set kernel interface MTU first. */
- if (priv_set_mtu(priv, mtu)) {
- ret = errno;
- WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
- strerror(ret));
+ ret = priv_set_mtu(priv, mtu);
+ if (ret)
goto out;
- } else
+ ret = priv_get_mtu(priv, &kern_mtu);
+ if (ret)
+ goto out;
+ if (kern_mtu == mtu) {
+ priv->mtu = mtu;
DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
- priv->mtu = mtu;
- /* Temporarily replace RX handler with a fake one, assuming it has not
- * been copied elsewhere. */
- dev->rx_pkt_burst = removed_rx_burst;
- /* Make sure everyone has left mlx5_rx_burst() and uses
- * removed_rx_burst() instead. */
- rte_wmb();
- usleep(1000);
- /* Reconfigure each RX queue. */
- for (i = 0; (i != priv->rxqs_n); ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- unsigned int max_frame_len;
- int sp;
-
- if (rxq == NULL)
- continue;
- /* Calculate new maximum frame length according to MTU and
- * toggle scattered support (sp) if necessary. */
- max_frame_len = (priv->mtu + ETHER_HDR_LEN +
- (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
- sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM));
- /* Provide new values to rxq_setup(). */
- dev->data->dev_conf.rxmode.jumbo_frame = sp;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
- ret = rxq_rehash(dev, rxq);
- if (ret) {
- /* Force SP RX if that queue requires it and abort. */
- if (rxq->sp)
- rx_func = mlx5_rx_burst_sp;
- break;
- }
- /* Scattered burst function takes priority. */
- if (rxq->sp)
- rx_func = mlx5_rx_burst_sp;
}
- /* Burst functions can now be called again. */
- rte_wmb();
- dev->rx_pkt_burst = rx_func;
+ priv_unlock(priv);
+ return 0;
out:
+ ret = errno;
+ WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
+ strerror(ret));
priv_unlock(priv);
assert(ret >= 0);
return -ret;
};
int ret;
- ifr.ifr_data = ðpause;
+ ifr.ifr_data = (void *)ðpause;
priv_lock(priv);
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
ret = errno;
};
int ret;
- ifr.ifr_data = ðpause;
+ ifr.ifr_data = (void *)ðpause;
ethpause.autoneg = fc_conf->autoneg;
if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
(fc_conf->mode & RTE_FC_RX_PAUSE))
/* Extract information. */
if (sscanf(line,
"PCI_SLOT_NAME="
- "%" SCNx16 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
+ "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
&pci_addr->domain,
&pci_addr->bus,
&pci_addr->devid,
fclose(file);
return 0;
}
+
+/**
+ * Update the link status.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ *   Zero if the callback can be invoked immediately, one if it has been
+ *   deferred by a link status alarm.
+ */
+static int
+priv_link_status_update(struct priv *priv)
+{
+ struct rte_eth_link *link = &priv->dev->data->dev_link;
+
+ mlx5_link_update(priv->dev, 0);
+ if (((link->link_speed == 0) && link->link_status) ||
+ ((link->link_speed != 0) && !link->link_status)) {
+ /*
+			 * Inconsistent status. The event likely occurred
+			 * before the kernel netdevice exposed the new status.
+ */
+ if (!priv->pending_alarm) {
+ priv->pending_alarm = 1;
+ rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
+ mlx5_dev_link_status_handler,
+ priv->dev);
+ }
+ return 1;
+ } else if (unlikely(priv->pending_alarm)) {
+ /* Link interrupt occurred while alarm is already scheduled. */
+ priv->pending_alarm = 0;
+ rte_eal_alarm_cancel(mlx5_dev_link_status_handler, priv->dev);
+ }
+ return 0;
+}
+
+/**
+ * Device status handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ *   Bitmap of events for which callbacks may be invoked immediately.
+ */
+static uint32_t
+priv_dev_status_handler(struct priv *priv)
+{
+ struct ibv_async_event event;
+ uint32_t ret = 0;
+
+	/* Read all messages and acknowledge them. */
+ for (;;) {
+ if (ibv_get_async_event(priv->ctx, &event))
+ break;
+ if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
+ event.event_type == IBV_EVENT_PORT_ERR) &&
+ (priv->dev->data->dev_conf.intr_conf.lsc == 1))
+ ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
+ else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
+ priv->dev->data->dev_conf.intr_conf.rmv == 1)
+ ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
+ else
+ DEBUG("event type %d on port %d not handled",
+ event.event_type, event.element.port_num);
+ ibv_ack_async_event(&event);
+ }
+ if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
+ if (priv_link_status_update(priv))
+ ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC);
+ return ret;
+}
+
+/**
+ * Handle delayed link status event.
+ *
+ * @param arg
+ * Registered argument.
+ */
+void
+mlx5_dev_link_status_handler(void *arg)
+{
+ struct rte_eth_dev *dev = arg;
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ priv_lock(priv);
+ assert(priv->pending_alarm == 1);
+ priv->pending_alarm = 0;
+ ret = priv_link_status_update(priv);
+ priv_unlock(priv);
+ if (!ret)
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
+ NULL);
+}
+
+/**
+ * Handle interrupts from the NIC.
+ *
+ * @param cb_arg
+ * Callback argument.
+ */
+void
+mlx5_dev_interrupt_handler(void *cb_arg)
+{
+ struct rte_eth_dev *dev = cb_arg;
+ struct priv *priv = dev->data->dev_private;
+ uint32_t events;
+
+ priv_lock(priv);
+ events = priv_dev_status_handler(priv);
+ priv_unlock(priv);
+ if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
+ NULL);
+ if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL,
+ NULL);
+}
+
+/**
+ * Handle interrupts from the socket.
+ *
+ * @param cb_arg
+ * Callback argument.
+ */
+static void
+mlx5_dev_handler_socket(void *cb_arg)
+{
+ struct rte_eth_dev *dev = cb_arg;
+ struct priv *priv = dev->data->dev_private;
+
+ priv_lock(priv);
+ priv_socket_handle(priv);
+ priv_unlock(priv);
+}
+
+/**
+ * Uninstall interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ */
+void
+priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
+{
+ if (dev->data->dev_conf.intr_conf.lsc ||
+ dev->data->dev_conf.intr_conf.rmv)
+ rte_intr_callback_unregister(&priv->intr_handle,
+ mlx5_dev_interrupt_handler, dev);
+ if (priv->primary_socket)
+ rte_intr_callback_unregister(&priv->intr_handle_socket,
+ mlx5_dev_handler_socket, dev);
+ if (priv->pending_alarm)
+ rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
+ priv->pending_alarm = 0;
+ priv->intr_handle.fd = 0;
+ priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ priv->intr_handle_socket.fd = 0;
+ priv->intr_handle_socket.type = RTE_INTR_HANDLE_UNKNOWN;
+}
+
+/**
+ * Install interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ */
+void
+priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
+{
+ int rc, flags;
+
+ assert(priv->ctx->async_fd > 0);
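+	/* Make the async event queue non-blocking so the handler can drain it
+	 * without stalling. */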
+ flags = fcntl(priv->ctx->async_fd, F_GETFL);
+ rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+ if (rc < 0) {
+		INFO("failed to change file descriptor of the async event queue");
+ dev->data->dev_conf.intr_conf.lsc = 0;
+ dev->data->dev_conf.intr_conf.rmv = 0;
+ }
+ if (dev->data->dev_conf.intr_conf.lsc ||
+ dev->data->dev_conf.intr_conf.rmv) {
+ priv->intr_handle.fd = priv->ctx->async_fd;
+ priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
+ rte_intr_callback_register(&priv->intr_handle,
+ mlx5_dev_interrupt_handler, dev);
+ }
+
+ rc = priv_socket_init(priv);
+ if (!rc && priv->primary_socket) {
+ priv->intr_handle_socket.fd = priv->primary_socket;
+ priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
+ rte_intr_callback_register(&priv->intr_handle_socket,
+ mlx5_dev_handler_socket, dev);
+ }
+}
+
+/**
+ * Change the link state (UP / DOWN).
+ *
+ * @param priv
+ * Pointer to private data structure.
+ * @param dev
+ * Pointer to rte_eth_dev structure.
+ * @param up
+ * Nonzero for link up, otherwise link down.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+priv_dev_set_link(struct priv *priv, struct rte_eth_dev *dev, int up)
+{
+ int err;
+
+ if (up) {
+ err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
+ if (err)
+ return err;
+ priv_dev_select_tx_function(priv, dev);
+ priv_dev_select_rx_function(priv, dev);
+ } else {
+ err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
+ if (err)
+ return err;
+ dev->rx_pkt_burst = removed_rx_burst;
+ dev->tx_pkt_burst = removed_tx_burst;
+ }
+ return 0;
+}
+
+/**
+ * DPDK callback to bring the link DOWN.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+mlx5_set_link_down(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int err;
+
+ priv_lock(priv);
+ err = priv_dev_set_link(priv, dev, 0);
+ priv_unlock(priv);
+ return err;
+}
+
+/**
+ * DPDK callback to bring the link UP.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+mlx5_set_link_up(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int err;
+
+ priv_lock(priv);
+ err = priv_dev_set_link(priv, dev, 1);
+ priv_unlock(priv);
+ return err;
+}
+
+/**
+ * Configure the TX function to use.
+ *
+ * @param priv
+ * Pointer to private data structure.
+ * @param dev
+ * Pointer to rte_eth_dev structure.
+ */
+void
+priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
+{
+ assert(priv != NULL);
+ assert(dev != NULL);
+ dev->tx_pkt_burst = mlx5_tx_burst;
+ /* Select appropriate TX function. */
+ if (priv->mps == MLX5_MPW_ENHANCED) {
+ if (priv_check_vec_tx_support(priv) > 0) {
+ if (priv_check_raw_vec_tx_support(priv) > 0)
+ dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
+ else
+ dev->tx_pkt_burst = mlx5_tx_burst_vec;
+ DEBUG("selected Enhanced MPW TX vectorized function");
+ } else {
+ dev->tx_pkt_burst = mlx5_tx_burst_empw;
+ DEBUG("selected Enhanced MPW TX function");
+ }
+ } else if (priv->mps && priv->txq_inline) {
+ dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
+ DEBUG("selected MPW inline TX function");
+ } else if (priv->mps) {
+ dev->tx_pkt_burst = mlx5_tx_burst_mpw;
+ DEBUG("selected MPW TX function");
+ }
+}
+
+/**
+ * Configure the RX function to use.
+ *
+ * @param priv
+ * Pointer to private data structure.
+ * @param dev
+ * Pointer to rte_eth_dev structure.
+ */
+void
+priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev)
+{
+ assert(priv != NULL);
+ assert(dev != NULL);
+ if (priv_check_vec_rx_support(priv) > 0) {
+ dev->rx_pkt_burst = mlx5_rx_burst_vec;
+ DEBUG("selected RX vectorized function");
+ } else {
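+		/* Fall back to the scalar RX burst function. */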
+ dev->rx_pkt_burst = mlx5_rx_burst;
+ }
+}