CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS=1
#
-# Compile burst-oriented Mellanox ConnectX-4 (MLX5) PMD
+# Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD
#
CONFIG_RTE_LIBRTE_MLX5_PMD=n
CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
MLX5 poll mode driver
=====================
-The MLX5 poll mode driver library (**librte_pmd_mlx5**) provides support for
-**Mellanox ConnectX-4** and **Mellanox ConnectX-4 Lx** families of
-10/25/40/50/100 Gb/s adapters as well as their virtual functions (VF) in
-SR-IOV context.
+The MLX5 poll mode driver library (**librte_pmd_mlx5**) provides support
+for **Mellanox ConnectX-4**, **Mellanox ConnectX-4 Lx** and **Mellanox
+ConnectX-5** families of 10/25/40/50/100 Gb/s adapters as well as their
+virtual functions (VF) in SR-IOV context.
Information and documentation about these adapters can be found on the
`Mellanox website <http://www.mellanox.com>`__. Help is also provided by the
save PCI bandwidth and improve performance at the cost of a slightly
higher CPU usage.
- It is currently only supported on the ConnectX-4 Lx family of adapters.
- Enabled by default.
+ It is currently only supported on the ConnectX-4 Lx and ConnectX-5
+ families of adapters. Enabled by default.
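  Since this is a run-time device parameter, it can be toggled per port at
  start-up. As a minimal, hedged example (the PCI address is only a
  placeholder, and the parameter name is assumed to match the
  ``MLX5_TXQ_MPW_EN`` key handled in the driver code further below), MPW
  could be disabled for a single port with:

  .. code-block:: console

     testpmd -w 0000:05:00.0,txq_mpw_en=0 -- -i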
Prerequisites
-------------
- **libmlx5**
- Low-level user space driver library for Mellanox ConnectX-4 devices,
- it is automatically loaded by libibverbs.
+ Low-level user space driver library for Mellanox ConnectX-4/ConnectX-5
+ devices; it is automatically loaded by libibverbs.
This library basically implements send/receive calls to the hardware
queues.
Unlike most other PMDs, these modules must remain loaded and bound to
their devices:
- - mlx5_core: hardware driver managing Mellanox ConnectX-4 devices and
- related Ethernet kernel network devices.
+ - mlx5_core: hardware driver managing Mellanox ConnectX-4/ConnectX-5
+ devices and related Ethernet kernel network devices.
- mlx5_ib: InfiniBand device driver.
- ib_uverbs: user space driver for Verbs (entry point for libibverbs).
- **Firmware update**
- Mellanox OFED releases include firmware updates for ConnectX-4 adapters.
+ Mellanox OFED releases include firmware updates for ConnectX-4/ConnectX-5
+ adapters.
Because each release provides new features, these updates must be applied to
match the kernel modules and libraries they come with.
Currently supported by DPDK:
-- Mellanox OFED **3.4-1.0.0.0**.
+- Mellanox OFED version:
+
+ - ConnectX-4: **3.4-1.0.0.0**
+ - ConnectX-4 Lx: **3.4-1.0.0.0**
+ - ConnectX-5: **4.0-0.0.8.1**
- firmware version:
- ConnectX-4: **12.17.1010**
- ConnectX-4 Lx: **14.17.1010**
+ - ConnectX-5: **16.18.0296**
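To verify that a port is actually running one of the firmware versions
listed above, one option (not part of this patch, just the standard
libibverbs utility) is to query the device; ``mlx5_0`` below is a
placeholder device name:

.. code-block:: console

   ibv_devinfo -d mlx5_0 | grep fw_ver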
Getting Mellanox OFED
~~~~~~~~~~~~~~~~~~~~~
Usage example
-------------
-This section demonstrates how to launch **testpmd** with Mellanox ConnectX-4
-devices managed by librte_pmd_mlx5.
+This section demonstrates how to launch **testpmd** with Mellanox
+ConnectX-4/ConnectX-5 devices managed by librte_pmd_mlx5.
#. Load the kernel modules:
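   As a minimal sketch (assuming the module names listed in the
   prerequisites section above), this step typically amounts to:

   .. code-block:: console

      modprobe -a ib_uverbs mlx5_core mlx5_ib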
A new network PMD which supports Solarflare SFN7xxx and SFN8xxx family
of 10/40 Gbps adapters has been added.
+* **Added support for Mellanox ConnectX-5 adapters (mlx5).**
+
+ Support for Mellanox ConnectX-5 family of 10/25/40/50/100 Gbps adapters
+ has been added to the existing mlx5 PMD.
+
* **virtio-user with vhost-kernel as another exceptional path.**
Previously, we upstreamed a virtual device, virtio-user with vhost-user
} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
priv->txqs_inline = tmp;
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
- priv->mps = !!tmp;
+ priv->mps &= !!tmp; /* Enable MPW only if HW supports it. */
} else {
WARN("%s: unknown parameter", key);
return -EINVAL;
sriov = ((pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
(pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF));
- /* Multi-packet send is only supported by ConnectX-4 Lx PF. */
- mps = (pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX4LX);
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) ||
+ (pci_dev->id.device_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||
+ (pci_dev->id.device_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));
+ /*
+ * Multi-packet send is supported by ConnectX-4 Lx PF as well
+ * as all ConnectX-5 devices.
+ */
+ switch (pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
+ mps = 1;
+ break;
+ default:
+ mps = 0;
+ }
INFO("PCI information matches, using device \"%s\""
" (SR-IOV: %s, MPS: %s)",
list[i]->name,
RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
},
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
+ },
{
.vendor_id = 0
}
PCI_DEVICE_ID_MELLANOX_CONNECTX4VF = 0x1014,
PCI_DEVICE_ID_MELLANOX_CONNECTX4LX = 0x1015,
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5 = 0x1017,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
};
struct priv {
priv_select_tx_function(struct priv *priv)
{
priv->dev->tx_pkt_burst = mlx5_tx_burst;
- /* Display warning for unsupported configurations. */
- if (priv->sriov && priv->mps)
- WARN("multi-packet send WQE cannot be used on a SR-IOV setup");
/* Select appropriate TX function. */
- if ((priv->sriov == 0) && priv->mps && priv->txq_inline) {
+ if (priv->mps && priv->txq_inline) {
priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
- } else if ((priv->sriov == 0) && priv->mps) {
+ } else if (priv->mps) {
priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw;
DEBUG("selected MPW TX function");
}
.obj = tmpl.qp,
/* Enable multi-packet send if supported. */
.family_flags =
- ((priv->mps && !priv->sriov) ?
+ (priv->mps ?
IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR :
0),
};