VLAN offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
+Inner L3 checksum = Y
+Inner L4 checksum = Y
Packet type parsing = Y
Rx descriptor status = Y
Tx descriptor status = Y
- KVM and VMware ESX SR-IOV modes are supported.
- RSS hash result is supported.
- Hardware TSO.
+- Hardware checksum TX offload for VXLAN and GRE.
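
A minimal sketch of how an application could use the new offload (not part
of this patch; ``port_id`` and ``mbuf``, the VXLAN/TCP layout and the field
values are illustrative assumptions)::

    struct rte_eth_dev_info dev_info;

    rte_eth_dev_info_get(port_id, &dev_info);
    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
        /* Describe the outer and inner headers of a VXLAN-encapsulated
         * TCP/IPv4 packet so the PMD can locate them. */
        mbuf->outer_l2_len = sizeof(struct ether_hdr);
        mbuf->outer_l3_len = sizeof(struct ipv4_hdr);
        mbuf->l2_len = ETHER_VXLAN_HLEN + sizeof(struct ether_hdr);
        mbuf->l3_len = sizeof(struct ipv4_hdr);
        mbuf->ol_flags |= PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM |
                          PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
    }

The inner checksum requests (``PKT_TX_IP_CKSUM``/``PKT_TX_TCP_CKSUM``) are
translated by the PMD into the inner WQE flags added below; the outer UDP
checksum is not offloaded.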
Limitations
-----------
- Inner RSS for VXLAN frames is not supported yet.
- Port statistics through software counters only.
-- Hardware checksum offloads for VXLAN inner header are not supported yet.
+- Hardware checksum RX offloads for VXLAN inner header are not supported yet.
- Secondary process RX is not supported.
Configuration
-------------
struct ibv_device_attr device_attr;
unsigned int sriov;
unsigned int mps;
+ unsigned int tunnel_en = 0;
int idx;
int i;
* as all ConnectX-5 devices.
*/
switch (pci_dev->id.device_id) {
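+ /* ConnectX-4: tunnel offloads supported, MPW not supported. */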
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
+ tunnel_en = 1;
+ mps = 0;
+ break;
case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
mps = 1;
+ tunnel_en = 1;
break;
default:
mps = 0;
priv->mtu = ETHER_MTU;
priv->mps = mps; /* Enable MPW by default if supported. */
priv->cqe_comp = 1; /* Enable compression by default. */
+ priv->tunnel_en = tunnel_en;
err = mlx5_args(priv, pci_dev->device.devargs);
if (err) {
ERROR("failed to process device arguments: %s",
unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
unsigned int pending_alarm:1; /* An alarm is pending. */
unsigned int tso:1; /* Whether TSO is supported. */
+ unsigned int tunnel_en:1;
+ /* Whether Tx offloads for tunneled packets are supported. */
unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int txq_inline; /* Maximum packet size for inlining. */
unsigned int txqs_inline; /* Queue number threshold for inlining. */
DEV_TX_OFFLOAD_TCP_CKSUM);
if (priv->tso)
info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (priv->tunnel_en)
+ info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
/* FIXME: RETA update/query API expects the callee to know the size of
/* Tunnel packet bit in the CQE. */
#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
+/* Inner L3 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)
+
+/* Inner L4 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
+
/* INVALID is used by packets matching no flow rules. */
#define MLX5_FLOW_MARK_INVALID 0
/* Should we enable HW CKSUM offload */
if (buf->ol_flags &
(PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ const uint64_t is_tunneled = buf->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
+
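+ /* Tunneled packet: have hardware compute the inner L3/L4
+  * checksums and, when requested, the outer IPv4 checksum;
+  * the outer L4 checksum is not offloaded. */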
+ if (is_tunneled && txq->tunnel_en) {
+ cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+ MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+ } else {
+ cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+ }
}
raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
/* Replace the Ethernet type by the VLAN if necessary. */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint16_t inline_en:1; /* When set inline is enabled. */
uint16_t tso_en:1; /* When set hardware TSO is enabled. */
+ uint16_t tunnel_en:1;
+ /* When set, TX offloads for tunneled packets are supported. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
volatile void *wqes; /* Work queue (use volatile to write into). */
max_tso_inline);
tmpl.txq.tso_en = 1;
}
+ if (priv->tunnel_en)
+ tmpl.txq.tunnel_en = 1;
tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
ret = (errno ? errno : EINVAL);