RSS hash = Y
SR-IOV = Y
VLAN filter = Y
+L3 checksum offload = Y
+L4 checksum offload = Y
+Inner L3 checksum = Y
+Inner L4 checksum = Y
Basic stats = Y
Stats per queue = Y
Other kdrv = Y
priv->pd = pd;
priv->mtu = ETHER_MTU;
priv->vf = vf;
+ priv->hw_csum = !!(device_attr.device_cap_flags &
+ IBV_DEVICE_RAW_IP_CSUM);
+ DEBUG("checksum offloading is %ssupported",
+ (priv->hw_csum ? "" : "not "));
+ /* Only ConnectX-3 Pro supports tunneling. */
+ priv->hw_csum_l2tun =
+ priv->hw_csum &&
+ (device_attr.vendor_part_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
+ DEBUG("L2 tunnel checksum offloads are %ssupported",
+ (priv->hw_csum_l2tun ? "" : "not "));
/* Configure the first MAC address by default. */
if (mlx4_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx4_en loaded?"
uint32_t vf:1; /**< This is a VF device. */
uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */
uint32_t isolated:1; /**< Toggle isolated mode. */
+ uint32_t hw_csum:1; /**< Checksum offload is supported. */
+ uint32_t hw_csum_l2tun:1; /**< Checksum support for L2 tunnels. */
struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */
info->max_mac_addrs = RTE_DIM(priv->mac);
info->rx_offload_capa = 0;
info->tx_offload_capa = 0;
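+ /* Advertise the Tx checksum offloads detected at probe time. */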
+ if (priv->hw_csum)
+ info->tx_offload_capa |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ if (priv->hw_csum_l2tun)
+ info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (mlx4_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
/* Work queue element (WQE) flags. */
#define MLX4_BIT_WQE_OWN 0x80000000
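+/* Request checksum of inner IP and L4 headers (tunneled packets). */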
+#define MLX4_WQE_CTRL_IIP_HDR_CSUM (1 << 28)
+#define MLX4_WQE_CTRL_IL4_HDR_CSUM (1 << 27)
#define MLX4_SIZE_TO_TXBBS(size) \
(RTE_ALIGN((size), (MLX4_TXBB_SIZE)) >> (MLX4_TXBB_SHIFT))
} else {
srcrb_flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
}
+ /* Enable HW checksum offload if requested. */
+ if (txq->csum &&
+ (pkt->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
+ const uint64_t is_tunneled = (pkt->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN));
+
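+ /* Tunneled packets request inner checksums through owner_opcode,
+  * plain packets through srcrb_flags. */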
+ if (is_tunneled && txq->csum_l2tun) {
+ owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
+ MLX4_WQE_CTRL_IL4_HDR_CSUM;
+ if (pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ srcrb_flags |=
+ RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
+ } else {
+ srcrb_flags |= RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM |
+ MLX4_WQE_CTRL_TCP_UDP_CSUM);
+ }
+ }
ctrl->srcrb_flags = srcrb_flags;
/*
* Make sure descriptor is fully written before
struct txq_elt (*elts)[]; /**< Tx elements. */
struct mlx4_txq_stats stats; /**< Tx queue counters. */
uint32_t max_inline; /**< Max inline send size. */
+ uint32_t csum:1; /**< Enable checksum offloading. */
+ uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
uint8_t *bounce_buf;
/**< Memory used for storing the first DWORD of data TXBBs. */
struct {
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
.elts_comp_cd_init =
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
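+ /* Inherit checksum capabilities detected on the device. */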
+ .csum = priv->hw_csum,
+ .csum_l2tun = priv->hw_csum_l2tun,
.bounce_buf = bounce_buf,
};
txq->cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);