When hardware TSO is enabled, packets marked with TCP segmentation
offload will be divided into segments by the hardware. Disabled by default.
+- ``tx_vec_en`` parameter [int]
+
+  A nonzero value enables the Tx vector path, on ConnectX-5 NICs only, when
+  the number of global Tx queues on the port does not exceed
+  MLX5_VPMD_MIN_TXQS.
+
+ Enabled by default on ConnectX-5.
+
+- ``rx_vec_en`` parameter [int]
+
+  A nonzero value enables the Rx vector path if the port is not configured
+  to receive multi-segment packets; otherwise this parameter is ignored
+  (a usage example follows this list).
+
+ Enabled by default.
+
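Both parameters are ordinary mlx5 device arguments, so they can be toggled
per port from the EAL command line. A minimal usage sketch, assuming testpmd
and an illustrative PCI address:

    testpmd -w 0000:05:00.0,tx_vec_en=1,rx_vec_en=0 -- -i

Here ``rx_vec_en=0`` forces the scalar Rx path even on hardware that could
use the vector one.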
Prerequisites
-------------
/* Device parameter to enable hardware TSO offload. */
#define MLX5_TSO "tso"
+/* Device parameter to enable hardware Tx vector. */
+#define MLX5_TX_VEC_EN "tx_vec_en"
+
+/* Device parameter to enable hardware Rx vector. */
+#define MLX5_RX_VEC_EN "rx_vec_en"
+
/* Default PMD specific parameter value. */
#define MLX5_ARG_UNSET (-1)
int mpw_hdr_dseg;
int inline_max_packet_sz;
int tso;
+ int tx_vec_en;
+ int rx_vec_en;
};
/**
* Retrieve integer value from environment variable.
args->inline_max_packet_sz = tmp;
} else if (strcmp(MLX5_TSO, key) == 0) {
args->tso = !!tmp;
+ } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
+ args->tx_vec_en = !!tmp;
+ } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
+ args->rx_vec_en = !!tmp;
} else {
WARN("%s: unknown parameter", key);
return -EINVAL;
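For context, the strcmp() chain above sits inside the driver's kvargs checker
callback; here is a minimal sketch of the whole handler, with the value
conversion reconstructed rather than quoted from the source:

    /* Sketch: convert the value string once, then dispatch on the key.
     * Assumes <errno.h>, <stdlib.h>, <string.h> and the MLX5_* macros. */
    static int
    mlx5_args_check(const char *key, const char *val, void *opaque)
    {
            struct mlx5_args *args = opaque;
            unsigned long tmp;

            errno = 0;
            tmp = strtoul(val, NULL, 0);
            if (errno) {
                    WARN("%s: \"%s\" is not a valid integer", key, val);
                    return errno;
            }
            if (strcmp(MLX5_TX_VEC_EN, key) == 0)
                    args->tx_vec_en = !!tmp; /* !! collapses nonzero to 1 */
            else if (strcmp(MLX5_RX_VEC_EN, key) == 0)
                    args->rx_vec_en = !!tmp;
            else {
                    WARN("%s: unknown parameter", key);
                    return -EINVAL;
            }
            return 0;
    }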
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
MLX5_TSO,
+ MLX5_TX_VEC_EN,
+ MLX5_RX_VEC_EN,
NULL,
};
struct rte_kvargs *kvlist;
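The params array is the whitelist handed to the kvargs machinery; a sketch of
the parsing routine that drives the checker above (shape assumed, error
handling trimmed):

    /* Sketch: parse the devargs string against the whitelist, then run
     * mlx5_args_check() once per recognized key that is present. */
    static int
    mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
    {
            struct rte_kvargs *kvlist;
            unsigned int i;

            if (devargs == NULL)
                    return 0; /* no device arguments given */
            kvlist = rte_kvargs_parse(devargs->args, params);
            if (kvlist == NULL)
                    return 0; /* nothing recognizable to parse */
            for (i = 0; params[i] != NULL; ++i)
                    if (rte_kvargs_count(kvlist, params[i]))
                            rte_kvargs_process(kvlist, params[i],
                                               mlx5_args_check, args);
            rte_kvargs_free(kvlist);
            return 0;
    }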
priv->inline_max_packet_sz = args->inline_max_packet_sz;
if (args->tso != MLX5_ARG_UNSET)
priv->tso = args->tso;
+ if (args->tx_vec_en != MLX5_ARG_UNSET)
+ priv->tx_vec_en = args->tx_vec_en;
+ if (args->rx_vec_en != MLX5_ARG_UNSET)
+ priv->rx_vec_en = args->rx_vec_en;
}
/**
.mpw_hdr_dseg = MLX5_ARG_UNSET,
.inline_max_packet_sz = MLX5_ARG_UNSET,
.tso = MLX5_ARG_UNSET,
+ .tx_vec_en = MLX5_ARG_UNSET,
+ .rx_vec_en = MLX5_ARG_UNSET,
};
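The MLX5_ARG_UNSET initializers implement a sentinel pattern: a parsed field
overrides the probe-time default only when the user actually supplied it, and
because mlx5_args_check() stores !!tmp (always 0 or 1), the sentinel -1 can
never collide with a real value. A hypothetical helper making the test
explicit:

    /* Hypothetical helper (not in the driver): apply a parsed argument
     * only when the user explicitly set it. The driver open-codes this
     * per field, since the priv fields are one-bit bitfields whose
     * address cannot be taken. */
    static void
    mlx5_arg_apply(int *dst, int src)
    {
            if (src != MLX5_ARG_UNSET)
                    *dst = src;
    }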
exp_device_attr.comp_mask =
priv->mps = mps; /* Enable MPW by default if supported. */
priv->cqe_comp = 1; /* Enable compression by default. */
priv->tunnel_en = tunnel_en;
+ /* Enable vector by default if supported. */
+ priv->tx_vec_en = 1;
+ priv->rx_vec_en = 1;
err = mlx5_args(&args, pci_dev->device.devargs);
if (err) {
ERROR("failed to process device arguments: %s",
unsigned int tso:1; /* Whether TSO is supported. */
unsigned int tunnel_en:1;
unsigned int isolated:1; /* Whether isolated mode is enabled. */
+ unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */
+ unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */
/* Whether Tx offloads for tunneled packets are supported. */
unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int txq_inline; /* Maximum packet size for inlining. */
int __attribute__((cold))
priv_check_vec_tx_support(struct priv *priv)
{
- if (priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
+ if (!priv->tx_vec_en ||
+ priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
priv->mps != MLX5_MPW_ENHANCED ||
priv->tso)
return -ENOTSUP;
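A sketch of how this check is typically consumed when the burst functions are
selected; the function shape and the vector burst symbol are assumptions, not
quotes from the source:

    /* Sketch (assumed call site): use the vector Tx burst only when every
     * precondition in priv_check_vec_tx_support() holds. */
    static void
    priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
    {
            if (priv_check_vec_tx_support(priv) > 0)
                    dev->tx_pkt_burst = mlx5_tx_burst_vec; /* assumed name */
            else
                    dev->tx_pkt_burst = mlx5_tx_burst; /* scalar fallback */
    }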
int __attribute__((cold))
rxq_check_vec_support(struct rxq *rxq)
{
- if (rxq->sges_n != 0)
+ struct rxq_ctrl *ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+
+ if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
return -ENOTSUP;
return 1;
}
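The container_of() step recovers the enclosing rxq_ctrl, which holds the priv
back-pointer, from the embedded rxq without growing the hot-path structure.
For reference, the usual definition of the idiom (DPDK ships an equivalent in
rte_common.h):

    #include <stddef.h>

    /* Resolve a pointer to a member into a pointer to its enclosing
     * structure by subtracting the member's offset. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))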
{
uint16_t i;
+ if (!priv->rx_vec_en)
+ return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
struct rxq *rxq = (*priv->rxqs)[i];
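The excerpt ends inside the loop; a reconstructed sketch, assumed rather than
quoted, of how such a per-queue gate plausibly concludes:

    /* Sketch: the port may use vector Rx only if every configured queue
     * passes the per-queue check. */
    for (i = 0; i < priv->rxqs_n; ++i) {
            struct rxq *rxq = (*priv->rxqs)[i];

            if (rxq == NULL || rxq_check_vec_support(rxq) < 0)
                    return -ENOTSUP;
    }
    return 1;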