+ struct mlx5_priv *priv = txq_ctrl->priv;
+ struct mlx5_dev_config *config = &priv->config;
+ unsigned int inlen_send; /* Inline data for ordinary SEND. */
+ unsigned int inlen_empw; /* Inline data for enhanced MPW. */
+ unsigned int inlen_mode; /* Minimal required Inline data. */
+ unsigned int txqs_inline; /* Min Tx queues to enable inline. */
+ uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
+ bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO);
+ bool vlan_inline;
+ unsigned int temp;
+
+ if (config->txqs_inline == MLX5_ARG_UNSET)
+ txqs_inline =
+#if defined(RTE_ARCH_ARM64)
+ (priv->pci_dev->id.device_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
+ MLX5_INLINE_MAX_TXQS_BLUEFIELD :
+#endif
+ MLX5_INLINE_MAX_TXQS;
+ else
+ txqs_inline = (unsigned int)config->txqs_inline;
+ inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
+ MLX5_SEND_DEF_INLINE_LEN :
+ (unsigned int)config->txq_inline_max;
+ inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
+ MLX5_EMPW_DEF_INLINE_LEN :
+ (unsigned int)config->txq_inline_mpw;
+ inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
+ 0 : (unsigned int)config->txq_inline_min;
+ if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
+ inlen_empw = 0;
+ /*
+ * If a minimal amount of data to inline is requested
+ * we MUST enable inlining. This is the case for ConnectX-4,
+ * which usually requires the L2 headers to be inlined for
+ * correct operation, and for ConnectX-4 Lx, which requires
+ * the L2-L4 headers to be inlined to support E-Switch Flows.
+ */
+ if (inlen_mode) {
+ if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
+ /*
+ * Optimize the minimal inlining for single-segment
+ * packets to fill one WQEBB without gaps.
+ */
+ temp = MLX5_ESEG_MIN_INLINE_SIZE;
+ } else {
+ temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
+ temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
+ MLX5_ESEG_MIN_INLINE_SIZE;
+ temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
+ }
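+ /*
+ * Worked example of the alignment above (assuming the
+ * sizes MLX5_ESEG_MIN_INLINE_SIZE == 18 and
+ * MLX5_WSEG_SIZE == 16): a requested inlen_mode of 32
+ * yields temp = RTE_ALIGN(32 - 18, 16) + 18 == 34, so
+ * the data inlined beyond the Ethernet Segment minimum
+ * fills whole 16-byte WSEGs without gaps.
+ */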
+ if (temp != inlen_mode) {
+ DRV_LOG(INFO,
+ "port %u minimal required inline setting"
+ " aligned from %u to %u",
+ PORT_ID(priv), inlen_mode, temp);
+ inlen_mode = temp;
+ }
+ }
+ /*
+ * If the port is configured to support VLAN insertion and the
+ * device does not support this feature in hardware (on NICs
+ * prior to ConnectX-5, or if the wqe_vlan_insert flag is not
+ * set) we must enable data inline on all queues, because the
+ * feature is implemented in software by the single tx_burst
+ * routine.
+ */
+ txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
+ vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+ !config->hw_vlan_insert;
+ /*
+ * If there are few Tx queues, saving CPU cycles is
+ * prioritized and data inlining is disabled entirely.
+ */
+ if (inlen_send && priv->txqs_n >= txqs_inline) {
+ /*
+ * The data sent with the ordinary MLX5_OPCODE_SEND
+ * may be inlined in the Ethernet Segment, align the
+ * length accordingly to fit entire WQEBBs.
+ */
+ temp = RTE_MAX(inlen_send,
+ MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
+ temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
+ temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
+ temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
+ temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
+ MLX5_ESEG_MIN_INLINE_SIZE -
+ MLX5_WQE_CSEG_SIZE -
+ MLX5_WQE_ESEG_SIZE -
+ MLX5_WQE_DSEG_SIZE * 2);
+ temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
+ temp = RTE_MAX(temp, inlen_mode);
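+ /*
+ * Worked example (assuming MLX5_ESEG_MIN_INLINE_SIZE == 18,
+ * MLX5_WQE_DSEG_SIZE == 16 and MLX5_WQE_SIZE == 64): an
+ * inlen_send of 200 gives
+ * temp = RTE_ALIGN(200 - 34, 64) + 34 == 226, i.e. the
+ * 34 bytes of initial alignment plus three whole 64-byte
+ * WQEBBs of inline data.
+ */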
+ if (temp != inlen_send) {
+ DRV_LOG(INFO,
+ "port %u ordinary send inline setting"
+ " aligned from %u to %u",
+ PORT_ID(priv), inlen_send, temp);
+ inlen_send = temp;
+ }
+ /*
+ * Not aligned to cache lines, but to WQEs.
+ * The first bytes of data (the initial alignment)
+ * are going to be copied explicitly at the
+ * beginning of the inlining buffer in the
+ * Ethernet Segment.
+ */
+ assert(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+ assert(inlen_send <= MLX5_WQE_SIZE_MAX +
+ MLX5_ESEG_MIN_INLINE_SIZE -
+ MLX5_WQE_CSEG_SIZE -
+ MLX5_WQE_ESEG_SIZE -
+ MLX5_WQE_DSEG_SIZE * 2);
+ } else if (inlen_mode) {
+ /*
+ * If minimal inlining is requested we must
+ * enable inlining in general, regardless of
+ * the number of configured queues. Ignore the
+ * txq_inline_max devarg, this is not
+ * full-featured inline.
+ */
+ inlen_send = inlen_mode;
+ inlen_empw = 0;
+ } else if (vlan_inline) {
+ /*
+ * The hardware does not report the offload for
+ * VLAN insertion, we must enable data inline
+ * to implement the feature in software.
+ */
+ inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
+ inlen_empw = 0;
+ } else {
+ inlen_send = 0;
+ inlen_empw = 0;
+ }
+ txq_ctrl->txq.inlen_send = inlen_send;
+ txq_ctrl->txq.inlen_mode = inlen_mode;
+ txq_ctrl->txq.inlen_empw = 0;
+ if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
+ /*
+ * The data sent with MLX5_OPCODE_ENHANCED_MPSW
+ * may be inlined in the Data Segment, align the
+ * length accordingly to fit entire WQEBBs.
+ */
+ temp = RTE_MAX(inlen_empw,
+ MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
+ temp -= MLX5_DSEG_MIN_INLINE_SIZE;
+ temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
+ temp += MLX5_DSEG_MIN_INLINE_SIZE;
+ temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
+ MLX5_DSEG_MIN_INLINE_SIZE -
+ MLX5_WQE_CSEG_SIZE -
+ MLX5_WQE_ESEG_SIZE -
+ MLX5_WQE_DSEG_SIZE);
+ temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
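+ /*
+ * Worked example (assuming MLX5_DSEG_MIN_INLINE_SIZE == 12
+ * and MLX5_WQE_SIZE == 64): an inlen_empw of 100 gives
+ * temp = RTE_ALIGN(100 - 12, 64) + 12 == 140, i.e. the
+ * 12-byte minimal Data Segment inline part plus two
+ * whole 64-byte WQEBBs.
+ */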
+ if (temp != inlen_empw) {
+ DRV_LOG(INFO,
+ "port %u enhanced empw inline setting"
+ " aligned from %u to %u",
+ PORT_ID(priv), inlen_empw, temp);
+ inlen_empw = temp;
+ }
+ assert(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
+ assert(inlen_empw <= MLX5_WQE_SIZE_MAX +
+ MLX5_DSEG_MIN_INLINE_SIZE -
+ MLX5_WQE_CSEG_SIZE -
+ MLX5_WQE_ESEG_SIZE -
+ MLX5_WQE_DSEG_SIZE);
+ txq_ctrl->txq.inlen_empw = inlen_empw;
+ }
+ txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
+ if (tso) {
+ txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
+ txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
+ MLX5_MAX_TSO_HEADER);
+ txq_ctrl->txq.tso_en = 1;
+ }
+ txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
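+ /*
+ * The software parser (SWP) is engaged only if some tunnel
+ * offload requiring it is enabled on the queue and the
+ * device reports SWP support.
+ */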
+ txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
+ txq_ctrl->txq.offloads) && config->swp;
+}
+
+/**
+ * Adjust Tx queue data inline parameters for large queue sizes.
+ * The data inline feature requires multiple WQEs to fit the packets,
+ * and if a large number of Tx descriptors is requested by the
+ * application the total WQE amount may exceed the hardware
+ * capabilities. If the default inline settings are used we can try
+ * to adjust them to meet the hardware requirements without
+ * exceeding the queue size.
+ *
+ * @param txq_ctrl
+ * Pointer to Tx queue control structure.
+ *
+ * @return
+ * Zero on success, otherwise the parameters cannot be adjusted.
+ */
+static int
+txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
+{
+ struct mlx5_priv *priv = txq_ctrl->priv;
+ struct mlx5_dev_config *config = &priv->config;
+ unsigned int max_inline;
+
+ max_inline = txq_calc_inline_max(txq_ctrl);
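+ /*
+ * txq_calc_inline_max() (defined elsewhere in this file) is
+ * assumed to return the maximal inline data capacity the
+ * queue can provide: the more descriptors are requested, the
+ * fewer WQEBBs remain per descriptor and the less data can
+ * be inlined within the device max_qp_wr limit.
+ */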
+ if (!txq_ctrl->txq.inlen_send) {
+ /*
+ * The inline data feature is not engaged at all.
+ * There is nothing to adjust.
+ */
+ return 0;
+ }
+ if (txq_ctrl->max_inline_data <= max_inline) {
+ /*
+ * The requested inline data length does not
+ * exceed queue capabilities.
+ */
+ return 0;
+ }
+ if (txq_ctrl->txq.inlen_mode > max_inline) {
+ DRV_LOG(ERR,
+ "minimal data inline requirements (%u) are not"
+ " satisfied (%u) on port %u, try the smaller"
+ " Tx queue size (%d)",
+ txq_ctrl->txq.inlen_mode, max_inline,
+ priv->dev_data->port_id,
+ priv->sh->device_attr.orig_attr.max_qp_wr);
+ goto error;
+ }
+ if (txq_ctrl->txq.inlen_send > max_inline &&
+ config->txq_inline_max != MLX5_ARG_UNSET &&
+ config->txq_inline_max > (int)max_inline) {
+ DRV_LOG(ERR,
+ "txq_inline_max requirements (%u) are not"
+ " satisfied (%u) on port %u, try the smaller"
+ " Tx queue size (%d)",
+ txq_ctrl->txq.inlen_send, max_inline,
+ priv->dev_data->port_id,
+ priv->sh->device_attr.orig_attr.max_qp_wr);
+ goto error;
+ }
+ if (txq_ctrl->txq.inlen_empw > max_inline &&
+ config->txq_inline_mpw != MLX5_ARG_UNSET &&
+ config->txq_inline_mpw > (int)max_inline) {
+ DRV_LOG(ERR,
+ "txq_inline_mpw requirements (%u) are not"
+ " satisfied (%u) on port %u, try the smaller"
+ " Tx queue size (%d)",
+ txq_ctrl->txq.inlen_empw, max_inline,
+ priv->dev_data->port_id,
+ priv->sh->device_attr.orig_attr.max_qp_wr);
+ goto error;
+ }
+ if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
+ DRV_LOG(ERR,
+ "tso header inline requirements (%u) are not"
+ " satisfied (%u) on port %u, try the smaller"
+ " Tx queue size (%d)",
+ MLX5_MAX_TSO_HEADER, max_inline,
+ priv->dev_data->port_id,
+ priv->sh->device_attr.orig_attr.max_qp_wr);
+ goto error;
+ }
+ if (txq_ctrl->txq.inlen_send > max_inline) {
+ DRV_LOG(WARNING,
+ "adjust txq_inline_max (%u->%u)"
+ " due to large Tx queue on port %u",
+ txq_ctrl->txq.inlen_send, max_inline,
+ priv->dev_data->port_id);
+ txq_ctrl->txq.inlen_send = max_inline;
+ }
+ if (txq_ctrl->txq.inlen_empw > max_inline) {
+ DRV_LOG(WARNING,
+ "adjust txq_inline_mpw (%u->%u)"
+ "due to large Tx queue on port %u",
+ txq_ctrl->txq.inlen_empw, max_inline,
+ priv->dev_data->port_id);
+ txq_ctrl->txq.inlen_empw = max_inline;
+ }
+ txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
+ txq_ctrl->txq.inlen_empw);
+ assert(txq_ctrl->max_inline_data <= max_inline);
+ assert(txq_ctrl->txq.inlen_mode <= max_inline);
+ assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
+ assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
+ !txq_ctrl->txq.inlen_empw);
+ return 0;
+error:
+ rte_errno = ENOMEM;
+ return -ENOMEM;