unsigned int comp;
volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
unsigned int segs_n = 0;
+ const unsigned int max_inline = txq->max_inline;
if (unlikely(!pkts_n))
return 0;
if (unlikely(!max_wqe))
return 0;
do {
- unsigned int max_inline = txq->max_inline;
- const unsigned int inline_en = !!max_inline && txq->inline_en;
struct rte_mbuf *buf = NULL;
uint8_t *raw;
volatile struct mlx5_wqe_v *wqe = NULL;
}
}
/* Inline if enough room. */
- if (inline_en || tso) {
+ if (max_inline || tso) {
uint32_t inl;
uintptr_t end = (uintptr_t)
(((uintptr_t)txq->wqes) +
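/*
 * Not part of the patch: a minimal stand-alone sketch of why dropping
 * inline_en is safe.  The flag was only set together with a non-zero
 * txq->max_inline (see the Tx queue setup hunk further down), so testing
 * max_inline directly reaches the same decision.  struct txq_sketch below
 * is a hypothetical stand-in, not the real mlx5 Tx queue structure.
 */
#include <assert.h>
#include <stdint.h>

struct txq_sketch {
	uint16_t max_inline;  /* 0 when Tx inlining is disabled */
	uint16_t inline_en:1; /* old flag, set only alongside a non-zero max_inline */
};

int
main(void)
{
	struct txq_sketch txq = { .max_inline = 4, .inline_en = 1 };
	const unsigned int tso = 0;
	const unsigned int max_inline = txq.max_inline;
	const unsigned int inline_en = !!max_inline && txq.inline_en;

	/* Old and new conditions pick the same branch. */
	assert((inline_en || tso) == (max_inline || tso));
	return 0;
}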
uint16_t elts_n:4; /* (*elts)[] length (in log2). */
uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
- uint16_t inline_en:1; /* When set inline is enabled. */
uint16_t tso_en:1; /* When set hardware TSO is enabled. */
uint16_t tunnel_en:1;
/* When set TX offloads for tunneled packets are supported. */
.pd = priv->pd,
.comp_mask = IBV_QP_INIT_ATTR_PD,
};
- if (txq_data->inline_en)
+ if (txq_data->max_inline)
attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
if (txq_data->tso_en) {
attr.init.max_tso_header = txq_ctrl->max_tso_header;
tmpl->txq.max_inline =
((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
- tmpl->txq.inline_en = 1;
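/*
 * Not part of the patch: a small worked example of the rounding above,
 * assuming RTE_CACHE_LINE_SIZE is 64 as on most targets.  A requested
 * txq_inline of 200 bytes becomes max_inline == 4 cache-line units, while
 * a txq_inline of 0 leaves max_inline at 0, which is why the field can
 * also serve as the "inlining enabled" test.
 */
#include <stdio.h>

#define RTE_CACHE_LINE_SIZE 64 /* assumed for the example */

int
main(void)
{
	const unsigned int txq_inline = 200; /* hypothetical device argument */
	const unsigned int max_inline =
		(txq_inline + (RTE_CACHE_LINE_SIZE - 1)) / RTE_CACHE_LINE_SIZE;

	printf("max_inline = %u cache lines\n", max_inline); /* prints 4 */
	return 0;
}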
/* TSO and MPS can't be enabled concurrently. */
assert(!priv->tso || !priv->mps);
if (priv->mps == MLX5_MPW_ENHANCED) {