	rxq->stats.ipackets -= (pkts_n - n);
	rxq->stats.ibytes -= err_bytes;
#endif
-	rxq->pending_err = 0;
	return n;
}
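
Note: the counter rollback above is the tail of the vectorized Rx error-recovery helper. For orientation, a minimal sketch of the assumed post-patch helper follows; it is reconstructed from the names in this diff, and the error-marking convention (packet_type set to RTE_PTYPE_ALL_MASK) is an assumption, not something this excerpt shows.

/* Sketch (assumed): walk the burst, free mbufs marked as errored,
 * compact the survivors, and undo the soft counters that were
 * already credited for the dropped packets. */
static uint16_t
rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
			 uint16_t pkts_n)
{
	uint16_t n = 0;
	uint16_t i;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t err_bytes = 0;
#endif

	for (i = 0; i < pkts_n; ++i) {
		struct rte_mbuf *pkt = pkts[i];

		/* Assumption: errored completions were tagged with
		 * RTE_PTYPE_ALL_MASK while the burst was assembled. */
		if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
#ifdef MLX5_PMD_SOFT_COUNTERS
			err_bytes += pkt->pkt_len;
#endif
			rte_pktmbuf_free_seg(pkt);
		} else {
			pkts[n++] = pkt;
		}
	}
	rxq->stats.idropped += (pkts_n - n);
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets -= (pkts_n - n);
	rxq->stats.ibytes -= err_bytes;
#endif
	return n;
}
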
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx;
+	uint64_t err = 0;

-	nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
-	if (unlikely(rxq->pending_err))
+	nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
+	if (unlikely(err))
		nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
	return nb_rx;
}
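
This hunk changes the error-reporting contract: instead of a pending_err flag stored in the queue, rxq_burst_v reports errors through a caller-provided counter. A hedged sketch of the assumed prototype after the change (the real declaration is not part of this excerpt):

/* Assumed contract: *err accumulates the number of errored CQEs seen
 * while filling pkts[]; the wrapper takes the recovery slow path only
 * when it is non-zero. Sketch only, not the verbatim header. */
static uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
	    uint16_t pkts_n, uint64_t *err);
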
int __attribute__((cold))
priv_check_vec_tx_support(struct priv *priv)
{
-	if (!priv->tx_vec_en ||
+	if (!priv->config.tx_vec_en ||
	    priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
-	    priv->mps != MLX5_MPW_ENHANCED ||
-	    priv->tso)
+	    priv->config.mps != MLX5_MPW_ENHANCED ||
+	    priv->config.tso)
		return -ENOTSUP;
	return 1;
}
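
The priv->X to priv->config.X renames in this and the following hunks assume the device's run-time parameters were consolidated into a configuration struct embedded in the private data. A rough sketch of the assumed layout, limited to the fields this diff touches (names inferred from the hunks, not the full definition):

/* Assumed shape of the consolidated configuration (abridged). */
struct mlx5_dev_config {
	unsigned int tx_vec_en:1; /* Vectorized Tx is enabled. */
	unsigned int rx_vec_en:1; /* Vectorized Rx is enabled. */
	unsigned int tso:1;       /* Whether TSO is enabled. */
	unsigned int mps;         /* Multi-packet send mode. */
};

struct priv {
	/* ... */
	struct mlx5_dev_config config; /* Device configuration. */
};
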
int __attribute__((cold))
rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
	struct mlx5_rxq_ctrl *ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

-	if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
+	if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
		return -ENOTSUP;
	return 1;
}
int __attribute__((cold))
priv_check_vec_rx_support(struct priv *priv)
{
	uint16_t i;

-	if (!priv->rx_vec_en)
+	if (!priv->config.rx_vec_en)
		return -ENOTSUP;
	/* All the configured queues should support. */
	for (i = 0; i < priv->rxqs_n; ++i) {