for (i = 0; i < pkts_n; ++i) {
struct rte_mbuf *pkt = pkts[i];
- if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
+ if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
#ifdef MLX5_PMD_SOFT_COUNTERS
err_bytes += PKT_LEN(pkt);
#endif
#ifdef MLX5_PMD_SOFT_COUNTERS
rxq->stats.ipackets -= (pkts_n - n);
rxq->stats.ibytes -= err_bytes;
#endif
+ mlx5_rx_err_handle(rxq, 1);
return n;
}
uint64_t err = 0;
nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
- if (unlikely(err))
+ if (unlikely(err | rxq->err_state))
nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
return nb_rx;
}
int __attribute__((cold))
mlx5_check_vec_tx_support(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
if (!priv->config.tx_vec_en ||
- priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
+ priv->txqs_n > (unsigned int)priv->config.txqs_vec ||
priv->config.mps != MLX5_MPW_ENHANCED ||
offloads & ~MLX5_VEC_TX_OFFLOAD_CAP)
return -ENOTSUP;
int __attribute__((cold))
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint16_t i;
if (!priv->config.rx_vec_en)