rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
			 (DMA_CH_INC * rxq->queue_id));
rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
DMA_CH_RDTR_LO);
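/* Aside, not part of the patch: a minimal sketch of the register
 * arithmetic above, assuming the axgbe layout where channel i's DMA
 * register block starts at DMA_CH_BASE + i * DMA_CH_INC (macros from
 * axgbe_common.h) and the Rx descriptor tail pointer sits at offset
 * DMA_CH_RDTR_LO inside that block. The helper name is hypothetical.
 */
static volatile uint32_t *
axgbe_rx_tail_reg_sketch(void *xgmac_regs, uint16_t queue_id)
{
	uint8_t *ch_regs = (uint8_t *)xgmac_regs + DMA_CH_BASE +
			   (DMA_CH_INC * queue_id);	/* per-channel block */

	return (volatile uint32_t *)(ch_regs + DMA_CH_RDTR_LO);
}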
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
- rxq->crc_len = ETHER_CRC_LEN;
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
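/* Aside, not part of the patch: the new check reads the Rx offload bits
 * directly instead of going through a helper. A minimal sketch of the
 * rule with a hypothetical helper name; DEV_RX_OFFLOAD_KEEP_CRC comes
 * from rte_ethdev.h and RTE_ETHER_CRC_LEN (the 4-byte Ethernet FCS)
 * from rte_ether.h.
 */
static inline uint8_t
crc_len_from_offloads(uint64_t rx_offloads)
{
	/* Keeping the CRC means every received frame carries four
	 * trailing FCS bytes the PMD must account for in its lengths.
	 */
	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
		return RTE_ETHER_CRC_LEN;
	return 0;
}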
PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u\n",
	    (unsigned int)rxq->queue_id);
rte_eth_devices[
rxq->port_id].data->rx_mbuf_alloc_failed++;
+ rxq->rx_mbuf_alloc_failed++;
break;
}
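/* Aside, not part of the patch: the added rxq->rx_mbuf_alloc_failed
 * counter records allocation failures per Rx queue alongside the
 * port-level counter. A hedged sketch of how a stats_get callback
 * could roll the per-queue counters up into rte_eth_stats.rx_nombuf;
 * the function name is hypothetical.
 */
static int
axgbe_stats_get_sketch(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct axgbe_rx_queue *rxq = dev->data->rx_queues[i];

		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
	}
	return 0;
}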
pidx = idx + 1;
const struct rte_memzone *tz;
tx_desc = nb_desc;
- pdata = (struct axgbe_port *)dev->data->dev_private;
+ pdata = dev->data->dev_private;
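/* Aside, not part of the patch: dev_private is a void *, and in C a
 * void * converts implicitly to any object pointer type, so the cast
 * removed above was redundant. The plain assignment is the idiomatic
 * form:
 */
struct axgbe_port *pdata = dev->data->dev_private;	/* no cast needed */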
/*
 * validate tx descriptors count
 * should be power of 2 and less than h/w supported
 */
if (txq->nb_desc % txq->free_thresh != 0)
txq->vector_disable = 1;
- if ((tx_conf->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOOFFLOADS) !=
- ETH_TXQ_FLAGS_NOOFFLOADS) {
+ if (tx_conf->offloads != 0)
txq->vector_disable = 1;
- }
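/* Aside, not part of the patch: the txq_flags bitmask (including
 * ETH_TXQ_FLAGS_NOOFFLOADS) was retired in favor of the per-queue
 * tx_conf->offloads field of DEV_TX_OFFLOAD_* bits, so "any offload
 * requested" reduces to a non-zero mask. A minimal sketch with a
 * hypothetical helper name:
 */
static inline int
axgbe_tx_vector_usable(const struct rte_eth_txconf *tx_conf)
{
	/* The vector Tx path handles no offloads, so the driver must
	 * fall back to the scalar path when any bit is set.
	 */
	return tx_conf->offloads == 0;
}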
/* Allocate TX ring hardware descriptors */
tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
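/* Aside, not part of the patch: the ring is typically reserved from a
 * DMA-capable memzone sized for tsize bytes via the ethdev helper
 * rte_eth_dma_zone_reserve(). A hedged sketch; the "tx_ring" name and
 * cache-line alignment are illustrative, not taken from this patch.
 */
tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
			      RTE_CACHE_LINE_SIZE, socket_id);
if (tz == NULL)
	return -ENOMEM;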