I40E_RXD_QW1_STATUS_SHIFT;
}
- rte_smp_rmb();
+ /* This barrier orders loads of the different words in the descriptor */
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
/* Compute how many status bits were set */
for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) {
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_vsi *vsi;
struct i40e_pf *pf = NULL;
- struct i40e_vf *vf = NULL;
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz;
uint32_t ring_size;
offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
- if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
- vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- vsi = &vf->vsi;
- if (!vsi)
- return -EINVAL;
- reg_idx = queue_idx;
- } else {
- pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
- if (!vsi)
- return -EINVAL;
- q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
- if (q_offset < 0)
- return -EINVAL;
- reg_idx = vsi->base_queue + q_offset;
- }
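+ /* The VF datapath now lives in its own driver, so only the PF path
+  * remains: map the queue index to its VSI and register index.
+  */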
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+ if (!vsi)
+ return -EINVAL;
+ q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+ if (q_offset < 0)
+ return -EINVAL;
+ reg_idx = vsi->base_queue + q_offset;
if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
(nb_desc > I40E_MAX_RING_DESC) ||
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx]) {
- i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ i40e_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!rz) {
- i40e_dev_rx_queue_release(rxq);
+ i40e_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
return -ENOMEM;
}
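+ /* Keep the memzone pointer so the queue release path can free it. */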
+ rxq->mz = rz;
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
RTE_CACHE_LINE_SIZE,
socket_id);
if (!rxq->sw_ring) {
- i40e_dev_rx_queue_release(rxq);
+ i40e_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
return -ENOMEM;
}
if (dev->data->dev_started) {
if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
- i40e_dev_rx_queue_release(rxq);
+ i40e_rx_queue_release(rxq);
return -EINVAL;
}
} else {
}
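+/* The ethdev queue-release callbacks now take a port and queue id; the
+ * void * variants below are kept as internal helpers.
+ */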
void
-i40e_dev_rx_queue_release(void *rxq)
+i40e_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ i40e_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+i40e_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ i40e_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
+void
+i40e_rx_queue_release(void *rxq)
{
struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
i40e_rx_queue_release_mbufs(q);
rte_free(q->sw_ring);
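+ /* Free the descriptor ring memzone reserved at queue setup. */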
+ rte_memzone_free(q->mz);
rte_free(q);
}
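+/* The descriptor-count callback now receives the Rx queue pointer directly
+ * rather than a port and queue id.
+ */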
uint32_t
-i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+i40e_dev_rx_queue_count(void *rx_queue)
{
#define I40E_RXQ_SCAN_INTERVAL 4
volatile union i40e_rx_desc *rxdp;
struct i40e_rx_queue *rxq;
uint16_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
return desc;
}
-int
-i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
- volatile union i40e_rx_desc *rxdp;
- struct i40e_rx_queue *rxq = rx_queue;
- uint16_t desc;
- int ret;
-
- if (unlikely(offset >= rxq->nb_rx_desc)) {
- PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
- return 0;
- }
-
- desc = rxq->rx_tail + offset;
- if (desc >= rxq->nb_rx_desc)
- desc -= rxq->nb_rx_desc;
-
- rxdp = &(rxq->rx_ring[desc]);
-
- ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
- I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
- (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
-
- return ret;
-}
-
int
i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi;
struct i40e_pf *pf = NULL;
- struct i40e_vf *vf = NULL;
struct i40e_tx_queue *txq;
const struct rte_memzone *tz;
uint32_t ring_size;
offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
- if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
- vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- vsi = &vf->vsi;
- reg_idx = queue_idx;
- } else {
- pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
- if (!vsi)
- return -EINVAL;
- q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
- if (q_offset < 0)
- return -EINVAL;
- reg_idx = vsi->base_queue + q_offset;
- }
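+ /* As on the Rx side, only the PF path remains for Tx queue setup. */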
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+ if (!vsi)
+ return -EINVAL;
+ q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+ if (q_offset < 0)
+ return -EINVAL;
+ reg_idx = vsi->base_queue + q_offset;
if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
(nb_desc > I40E_MAX_RING_DESC) ||
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx]) {
- i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ i40e_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!tz) {
- i40e_dev_tx_queue_release(txq);
+ i40e_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
return -ENOMEM;
}
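+ /* Record the ring memzone so it can be freed with the queue. */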
+ txq->mz = tz;
txq->nb_tx_desc = nb_desc;
txq->tx_rs_thresh = tx_rs_thresh;
txq->tx_free_thresh = tx_free_thresh;
RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq->sw_ring) {
- i40e_dev_tx_queue_release(txq);
+ i40e_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
return -ENOMEM;
}
if (dev->data->dev_started) {
if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
- i40e_dev_tx_queue_release(txq);
+ i40e_tx_queue_release(txq);
return -EINVAL;
}
} else {
}
void
-i40e_dev_tx_queue_release(void *txq)
+i40e_tx_queue_release(void *txq)
{
struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
i40e_tx_queue_release_mbufs(q);
rte_free(q->sw_ring);
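+ /* Free the Tx ring memzone along with the queue. */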
+ rte_memzone_free(q->mz);
rte_free(q);
}
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
+
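+ /* Drop any mbuf chain left over from an interrupted scattered receive. */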
+ if (rxq->pkt_first_seg != NULL)
+ rte_pktmbuf_free(rxq->pkt_first_seg);
+
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
}
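+ /* The maximum frame size is now derived from the configured MTU instead
+  * of a separate jumbo-frame offload flag.
+  */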
rxq->max_pkt_len =
- RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len *
- rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
- rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
- PMD_DRV_LOG(ERR, "maximum packet length must "
- "be larger than %u and smaller than %u,"
- "as jumbo frame is enabled",
- (uint32_t)I40E_ETH_MAX_LEN,
- (uint32_t)I40E_FRAME_SIZE_MAX);
- return I40E_ERR_CONFIG;
- }
- } else {
- if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
- rxq->max_pkt_len > I40E_ETH_MAX_LEN) {
- PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is disabled",
- (uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)I40E_ETH_MAX_LEN);
- return I40E_ERR_CONFIG;
- }
+ RTE_MIN(hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len,
+ data->mtu + I40E_ETH_OVERHEAD);
+ if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)I40E_FRAME_SIZE_MAX);
+ return I40E_ERR_CONFIG;
}
return 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (!dev->data->rx_queues[i])
continue;
- i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
+ i40e_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
- rte_eth_dma_zone_free(dev, "rx_ring", i);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (!dev->data->tx_queues[i])
continue;
- i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
+ i40e_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
- rte_eth_dma_zone_free(dev, "tx_ring", i);
}
}
I40E_FDIR_QUEUE_ID, ring_size,
I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
if (!tz) {
- i40e_dev_tx_queue_release(txq);
+ i40e_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
return I40E_ERR_NO_MEMORY;
}
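+ /* FDIR rings are freed through the same queue-release helpers, so the
+  * memzone is tracked here as well.
+  */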
+ txq->mz = tz;
txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
txq->queue_id = I40E_FDIR_QUEUE_ID;
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
I40E_FDIR_QUEUE_ID, ring_size,
I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
if (!rz) {
- i40e_dev_rx_queue_release(rxq);
+ i40e_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
return I40E_ERR_NO_MEMORY;
}
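+ /* Likewise for the FDIR Rx ring memzone. */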
+ rxq->mz = rz;
rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC;
rxq->queue_id = I40E_FDIR_QUEUE_ID;
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
qinfo->conf.offloads = txq->offloads;
}
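+/* Build the AVX probe only on x86; other architectures never call it. */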
+#ifdef RTE_ARCH_X86
static inline bool
get_avx_supported(bool request_avx512)
{
-#ifdef RTE_ARCH_X86
if (request_avx512) {
if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
return false;
#endif
}
-#else
- RTE_SET_USED(request_avx512);
-#endif /* RTE_ARCH_X86 */
return false;
}
+#endif /* RTE_ARCH_X86 */
void __rte_cold