}
}
-static const struct iavf_rxq_ops def_rxq_ops = {
- .release_mbufs = release_rxq_mbufs,
+/* Rx mbuf-release callback table, indexed by rxq->rel_mbufs_type
+ * (IAVF_REL_MBUFS_DEFAULT, and IAVF_REL_MBUFS_SSE_VEC on x86).
+ * Replaces the single per-queue ops pointer (def_rxq_ops) with an
+ * index-selected entry — presumably so queues store an index rather
+ * than a function pointer (e.g. multi-process safety); confirm intent.
+ */
+static const
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
+#endif
};
-static const struct iavf_txq_ops def_txq_ops = {
- .release_mbufs = release_txq_mbufs,
+/* Tx mbuf-release callback table, indexed by txq->rel_mbufs_type.
+ * The SSE entry exists only on x86; the AVX512 entry additionally
+ * requires CC_AVX512_SUPPORT (note the nested #ifdef pairing).
+ */
+static const
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+#ifdef CC_AVX512_SUPPORT
+ [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+#endif
+#endif
};
static inline void
PMD_INIT_FUNC_TRACE();
+ /* Refuse Rx queue setup once the port has been closed. */
+ if (ad->closed)
+ return -EIO;
+
offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
PMD_INIT_FUNC_TRACE();
+ /* Refuse Tx queue setup once the port has been closed. */
+ if (adapter->closed)
+ return -EIO;
+
offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
RTE_ETH_QUEUE_STATE_STARTED;
}
+ /* Seed the per-queue PHC time and its software refresh stamp (ms)
+  * when Rx timestamp offload is enabled on the port.
+  */
+ if (dev->data->dev_conf.rxmode.offloads &
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ if (iavf_get_phc_time(rxq)) {
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ /* NOTE(review): err here carries whatever value it had from
+  * earlier in the function — if that is 0 on this path, this
+  * returns success despite the logged failure; confirm whether
+  * a negative errno (e.g. -EIO) was intended.
+  */
+ return err;
+ }
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ }
+
return err;
}
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
uint64_t dma_addr;
uint64_t pkt_flags;
const uint32_t *ptype_tbl;
+ uint64_t ts_ns;
nb_rx = 0;
nb_hold = 0;
rx_ring = rxq->rx_ring;
ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct iavf_adapter *ad = rxq->vsi->adapter;
- uint64_t ts_ns;
-
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(rxm,
iavf_timestamp_dynfield_offset,
uint16_t rx_stat_err0;
uint64_t dma_addr;
uint64_t pkt_flags;
- struct iavf_adapter *ad = rxq->vsi->adapter;
uint64_t ts_ns;
volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(first_seg,
iavf_timestamp_dynfield_offset,
int32_t nb_staged = 0;
uint64_t pkt_flags;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct iavf_adapter *ad = rxq->vsi->adapter;
uint64_t ts_ns;
rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(mb,
iavf_timestamp_dynfield_offset,
struct iavf_tx_queue *txq = tx_queue;
struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Accept no packets for transmit once the port has been closed. */
+ if (adapter->closed)
+ return 0;
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];