}
}
-static const struct iavf_rxq_ops def_rxq_ops = {
- .release_mbufs = release_rxq_mbufs,
+static const
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
+#endif
};
-static const struct iavf_txq_ops def_txq_ops = {
- .release_mbufs = release_txq_mbufs,
+static const
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+#ifdef CC_AVX512_SUPPORT
+ [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+#endif
+#endif
 };
static inline void
PMD_INIT_FUNC_TRACE();
+ if (ad->closed)
+ return -EIO;
+
offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
PMD_INIT_FUNC_TRACE();
+ if (adapter->closed)
+ return -EIO;
+
offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
RTE_ETH_QUEUE_STATE_STARTED;
}
+ if (dev->data->dev_conf.rxmode.offloads &
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ if (iavf_get_phc_time(rxq)) {
+ PMD_DRV_LOG(ERR, "get physical time failed");
+			return -EIO;
+ }
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ }
+
return err;
}
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
uint64_t dma_addr;
uint64_t pkt_flags;
const uint32_t *ptype_tbl;
+ uint64_t ts_ns;
nb_rx = 0;
nb_hold = 0;
rx_ring = rxq->rx_ring;
ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ rxq->hw_time_update = sw_cur_time;
+ }
+ }
+
while (nb_rx < nb_pkts) {
rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
&rxq->stats.ipsec_crypto);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+
+ if (iavf_timestamp_dynflag > 0) {
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ *RTE_MBUF_DYNFIELD(rxm,
+ iavf_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ rxm->ol_flags |= iavf_timestamp_dynflag;
+ }
+
rxm->ol_flags |= pkt_flags;
rx_pkts[nb_rx++] = rxm;
uint16_t rx_stat_err0;
uint64_t dma_addr;
uint64_t pkt_flags;
+ uint64_t ts_ns;
volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
volatile union iavf_rx_flex_desc *rxdp;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ rxq->hw_time_update = sw_cur_time;
+ }
+ }
+
while (nb_rx < nb_pkts) {
rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+ if (iavf_timestamp_dynflag > 0) {
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ *RTE_MBUF_DYNFIELD(first_seg,
+ iavf_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ first_seg->ol_flags |= iavf_timestamp_dynflag;
+ }
+
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
int32_t nb_staged = 0;
uint64_t pkt_flags;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ uint64_t ts_ns;
rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
return 0;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ rxq->hw_time_update = sw_cur_time;
+ }
+ }
+
/* Scan LOOK_AHEAD descriptors at a time to determine which
* descriptors reference packets that are ready to be received.
*/
stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
+ if (iavf_timestamp_dynflag > 0) {
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
+
+ *RTE_MBUF_DYNFIELD(mb,
+ iavf_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ mb->ol_flags |= iavf_timestamp_dynflag;
+ }
+
mb->ol_flags |= pkt_flags;
/* Put up to nb_pkts directly into buffers */
struct iavf_tx_queue *txq = tx_queue;
struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (adapter->closed)
+ return 0;
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int i;
+ struct iavf_rx_queue *rxq;
+ bool use_flex = true;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq)
+			continue;
+		if (rxq->rxdid <= IAVF_RXDID_LEGACY_1) {
+ PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d] is legacy, "
+ "set rx_pkt_burst as legacy for all queues", rxq->rxdid, i);
+ use_flex = false;
+ } else if (!(vf->supported_rxdid & BIT(rxq->rxdid))) {
+ PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d] is not supported, "
+ "set rx_pkt_burst as legacy for all queues", rxq->rxdid, i);
+ use_flex = false;
+ }
+ }
#ifdef RTE_ARCH_X86
- struct iavf_rx_queue *rxq;
- int i;
int check_ret;
bool use_avx2 = false;
bool use_avx512 = false;
- bool use_flex = false;
check_ret = iavf_rx_vec_dev_check(dev);
if (check_ret >= 0 &&
use_avx512 = true;
#endif
- if (vf->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
- use_flex = true;
-
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
(void)iavf_rxq_vec_setup(rxq);
if (dev->data->scattered_rx) {
PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
dev->data->port_id);
- if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ if (use_flex)
dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
else
dev->rx_pkt_burst = iavf_recv_scattered_pkts;
} else {
PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
dev->data->port_id);
- if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ if (use_flex)
dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
else
dev->rx_pkt_burst = iavf_recv_pkts;