}
}
-static const struct iavf_rxq_ops def_rxq_ops = {
- .release_mbufs = release_rxq_mbufs,
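+/* Release-mbufs handlers live in a const array indexed by the
+ * rel_mbufs_type enum stored in the queue, so shared queue structures
+ * no longer carry per-process function pointers (a pointer set by the
+ * primary process would be stale in a secondary process).
+ */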
+static const struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
+#endif
};
-static const struct iavf_txq_ops def_txq_ops = {
- .release_mbufs = release_txq_mbufs,
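+/* Tx counterpart of the table above, indexed the same way. */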
+static const struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+#ifdef CC_AVX512_SUPPORT
+ [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+#endif
+#endif
};
static inline void
PMD_INIT_FUNC_TRACE();
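+ /* Refuse queue setup once the port has been closed and its resources released. */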
+ if (ad->closed)
+ return -EIO;
+
offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
PMD_INIT_FUNC_TRACE();
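+ /* Same guard as on the Rx side: no queue setup after close. */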
+ if (adapter->closed)
+ return -EIO;
+
offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
RTE_ETH_QUEUE_STATE_STARTED;
}
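+ /* Take an initial PHC time sample at queue start; Rx descriptors carry
+ * only the low 32 bits of the timestamp and are extended against it.
+ */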
+ if (dev->data->dev_conf.rxmode.offloads &
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ err = iavf_get_phc_time(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ return err;
+ }
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ }
+
return err;
}
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
uint64_t dma_addr;
uint64_t pkt_flags;
const uint32_t *ptype_tbl;
+ uint64_t ts_ns;
nb_rx = 0;
nb_hold = 0;
rx_ring = rxq->rx_ring;
ptype_tbl = rxq->vsi->adapter->ptype_tbl;
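+ /* Re-read the PHC time if the cached value has gone stale, so that
+ * extending the 32-bit descriptor timestamps cannot wrap.
+ */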
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ rxq->hw_time_update = sw_cur_time;
+ }
+ }
+
while (nb_rx < nb_pkts) {
rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
&rxq->stats.ipsec_crypto);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+
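+ /* Extend the 32-bit HW timestamp to 64 bits with the cached PHC
+ * time and expose it through the timestamp dynfield/dynflag.
+ */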
+ if (iavf_timestamp_dynflag > 0) {
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ *RTE_MBUF_DYNFIELD(rxm,
+ iavf_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ rxm->ol_flags |= iavf_timestamp_dynflag;
+ }
+
rxm->ol_flags |= pkt_flags;
rx_pkts[nb_rx++] = rxm;
uint16_t rx_stat_err0;
uint64_t dma_addr;
uint64_t pkt_flags;
+ uint64_t ts_ns;
volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
volatile union iavf_rx_flex_desc *rxdp;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
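+ /* As in the non-scattered path: refresh the cached PHC time if stale. */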
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ rxq->hw_time_update = sw_cur_time;
+ }
+ }
+
while (nb_rx < nb_pkts) {
rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
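+ /* Extend and publish the HW timestamp, as in the non-scattered path. */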
+ if (iavf_timestamp_dynflag > 0) {
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ *RTE_MBUF_DYNFIELD(first_seg,
+ iavf_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ first_seg->ol_flags |= iavf_timestamp_dynflag;
+ }
+
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
#define IAVF_LOOK_AHEAD 8
static inline int
-iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
+iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
volatile union iavf_rx_flex_desc *rxdp;
struct rte_mbuf **rxep;
uint16_t pkt_len;
int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
int32_t i, j, nb_rx = 0;
+ int32_t nb_staged = 0;
uint64_t pkt_flags;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ uint64_t ts_ns;
rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
return 0;
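+ /* Keep the cached PHC time fresh for the 32b->64b timestamp extension. */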
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ rxq->hw_time_update = sw_cur_time;
+ }
+ }
+
/* Scan LOOK_AHEAD descriptors at a time to determine which
* descriptors reference packets that are ready to be received.
*/
#endif
}
- nb_rx += nb_dd;
-
/* Translate descriptor info to mbuf parameters */
for (j = 0; j < nb_dd; j++) {
IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
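+ /* Same 32b->64b timestamp extension as in the other Rx paths. */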
+ if (iavf_timestamp_dynflag > 0) {
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
+
+ *RTE_MBUF_DYNFIELD(mb,
+ iavf_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ mb->ol_flags |= iavf_timestamp_dynflag;
+ }
+
mb->ol_flags |= pkt_flags;
- }
- for (j = 0; j < IAVF_LOOK_AHEAD; j++)
- rxq->rx_stage[i + j] = rxep[j];
+ /* Put up to nb_pkts directly into buffers */
+ if ((i + j) < nb_pkts) {
+ rx_pkts[i + j] = rxep[j];
+ nb_rx++;
+ } else {
+ /* Stage excess pkts received */
+ rxq->rx_stage[nb_staged] = rxep[j];
+ nb_staged++;
+ }
+ }
if (nb_dd != IAVF_LOOK_AHEAD)
break;
}
+ /* Update rxq->rx_nb_avail to reflect number of staged pkts */
+ rxq->rx_nb_avail = nb_staged;
+
/* Clear software ring entries */
- for (i = 0; i < nb_rx; i++)
+ for (i = 0; i < (nb_rx + nb_staged); i++)
rxq->sw_ring[rxq->rx_tail + i] = NULL;
return nb_rx;
}
static inline int
-iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
+iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
volatile union iavf_rx_desc *rxdp;
struct rte_mbuf **rxep;
uint32_t rx_status;
int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
int32_t i, j, nb_rx = 0;
+ int32_t nb_staged = 0;
uint64_t pkt_flags;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
#endif
}
- nb_rx += nb_dd;
-
/* Translate descriptor info to mbuf parameters */
for (j = 0; j < nb_dd; j++) {
IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
mb->ol_flags |= pkt_flags;
- }
- for (j = 0; j < IAVF_LOOK_AHEAD; j++)
- rxq->rx_stage[i + j] = rxep[j];
+ /* Put up to nb_pkts directly into buffers */
+ if ((i + j) < nb_pkts) {
+ rx_pkts[i + j] = rxep[j];
+ nb_rx++;
+ } else { /* Stage excess pkts received */
+ rxq->rx_stage[nb_staged] = rxep[j];
+ nb_staged++;
+ }
+ }
if (nb_dd != IAVF_LOOK_AHEAD)
break;
}
+ /* Update rxq->rx_nb_avail to reflect number of staged pkts */
+ rxq->rx_nb_avail = nb_staged;
+
/* Clear software ring entries */
- for (i = 0; i < nb_rx; i++)
+ for (i = 0; i < (nb_rx + nb_staged); i++)
rxq->sw_ring[rxq->rx_tail + i] = NULL;
return nb_rx;
return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
- nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
+ nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq, rx_pkts, nb_pkts);
else
- nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
+ nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq, rx_pkts, nb_pkts);
+
rxq->rx_next_avail = 0;
- rxq->rx_nb_avail = nb_rx;
- rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx + rxq->rx_nb_avail);
if (rxq->rx_tail > rxq->rx_free_trigger) {
if (iavf_rx_alloc_bufs(rxq) != 0) {
- uint16_t i, j;
+ uint16_t i, j, nb_staged;
/* TODO: count rx_mbuf_alloc_failed here */
+ nb_staged = rxq->rx_nb_avail;
rxq->rx_nb_avail = 0;
- rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
- for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
+
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - (nb_rx + nb_staged));
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++) {
+ rxq->sw_ring[j] = rx_pkts[i];
+ rx_pkts[i] = NULL;
+ }
+ for (i = 0, j = rxq->rx_tail + nb_rx; i < nb_staged; i++, j++)
rxq->sw_ring[j] = rxq->rx_stage[i];
return 0;
}
rxq->port_id, rxq->queue_id,
rxq->rx_tail, nb_rx);
- if (rxq->rx_nb_avail)
- return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
-
- return 0;
+ return nb_rx;
}
static uint16_t
struct iavf_tx_queue *txq = tx_queue;
struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
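+ /* Do not prepare packets for transmit on a closed port. */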
+ if (adapter->closed)
+ return 0;
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int i;
+ struct iavf_rx_queue *rxq;
+ bool use_flex = true;
+
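+ /* Use the flex-descriptor Rx paths only if every queue requested a
+ * supported flex RXDID; any legacy or unsupported RXDID forces the
+ * legacy descriptor path for all queues.
+ */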
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq->rxdid <= IAVF_RXDID_LEGACY_1) {
+ PMD_DRV_LOG(NOTICE, "requested RXDID[%d] in Queue[%d] is legacy, "
+ "falling back to legacy rx_pkt_burst for all queues", rxq->rxdid, i);
+ use_flex = false;
+ } else if (!(vf->supported_rxdid & BIT(rxq->rxdid))) {
+ PMD_DRV_LOG(NOTICE, "requested RXDID[%d] in Queue[%d] is not supported, "
+ "falling back to legacy rx_pkt_burst for all queues", rxq->rxdid, i);
+ use_flex = false;
+ }
+ }
#ifdef RTE_ARCH_X86
- struct iavf_rx_queue *rxq;
- int i;
int check_ret;
bool use_avx2 = false;
bool use_avx512 = false;
- bool use_flex = false;
check_ret = iavf_rx_vec_dev_check(dev);
if (check_ret >= 0 &&
use_avx512 = true;
#endif
- if (vf->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
- use_flex = true;
-
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
(void)iavf_rxq_vec_setup(rxq);
if (dev->data->scattered_rx) {
PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
dev->data->port_id);
- if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ if (use_flex)
dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
else
dev->rx_pkt_burst = iavf_recv_scattered_pkts;
} else {
PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
dev->data->port_id);
- if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ if (use_flex)
dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
else
dev->rx_pkt_burst = iavf_recv_pkts;